filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
Codes/train_H.py | import tensorflow as tf
import os
from models import H_estimator, disjoint_augment_image_pair
from loss_functions import intensity_loss, depth_consistency_loss3
from utils import load, save, DataLoader
import constant
import numpy as np
os.environ['CUDA_DEVICES_ORDER'] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = constant.GPU
train_folder = constant.TRAIN_FOLDER
test_folder = constant.TEST_FOLDER
batch_size = constant.TRAIN_BATCH_SIZE
iterations = constant.ITERATIONS
height, width = 512, 512
summary_dir = constant.SUMMARY_DIR
snapshot_dir = constant.SNAPSHOT_DIR
# define dataset
with tf.name_scope('dataset'):
##########training###############
train_data_loader = DataLoader(train_folder, height, width)
train_data_dataset = train_data_loader(batch_size=batch_size)
train_data_it = train_data_dataset.make_one_shot_iterator()
train_data_tensor = train_data_it.get_next()
train_data_tensor.set_shape([batch_size, height, width, 7])
###input###
train_inputs = train_data_tensor[:,:,:,0:6]
train_depth = train_data_tensor[:,:,:,6:7]
print('train inputs = {}'.format(train_inputs))
print('train depth2 = {}'.format(train_depth))
#only training dataset augment
with tf.name_scope('disjoint_augment'):
train_inputs_aug = disjoint_augment_image_pair(train_inputs)
# define training generator function
with tf.variable_scope('generator', reuse=None):
print('training = {}'.format(tf.get_variable_scope().name))
train_warp2_depth, train_mesh, train_warp2_H1, train_warp2_H2, train_warp2_H3, train_one_warp_H1, train_one_warp_H2, train_one_warp_H3 = H_estimator(train_inputs_aug, train_inputs, train_depth)
with tf.name_scope('loss'):
# content loss
lam_lp = 1
loss1 = intensity_loss(gen_frames=train_warp2_H1, gt_frames=train_inputs[...,0:3]*train_one_warp_H1, l_num=1)
loss2 = intensity_loss(gen_frames=train_warp2_H2, gt_frames=train_inputs[...,0:3]*train_one_warp_H2, l_num=1)
loss3 = intensity_loss(gen_frames=train_warp2_H3, gt_frames=train_inputs[...,0:3]*train_one_warp_H3, l_num=1)
lp_loss = 16. * loss1 + 4. * loss2 + 1. * loss3
# mesh loss
lam_mesh = 10
mesh_loss = depth_consistency_loss3(train_warp2_depth, train_mesh)
with tf.name_scope('training'):
g_loss = tf.add_n([lp_loss * lam_lp, mesh_loss * lam_mesh], name='g_loss')
g_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='g_step')
g_lrate = tf.train.exponential_decay(0.0001, g_step, decay_steps=50000/4, decay_rate=0.96)
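# Illustrative note on this schedule (example step values chosen arbitrarily, not from the original config):
# with base lr 0.0001, decay_steps 12500 and decay_rate 0.96 (non-staircase by default),
# lr(step) = 0.0001 * 0.96**(step / 12500), e.g. ~9.6e-5 at step 12500 and ~6.6e-5 at step 125000.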
g_optimizer = tf.train.AdamOptimizer(learning_rate=g_lrate, name='g_optimizer')
g_vars = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
grads = g_optimizer.compute_gradients(g_loss, var_list=g_vars)
for i, (g, v) in enumerate(grads):
if g is not None:
grads[i] = (tf.clip_by_norm(g, 3), v) # clip gradients
g_train_op = g_optimizer.apply_gradients(grads, global_step=g_step, name='g_train_op')
# add all to summaries
## summary of loss
tf.summary.scalar(tensor=g_loss, name='g_loss')
tf.summary.scalar(tensor=lp_loss, name='lp_loss')
tf.summary.scalar(tensor=mesh_loss, name='mesh_loss')
## summary of input
tf.summary.image(tensor=train_depth, name='train_depth')
tf.summary.image(tensor=train_inputs[...,0:3], name='train_inpu1')
tf.summary.image(tensor=train_inputs[...,3:6], name='train_inpu2')
## summary of output
tf.summary.image(tensor=train_warp2_depth, name='train_warp2_depth')
tf.summary.image(tensor=train_warp2_H3, name='train_warp2_H3')
summary_op = tf.summary.merge_all()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
# summaries
summary_writer = tf.summary.FileWriter(summary_dir, graph=sess.graph)
# initialize weights
sess.run(tf.global_variables_initializer())
print('Init successfully!')
# tf saver
saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=None)
restore_var = [v for v in tf.global_variables()]
loader = tf.train.Saver(var_list=restore_var)
print("snapshot_dir")
print(snapshot_dir)
if os.path.isdir(snapshot_dir):
ckpt = tf.train.get_checkpoint_state(snapshot_dir)
if ckpt and ckpt.model_checkpoint_path:
load(loader, sess, ckpt.model_checkpoint_path)
else:
print('No checkpoint file found.')
else:
load(loader, sess, snapshot_dir)
_step, _loss, _summaries = 0, None, None
print("============starting training===========")
while _step < iterations:
try:
print('Training generator...')
_, _g_lr, _step, _lp_loss, _mesh_loss, _g_loss, _summaries = sess.run(
[g_train_op, g_lrate, g_step, lp_loss, mesh_loss, g_loss, summary_op])
if _step % 10 == 0:
print('GeneratorModel : Step {}, lr = {:.8f}'.format(_step, _g_lr))
print(' Global Loss : ', _g_loss)
print(' intensity Loss : ({:.4f} * {:.4f} = {:.4f})'.format(_lp_loss, lam_lp, _lp_loss * lam_lp))
print(' mesh Loss : ({:.4f} * {:.4f} = {:.4f})'.format(_mesh_loss, lam_mesh, _mesh_loss * lam_mesh))
if _step % 1000 == 0:
summary_writer.add_summary(_summaries, global_step=_step)
print('Save summaries...')
if _step % 100000 == 0:
save(saver, sess, snapshot_dir, _step)
except tf.errors.OutOfRangeError:
print('Finish successfully!')
save(saver, sess, snapshot_dir, _step)
break
| []
| []
| [
"CUDA_VISIBLE_DEVICES",
"CUDA_DEVICES_ORDER"
]
| [] | ["CUDA_VISIBLE_DEVICES", "CUDA_DEVICES_ORDER"] | python | 2 | 0 | |
PCL/benchmarks/bert/implementations/mindspore_close_src/run_pretrain.py | # Copyright 2020 PCL & PKU
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
#################pre_train bert example on zh-wiki########################
python run_pretrain.py
"""
import os
import ast
import time
import math
import argparse
from mindspore.profiler import Profiler
import mindspore.communication.management as D
from mindspore.communication.management import get_rank
import mindspore.common.dtype as mstype
from mindspore import context
from mindspore.train.model import Model
from mindspore.context import ParallelMode
from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor, Callback
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.train.train_thor import ConvertModelUtils
from mindspore.nn.optim import Lamb, Momentum, AdamWeightDecay, THOR
from mindspore import log as logger
from mindspore.common import set_seed
from src import BertNetworkWithLoss, BertTrainOneStepCell, BertTrainOneStepWithLossScaleCell, \
BertTrainAccumulationAllReduceEachWithLossScaleCell, \
BertTrainAccumulationAllReducePostWithLossScaleCell, \
BertTrainOneStepWithLossScaleCellForAdam, \
AdamWeightDecayForBert, AdamWeightDecayOp, \
BertPreTraining, BertPretrainEval
from src.dataset import create_bert_dataset, create_bert_eval_dataset
from src.config import cfg, bert_net_cfg
from src.utils import LossCallBack, BertLearningRate, BertMetric
from mlperf_logging import mllog
_current_dir = os.path.dirname(os.path.realpath(__file__))
DATA_NAME = "en-wiki-20200101"
LOCAL_CACHE_PATH = "/cache_mlperf"
LOCAL_CACHE_DATA_PATH = os.path.join(LOCAL_CACHE_PATH, DATA_NAME)
os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = os.getenv('RANK_TABLE_FILE')
job_id = os.getenv('JOB_ID')
job_id = job_id if job_id else "default"
device_id = int(os.getenv('DEVICE_ID'))
device_num = int(os.getenv('RANK_SIZE'))
rank_id = int(os.getenv('RANK_ID', '0'))
log_filename = "bert_mllog_{}.log".format(rank_id)
# Eval interval: FLOOR(0.05 * (230.23 * GBS + 3000000), 25000)
FIRST_EVAL_SAMPLES = 1000000
EVAL_INTERVAL = 500000
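# Worked example of the interval formula above (the global batch size is chosen purely for illustration):
# with GBS = 3072, 0.05 * (230.23 * 3072 + 3000000) ≈ 185363, and flooring to a multiple of
# 25000 gives an eval interval of 175000 samples.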
def argparse_init():
"""Argparse init."""
parser = argparse.ArgumentParser(description='bert pre_training')
parser.add_argument('--device_target', type=str, default='Ascend', choices=['Ascend', 'GPU'],
help='device where the code will be implemented. (Default: Ascend)')
parser.add_argument("--distribute", type=str, default="false", choices=["true", "false"],
help="Run distribute, default is false.")
parser.add_argument("--epoch_size", type=int, default="1", help="Epoch size, default is 1.")
parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.")
parser.add_argument("--enable_save_ckpt", type=str, default="true", choices=["true", "false"],
help="Enable save checkpoint, default is true.")
parser.add_argument("--enable_lossscale", type=str, default="true", choices=["true", "false"],
help="Use lossscale or not, default is not.")
parser.add_argument("--do_shuffle", type=str, default="true", choices=["true", "false"],
help="Enable shuffle for dataset, default is true.")
parser.add_argument("--enable_data_sink", type=str, default="true", choices=["true", "false"],
help="Enable data sink, default is true.")
parser.add_argument("--data_sink_steps", type=int, default="-1", help="Sink steps for each epoch, default is 1.")
parser.add_argument("--accumulation_steps", type=int, default="1",
help="Accumulating gradients N times before weight update, default is 1.")
parser.add_argument("--allreduce_post_accumulation", type=str, default="true", choices=["true", "false"],
help="Whether to allreduce after accumulation of N steps or after each step, default is true.")
parser.add_argument("--save_checkpoint_path", type=str, default="", help="Save checkpoint path")
parser.add_argument("--load_checkpoint_path", type=str, default="", help="Load checkpoint file path")
parser.add_argument("--save_checkpoint_steps", type=int, default=1000, help="Save checkpoint steps, "
"default is 1000.")
parser.add_argument("--train_steps", type=int, default=-1, help="Training Steps, default is -1, "
"meaning run all steps according to epoch number.")
parser.add_argument("--save_checkpoint_num", type=int, default=1, help="Save checkpoint numbers, default is 1.")
parser.add_argument("--data_dir", type=str, default="", help="Data path, it is better to use absolute path")
parser.add_argument("--schema_dir", type=str, default="", help="Schema path, it is better to use absolute path")
parser.add_argument("--enable_graph_kernel", type=str, default="auto", choices=["auto", "true", "false"],
help="Accelerate by graph kernel, default is auto.")
parser.add_argument("--total_steps", type=int, default=-1, help="Total steps, default is -1, "
"meaning run all steps according to epoch number.")
parser.add_argument("--train_with_eval", type=str, default="true", choices=['true', 'false'])
parser.add_argument("--eval_data_dir", type=str, default="", help="Data path for evaluation")
parser.add_argument("--data_url", type=str, default="", help="dataset url")
parser.add_argument("--train_url", type=str, default="", help="training url")
parser.add_argument("--enable_profile", type=ast.literal_eval, default=False, help="Enable profiling for training")
parser.add_argument("--seed", type=int, default=1, help="Random seed")
parser.add_argument("--lr_start", type=float, default=None, help="start learning rate")
parser.add_argument("--lr_end", type=float, default=None, help="end learning rate")
parser.add_argument("--eps", type=float, default=None, help="eps for optimizer")
parser.add_argument("--beta1", type=float, default=None, help="Beta_1 of lamb optimizer")
parser.add_argument("--beta2", type=float, default=None, help="Beta_2 of lamb optimizer")
parser.add_argument("--weight_decay", type=float, default=None, help="weight of lamb optimizer")
parser.add_argument("--warmup_steps", type=int, default=None, help="warmup steps for optimizer")
parser.add_argument("--start_warmup_steps", type=int, default=None, help="start warmup steps for optimizer")
parser.add_argument("--do_train", type=ast.literal_eval, default=True, help="Do training")
parser.add_argument("--do_eval", type=ast.literal_eval, default=True, help="Do evaluation before training")
parser.add_argument("--batch_size", type=int, default=None, help="Batch size of network")
parser.add_argument("--eval_batch_size", type=int, default=None, help="Evaluation batch size of network")
args_opt = parser.parse_args()
if args_opt.batch_size is not None:
cfg.batch_size = args_opt.batch_size
if args_opt.lr_start != None:
cfg.Lamb.learning_rate = args_opt.lr_start
if args_opt.lr_end != None:
cfg.Lamb.end_learning_rate = args_opt.lr_end
if args_opt.eps != None:
cfg.Lamb.eps = args_opt.eps
if args_opt.warmup_steps != None:
cfg.Lamb.warmup_steps = args_opt.warmup_steps
if args_opt.start_warmup_steps != None:
cfg.Lamb.start_warmup_steps = args_opt.start_warmup_steps
if args_opt.beta1 != None:
cfg.Lamb.beta1 = args_opt.beta1
if args_opt.beta2 != None:
cfg.Lamb.beta2 = args_opt.beta2
if args_opt.weight_decay != None:
cfg.Lamb.weight_decay = args_opt.weight_decay
if args_opt.eval_batch_size is None:
args_opt.eval_batch_size = cfg.batch_size
return args_opt
def check_data_exist(path):
if not os.path.isdir(path):
print("local cache path not exist: {}".format(path))
return False
dir_list = os.listdir(path)
if "eval" not in dir_list:
print("eval dir lost")
return False
if "train" not in dir_list:
print("train dir lost")
return False
train_count = len(os.listdir(os.path.join(path, "train")))
if train_count != 500:
print("train file lost, found: {}".format(train_count))
print("Train file found: {}".format(os.listdir(os.path.join(path, "train"))))
return False
eval_count = len(os.listdir(os.path.join(path, "eval")))
if eval_count != 1:
print("eval file lost, found: {}".format(eval_count))
print("Eval file found: {}".format(os.listdir(os.path.join(path, "eval"))))
return False
return True
def sync_dataset(data_url, data_dir):
import moxing as mox
import time
sync_lock = "/tmp/copy_sync.lock"
if device_id % min(device_num, 8) == 0 and not os.path.exists(sync_lock):
if not os.path.exists(data_dir):
os.system('sudo mkdir {}'.format(data_dir))
os.system('sudo chmod -R 777 {}'.format(data_dir))
mox.file.copy_parallel(data_url, data_dir)
print("===finish download datasets===")
try:
os.mknod(sync_lock)
except:
pass
print("===save flag===")
while True:
if os.path.exists(sync_lock):
break
time.sleep(1)
def moxing_barrier(train_url, key="train"):
if not train_url:
return
import moxing as mox
try_cnt = 1
while True:
try:
barrier_file = "{}_{}_{}.txt".format(key, job_id, rank_id)
barrier_file = os.path.join(train_url, key, barrier_file)
mox.file.write(barrier_file, '{}'.format(rank_id))
print("rank_id: {}, try_cnt={}, successful write {}".format(rank_id, try_cnt, barrier_file))
break
except Exception as e:
print(e)
print("rank_id: {}, failed {} times".format(rank_id, try_cnt))
time.sleep(3)
try_cnt += 1
while rank_id == 0:
existed = []
all_rank_exist = True
for rank_item in range(device_num):
if rank_item not in existed:
rank_fn_item = os.path.join(train_url, key, '{}_{}_{}.txt'.format(key, job_id, rank_item))
try:
if not mox.file.exists(rank_fn_item):
print("rank_fn_item:{} is not exist".format(rank_fn_item))
all_rank_exist = False
break
else:
existed.append(rank_item)
except:
all_rank_exist = False
if all_rank_exist:
break
else:
time.sleep(3)
print("Reach Barrier at time: ", time.time(), flush=True)
def moxing_copy(train_url, train_dir = "/cache/train"):
if not train_url:
print("train url is empty")
return
import moxing as mox
try_cnt = 1
print("Start to copy train directory")
rank_fn_item = os.path.join(train_url, '{}_{}_{}.txt'.format("start_copy", job_id, 0))
while True:
if rank_id == 0:
try:
mox.file.copy_parallel(train_dir, train_url)
mox.file.write(rank_fn_item, '{}'.format(rank_id))
break
except Exception as e:
print(e)
print("rank_id: {}, failed {} times".format(rank_id, try_cnt))
try_cnt += 1
else:
if not mox.file.exists(rank_fn_item):
time.sleep(1)
else:
break
print("finish copy train directory ", time.time(), flush=True)
def moxing_wrapper(run_func):
def func():
args_opt = argparse_init()
set_seed(args_opt.seed)
data_dir = "/cache/data"
train_dir = os.getcwd()
if os.path.isdir(LOCAL_CACHE_PATH):
data_dir = LOCAL_CACHE_DATA_PATH
if args_opt.data_url:
train_dir = "/cache/train"
if check_data_exist(data_dir):
print("Dataset cache found: ", os.listdir(data_dir))
else:
sync_dataset(args_opt.data_url, data_dir)
print(args_opt.data_url)
print("Finish download dataset: ", os.listdir(data_dir))
if not os.path.isdir(train_dir):
try:
os.mkdir(train_dir)
except:
pass
args_opt.data_dir = os.path.join(data_dir, "train")
args_opt.eval_data_dir = os.path.join(data_dir, "eval")
args_opt.save_checkpoint_path = train_dir
args_opt.device_num = device_num
args_opt.device_id = device_id
moxing_barrier(args_opt.train_url, "dataset")
global log_filename
log_filename = os.path.join(train_dir, log_filename)
if args_opt.enable_profile:
profiler = Profiler(output_path=train_dir)
run_func(args_opt)
if args_opt.enable_profile:
profiler.analyse()
if args_opt.train_url:
import moxing as mox
print("Start to copy train directory")
if rank_id == 0:
mox.file.copy_parallel(train_dir, args_opt.train_url)
moxing_barrier(args_opt.train_url, "finish")
return func
class EvalCallback(Callback):
def __init__(self, model, eval_ds, global_batch, mllogger, train_url=""):
super(EvalCallback, self).__init__()
self.model = model
self.eval_ds = eval_ds
self.global_batch = global_batch
self.eval_count = 0
self.num_samples = 1
self.mllogger = mllogger
self.train_url = train_url
def epoch_begin(self, run_context):
self.mllogger.start(key=mllog.constants.BLOCK_START, metadata={'first_epoch_num': self.num_samples,
'epoch_count': self.num_samples})
def epoch_end(self, run_context):
cb_params = run_context.original_args()
num_samples = cb_params.cur_step_num * self.global_batch
self.num_samples = num_samples
cur_eval = num_samples // EVAL_INTERVAL
print("num_samples: ", num_samples, " cur_eval: ", cur_eval, " eval_count: ", self.eval_count,
" at time: ", time.time(), flush=True)
if cur_eval > self.eval_count:
res = self.model.eval(self.eval_ds, dataset_sink_mode=True)
print("===========================")
print("Accuracy is: ", "%.4f" % res, " at time: ", time.time(), " num samples: ", num_samples)
self.mllogger.end(key=mllog.constants.BLOCK_STOP, metadata={'first_epoch_num': self.num_samples,
'epoch_num': self.num_samples})
self.mllogger.event(key=mllog.constants.EVAL_ACCURACY, value=res,
metadata={"train_samples": num_samples,
"epoch_num": self.num_samples})
print("===========================")
if res > 0.72:
self.mllogger.event(key=mllog.constants.RUN_STOP, metadata={"status": "success"})
self.mllogger.event(key="train_samples", value=num_samples)
self.mllogger.event(key="eval_samples", value=10000)
if self.train_url:
moxing_copy(train_url=self.train_url)
run_context.request_stop()
self.eval_count += 1
def _set_bert_all_reduce_split():
"""set bert all_reduce fusion split, support num_hidden_layers is 12 and 24."""
device_target = context.get_context('device_target')
enable_graph_kernel = context.get_context('enable_graph_kernel')
device_num = context.get_auto_parallel_context('device_num')
if bert_net_cfg.num_hidden_layers == 12:
if bert_net_cfg.use_relative_positions:
context.set_auto_parallel_context(all_reduce_fusion_config=[29, 58, 87, 116, 145, 174, 203, 217])
else:
context.set_auto_parallel_context(all_reduce_fusion_config=[28, 55, 82, 109, 136, 163, 190, 205])
if device_target == 'GPU' and enable_graph_kernel and device_num == 8:
context.set_auto_parallel_context(all_reduce_fusion_config=[180, 205])
elif device_target == 'GPU' and enable_graph_kernel and device_num == 16:
context.set_auto_parallel_context(all_reduce_fusion_config=[120, 205])
elif bert_net_cfg.num_hidden_layers == 24:
if bert_net_cfg.use_relative_positions:
context.set_auto_parallel_context(all_reduce_fusion_config=[30, 90, 150, 210, 270, 330, 390, 421])
else:
context.set_auto_parallel_context(all_reduce_fusion_config=[38, 93, 148, 203, 258, 313, 368, 397])
def _get_optimizer(args_opt, network):
"""get bert optimizer, support Lamb, Momentum, AdamWeightDecay."""
if cfg.optimizer != 'Lamb':
raise ValueError("Only support Lamb for mlperf")
lr_schedule = BertLearningRate(learning_rate=cfg.Lamb.learning_rate,
end_learning_rate=cfg.Lamb.end_learning_rate,
warmup_steps=cfg.Lamb.warmup_steps,
start_warmup_steps=cfg.Lamb.start_warmup_steps,
decay_steps=args_opt.total_steps,
power=cfg.Lamb.power)
params = network.trainable_params()
decay_params = list(filter(cfg.Lamb.decay_filter, params))
other_params = list(filter(lambda x: not cfg.Lamb.decay_filter(x), params))
group_params = [{'params': decay_params, 'weight_decay': cfg.Lamb.weight_decay},
{'params': other_params},
{'order_params': params}]
optimizer = Lamb(group_params, learning_rate=lr_schedule, eps=cfg.Lamb.eps,
beta1=cfg.Lamb.beta1, beta2=cfg.Lamb.beta2)
return optimizer
def _auto_enable_graph_kernel(device_target, graph_kernel_mode):
"""Judge whether is suitable to enable graph kernel."""
return graph_kernel_mode in ("auto", "true") and device_target == 'GPU' and \
cfg.bert_network == 'base' and cfg.optimizer == 'AdamWeightDecay'
def _set_graph_kernel_context(device_target, enable_graph_kernel, is_auto_enable_graph_kernel):
if enable_graph_kernel == "true" or is_auto_enable_graph_kernel:
if device_target == 'GPU':
context.set_context(enable_graph_kernel=True)
else:
logger.warning('Graph kernel only supports GPU back-end now, run with graph kernel off.')
def _check_compute_type(args_opt, is_auto_enable_graph_kernel):
if args_opt.device_target == 'GPU' and bert_net_cfg.compute_type != mstype.float32 and \
not is_auto_enable_graph_kernel:
warning_message = 'Gpu only support fp32 temporarily, run with fp32.'
bert_net_cfg.compute_type = mstype.float32
if args_opt.enable_lossscale == "true":
args_opt.enable_lossscale = "false"
warning_message = 'Gpu only support fp32 temporarily, run with fp32 and disable lossscale.'
logger.warning(warning_message)
@moxing_wrapper
def run_pretrain(args_opt):
"""pre-train bert_clue"""
print("config: ", cfg)
print("args:", args_opt)
print("mllog file: ", log_filename)
mllog.config(filename=log_filename)
mllog.config(
default_namespace="mindspore",
default_stack_offset=1,
default_clear_line=False,
root_dir=os.path.normpath(
os.path.dirname(os.path.realpath(__file__))))
mllogger = mllog.get_mllogger()
mllogger.event(key=mllog.constants.SUBMISSION_BENCHMARK, value="bert")
mllogger.event(key=mllog.constants.SUBMISSION_DIVISION, value="closed")
mllogger.event(key=mllog.constants.SUBMISSION_ORG, value="PCL")
mllogger.event(key=mllog.constants.SUBMISSION_PLATFORM, value="Ascend 910A")
mllogger.event(key=mllog.constants.SUBMISSION_STATUS, value="research")
mllogger.event(key=mllog.constants.CACHE_CLEAR)
global_batchsize = args_opt.device_num * cfg.batch_size
# Eval interval: FLOOR(0.05 * (230.23 * GBS + 3000000), 25000)
global EVAL_INTERVAL
EVAL_INTERVAL = math.floor(0.05 * (230.23 * global_batchsize + 3000000) / 25000)
EVAL_INTERVAL *= 25000
print("EVAL_INTERVAL: ", EVAL_INTERVAL)
start_time = time.time()
print("start time: ", start_time)
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, device_id=args_opt.device_id)
context.set_context(reserve_class_name_in_scope=False)
is_auto_enable_graph_kernel = _auto_enable_graph_kernel(args_opt.device_target, args_opt.enable_graph_kernel)
_set_graph_kernel_context(args_opt.device_target, args_opt.enable_graph_kernel, is_auto_enable_graph_kernel)
ckpt_save_dir = args_opt.save_checkpoint_path
if args_opt.distribute == "true":
if args_opt.device_target == 'Ascend':
D.init()
device_num = args_opt.device_num
rank = D.get_rank()
print("Device_num: ", device_num)
print("rank_id: ", rank_id)
print("rank: ", rank)
print("Group_size: ", D.get_group_size())
else:
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
ckpt_save_dir = args_opt.save_checkpoint_path + 'ckpt_' + str(get_rank()) + '/'
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
device_num=device_num)
_set_bert_all_reduce_split()
else:
rank = 0
device_num = 1
_check_compute_type(args_opt, is_auto_enable_graph_kernel)
if args_opt.data_sink_steps == -1:
args_opt.data_sink_steps = math.ceil(EVAL_INTERVAL / (device_num * cfg.batch_size * args_opt.accumulation_steps))
if args_opt.accumulation_steps > 1:
logger.info("accumulation steps: {}".format(args_opt.accumulation_steps))
logger.info("global batch size: {}".format(cfg.batch_size * args_opt.accumulation_steps))
if args_opt.enable_data_sink == "true":
args_opt.data_sink_steps *= args_opt.accumulation_steps
logger.info("data sink steps: {}".format(args_opt.data_sink_steps))
if args_opt.enable_save_ckpt == "true":
args_opt.save_checkpoint_steps *= args_opt.accumulation_steps
logger.info("save checkpoint steps: {}".format(args_opt.save_checkpoint_steps))
ds = create_bert_dataset(device_num, rank, args_opt.do_shuffle, args_opt.data_dir, args_opt.schema_dir)
print("create dataset time: ", time.time() - start_time, flush=True);
bert = BertPreTraining(bert_net_cfg, True, False)
net_with_loss = BertNetworkWithLoss(bert_net_cfg, True, bert=bert)
print("net_with_loss time: ", time.time() - start_time, flush=True);
new_repeat_count = args_opt.epoch_size * ds.get_dataset_size() // args_opt.data_sink_steps
if args_opt.train_steps > 0:
train_steps = args_opt.train_steps * args_opt.accumulation_steps
new_repeat_count = min(new_repeat_count, train_steps // args_opt.data_sink_steps)
else:
args_opt.train_steps = args_opt.epoch_size * ds.get_dataset_size() // args_opt.accumulation_steps
logger.info("train steps: {}".format(args_opt.train_steps))
args_opt.total_steps = args_opt.train_steps if args_opt.total_steps == -1 else args_opt.total_steps
optimizer = _get_optimizer(args_opt, net_with_loss)
callback = [TimeMonitor(args_opt.data_sink_steps), LossCallBack(ds.get_dataset_size())]
if args_opt.enable_save_ckpt == "true" and args_opt.device_id % min(8, device_num) == 0:
config_ck = CheckpointConfig(save_checkpoint_steps=args_opt.save_checkpoint_steps,
keep_checkpoint_max=args_opt.save_checkpoint_num)
ckpoint_cb = ModelCheckpoint(prefix='checkpoint_bert',
directory=None if ckpt_save_dir == "" else ckpt_save_dir, config=config_ck)
callback.append(ckpoint_cb)
if args_opt.load_checkpoint_path:
param_dict = load_checkpoint(args_opt.load_checkpoint_path)
net_param = net_with_loss.parameters_dict()
load_param_into_net(net_with_loss, param_dict)
if args_opt.enable_lossscale == "true":
update_cell = DynamicLossScaleUpdateCell(loss_scale_value=cfg.loss_scale_value,
scale_factor=cfg.scale_factor,
scale_window=cfg.scale_window)
accumulation_steps = args_opt.accumulation_steps
enable_global_norm = cfg.enable_global_norm
if accumulation_steps <= 1:
if cfg.optimizer == 'AdamWeightDecay' and args_opt.device_target == 'GPU':
net_with_grads = BertTrainOneStepWithLossScaleCellForAdam(net_with_loss, optimizer=optimizer,
scale_update_cell=update_cell)
else:
net_with_grads = BertTrainOneStepWithLossScaleCell(net_with_loss, optimizer=optimizer,
scale_update_cell=update_cell)
else:
allreduce_post = args_opt.distribute == "false" or args_opt.allreduce_post_accumulation == "true"
net_with_accumulation = (BertTrainAccumulationAllReducePostWithLossScaleCell if allreduce_post else
BertTrainAccumulationAllReduceEachWithLossScaleCell)
net_with_grads = net_with_accumulation(net_with_loss, optimizer=optimizer,
scale_update_cell=update_cell,
accumulation_steps=accumulation_steps,
enable_global_norm=enable_global_norm)
else:
net_with_grads = BertTrainOneStepCell(net_with_loss, optimizer=optimizer)
print("net_with_grads time: ", time.time() - start_time, flush=True);
mllogger.event(key=mllog.constants.GLOBAL_BATCH_SIZE, value=cfg.batch_size * device_num)
mllogger.event(key="gradient_accumulation_steps", value=args_opt.accumulation_steps)
mllogger.event(key="seed", value=args_opt.seed)
mllogger.event(key="opt_name", value="lamb")
mllogger.event(key=mllog.constants.OPT_BASE_LR, value=cfg.Lamb.learning_rate)
mllogger.event(key=mllog.constants.OPT_LAMB_LR_MIN, value=cfg.Lamb.end_learning_rate)
mllogger.event(key=mllog.constants.OPT_LAMB_LR_DECAY_POLY_POWER, value=cfg.Lamb.power)
mllogger.event(key="opt_learning_rate_training_steps", value=args_opt.total_steps)
mllogger.event(key="opt_learning_rate_warmup_steps", value=cfg.Lamb.warmup_steps)
mllogger.event(key="num_warmup_steps", value=cfg.Lamb.warmup_steps)
mllogger.event(key="opt_epsilon", value=cfg.Lamb.eps)
mllogger.event(key="opt_lamb_beta_1", value=cfg.Lamb.beta1)
mllogger.event(key="opt_lamb_beta_2", value=cfg.Lamb.beta2)
mllogger.event(key="opt_lamb_weight_decay_rate", value=cfg.Lamb.weight_decay)
mllogger.event(key="start_warmup_step", value=cfg.Lamb.start_warmup_steps)
mllogger.start(key=mllog.constants.INIT_START)
eval_ds = create_bert_eval_dataset(args_opt.eval_batch_size, device_num, rank, args_opt.eval_data_dir, None)
net_eval = BertPretrainEval(bert_net_cfg, bert=bert)
print("eval phase: ", net_eval.phase)
model = Model(net_with_grads, eval_network=net_eval, metrics={'bert_acc': BertMetric(cfg.batch_size)})
model._init(ds, None, args_opt.data_sink_steps, new_repeat_count)
if args_opt.do_eval:
res = model.eval(eval_ds, dataset_sink_mode=True)
print("===========================")
print("Accuracy is: ", "%.4f" % res, " at time: ", time.time())
print("===========================")
mllogger.event(key=mllog.constants.EVAL_ACCURACY, value=res,
metadata={"train_samples": 0,
"epoch_num": 0})
if args_opt.train_with_eval == 'true':
eval_callback = EvalCallback(model, eval_ds, device_num * cfg.batch_size, mllogger, args_opt.train_url)
callback.append(eval_callback)
model._init(None, eval_ds, args_opt.data_sink_steps, new_repeat_count)
print("initialization time: ", time.time() - start_time, " at time: ", time.time(), flush=True);
if args_opt.train_url:
moxing_barrier(args_opt.train_url)
else:
if rank == 0:
time.sleep(100)
mllogger.end(key=mllog.constants.INIT_STOP)
start_time = time.time()
print("start running time: ", start_time, flush=True)
mllogger.start(key=mllog.constants.RUN_START)
if args_opt.do_train:
model.train(new_repeat_count, ds, callbacks=callback,
dataset_sink_mode=(args_opt.enable_data_sink == "true"), sink_size=args_opt.data_sink_steps)
end_time = time.time()
print("finish time: ", end_time, ", time cost: ", end_time - start_time)
if __name__ == '__main__':
print("current path: ", os.getcwd())
run_pretrain()
| []
| []
| [
"JOB_ID",
"RANK_ID",
"RANK_TABLE_FILE",
"RANK_SIZE",
"DEVICE_ID",
"MINDSPORE_HCCL_CONFIG_PATH"
]
| [] | ["JOB_ID", "RANK_ID", "RANK_TABLE_FILE", "RANK_SIZE", "DEVICE_ID", "MINDSPORE_HCCL_CONFIG_PATH"] | python | 6 | 0 | |
design/principle/trade/apple.go | package trade
import "fmt"
var apple = &Apple{}
type Apple struct {
}
func (t Apple) order() {
fmt.Println("apple order")
}
| []
| []
| []
| [] | [] | go | null | null | null |
bin/BuildSystem/CMakeBuildSystem.py | # SPDX-License-Identifier: BSD-2-Clause
# SPDX-FileCopyrightText: 2009 Ralf Habacker <[email protected]>
# SPDX-FileCopyrightText: 2020 Nicolas Fella <[email protected]>
# SPDX-FileCopyrightText: 2021 Volker Krause <[email protected]>
"""@package provides cmake build system"""
from BuildSystem.BuildSystemBase import *
from CraftOS.osutils import OsUtils
from CraftStandardDirs import CraftStandardDirs
from Utils.PostInstallRoutines import *
from Utils.Arguments import Arguments
import os
class CMakeBuildSystem(BuildSystemBase):
""" cmake build support """
def __init__(self):
"""constructor. configureOptions are added to the configure command line and makeOptions are added to the make command line"""
BuildSystemBase.__init__(self, "cmake")
self.supportsNinja = True
def __makeFileGenerator(self):
"""return cmake related make file generator"""
if self.makeProgram == "ninja":
return "Ninja"
if OsUtils.isWin():
if CraftCore.compiler.isMSVC() and not CraftCore.compiler.isIntel():
return "NMake Makefiles"
if CraftCore.compiler.isMinGW():
return "MinGW Makefiles"
elif OsUtils.isUnix():
return "Unix Makefiles"
else:
CraftCore.log.critical(f"unknown {CraftCore.compiler} compiler")
def configureOptions(self, defines=""):
"""returns default configure options"""
craftRoot = OsUtils.toUnixPath(CraftCore.standardDirs.craftRoot())
options = Arguments([defines])
options += [
"-DBUILD_TESTING={testing}".format(testing="ON" if self.buildTests else "OFF"),
"-DBUILD_SHARED_LIBS={shared}".format(shared="OFF" if self.subinfo.options.buildStatic else "ON"),
BuildSystemBase.configureOptions(self),
f"-DCMAKE_INSTALL_PREFIX={craftRoot}",
f"-DCMAKE_PREFIX_PATH={craftRoot}",
f"-DCMAKE_REQUIRED_INCLUDES={craftRoot}/include",
f"-DCMAKE_C_STANDARD_INCLUDE_DIRECTORIES={craftRoot}/include",
]
if self.buildType() is not None:
options.append(f"-DCMAKE_BUILD_TYPE={self.buildType()}")
#if CraftCore.compiler.isGCC() and not CraftCore.compiler.isNative():
# options += " -DCMAKE_TOOLCHAIN_FILE=%s" % os.path.join(CraftStandardDirs.craftRoot(), "craft", "bin", "toolchains", "Toolchain-cross-mingw32-linux-%s.cmake" % CraftCore.compiler.architecture)
if CraftCore.settings.getboolean("CMake", "KDE_L10N_AUTO_TRANSLATIONS", False):
options.append("-DKDE_L10N_AUTO_TRANSLATIONS=ON")
options.append("-DKDE_L10N_SYNC_TRANSLATIONS=ON")
if CraftCore.compiler.isWindows:
# people use InstallRequiredSystemLibraries.cmake wrong and unconditionally install the
# msvc crt...
options.append("-DCMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP=ON")
elif CraftCore.compiler.isMacOS:
options += [f"-DKDE_INSTALL_BUNDLEDIR={OsUtils.toUnixPath(CraftCore.standardDirs.craftRoot())}/Applications/KDE", "-DAPPLE_SUPPRESS_X11_WARNING=ON"]
elif CraftCore.compiler.isLinux:
# use the same lib dir on all distributions
options += ["-DCMAKE_INSTALL_LIBDIR:PATH=lib"]
elif CraftCore.compiler.isAndroid:
nativeToolingRoot = CraftCore.settings.get("General", "KF5HostToolingRoot", "/opt/nativetooling")
nativeToolingCMake = CraftCore.settings.get("General", "KF5HostToolingCMakePath", "/opt/nativetooling/lib/x86_64-linux-gnu/cmake/")
additionalFindRoots = ";".join(filter(None, [CraftCore.settings.get("General", "AndroidAdditionalFindRootPath", ""), craftRoot]))
options += [f"-DCMAKE_TOOLCHAIN_FILE={nativeToolingRoot}/share/ECM/toolchain/Android.cmake",
f"-DECM_ADDITIONAL_FIND_ROOT_PATH='{additionalFindRoots}'",
f"-DKF5_HOST_TOOLING={nativeToolingCMake}",
f"-DANDROID_APK_OUTPUT_DIR={self.packageDestinationDir()}",
f"-DANDROID_FASTLANE_METADATA_OUTPUT_DIR={self.packageDestinationDir()}"]
# should we detect the apk targets
if hasattr(self, "androidApkDirs"):
if self.androidApkTargets:
options += [f"-DQTANDROID_EXPORTED_TARGET={';'.join(self.androidApkTargets)}",
f"-DANDROID_APK_DIR={';'.join(self.androidApkDirs)}"]
if self.buildType() == "Release" or self.buildType() == "MinSizeRel":
options += ["-DANDROIDDEPLOYQT_EXTRA_ARGS=--release"]
if CraftCore.compiler.isWindows or CraftCore.compiler.isMacOS:
options.append("-DKDE_INSTALL_USE_QT_SYS_PATHS=ON")
if self.subinfo.options.buildTools:
options += self.subinfo.options.configure.toolsDefine
if self.subinfo.options.buildStatic and self.subinfo.options.configure.staticArgs:
options += self.subinfo.options.configure.staticArgs
if CraftCore.compiler.isIntel():
# this is needed because otherwise it'll detect the MSVC environment
options += " -DCMAKE_CXX_COMPILER=\"%s\" " % os.path.join(os.getenv("BIN_ROOT"), os.getenv("ARCH_PATH"),
"icl.exe").replace("\\", "/")
options += " -DCMAKE_C_COMPILER=\"%s\" " % os.path.join(os.getenv("BIN_ROOT"), os.getenv("ARCH_PATH"),
"icl.exe").replace("\\", "/")
options += " -DCMAKE_LINKER=\"%s\" " % os.path.join(os.getenv("BIN_ROOT"), os.getenv("ARCH_PATH"),
"xilink.exe").replace("\\", "/")
options += ["-S", self.configureSourceDir()]
return options
def configure(self, defines=""):
"""implements configure step for cmake projects"""
self.enterBuildDir()
env = {}
if self.supportsCCACHE:
cxx = CraftCore.standardDirs.craftRoot()/ "dev-utils/ccache/bin" / Path(os.environ["CXX"]).name
if CraftCore.compiler.isWindows and not cxx.suffix:
cxx = Path(str(cxx) + CraftCore.compiler.executableSuffix)
if cxx.exists():
env["CXX"] = cxx
env["CC"] = cxx.parent / Path(os.environ["CC"]).name
with utils.ScopedEnv(env):
command = Arguments.formatCommand(["cmake", "-G", self.__makeFileGenerator()], self.configureOptions(defines))
return utils.system(command)
def make(self):
"""implements the make step for cmake projects"""
self.enterBuildDir()
command = Arguments.formatCommand([self.makeProgram], self.makeOptions(self.subinfo.options.make.args))
return utils.system(command)
def install(self):
"""install the target"""
if not BuildSystemBase.install(self):
return False
self.enterBuildDir()
with utils.ScopedEnv({"DESTDIR" : self.installDir()}):
command = Arguments.formatCommand([self.makeProgram], self.makeOptions(self.subinfo.options.install.args))
return (utils.system(command) and
self._fixInstallPrefix())
def unittest(self):
"""running cmake based unittests"""
self.enterBuildDir()
with utils.ScopedEnv({"QT_FORCE_STDERR_LOGGING": 1, "QT_ASSUME_STDERR_HAS_CONSOLE": 1}):
command = ["ctest", "--output-on-failure", "--timeout", "300", "-j", str(CraftCore.settings.get("Compile", "Jobs", multiprocessing.cpu_count()))]
if CraftCore.debug.verbose() == 1:
command += ["-V"]
elif CraftCore.debug.verbose() > 1:
command += ["-VV"]
return utils.system(command)
def internalPostQmerge(self):
if not super().internalPostQmerge():
return False
return PostInstallRoutines.updateSharedMimeInfo(self)
| []
| []
| [
"CXX",
"ARCH_PATH",
"BIN_ROOT",
"CC"
]
| [] | ["CXX", "ARCH_PATH", "BIN_ROOT", "CC"] | python | 4 | 0 | |
components/espcoredump/espcoredump.py | #!/usr/bin/env python
#
# ESP32 core dump Utility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from hashlib import sha256
import sys
try:
from builtins import zip
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
except ImportError:
print('Import has failed probably because of the missing "future" package. Please install all the packages for '
'interpreter {} from the $IDF_PATH/requirements.txt file.'.format(sys.executable))
sys.exit(1)
import os
import argparse
import subprocess
import tempfile
import struct
import errno
import base64
import binascii
import logging
import re
idf_path = os.getenv('IDF_PATH')
if idf_path:
sys.path.insert(0, os.path.join(idf_path, 'components', 'esptool_py', 'esptool'))
try:
import esptool
except ImportError:
print("esptool is not found! Set proper $IDF_PATH in environment.")
sys.exit(2)
__version__ = "0.4-dev"
if os.name == 'nt':
CLOSE_FDS = False
else:
CLOSE_FDS = True
INVALID_CAUSE_VALUE = 0xFFFF
XCHAL_EXCCAUSE_NUM = 64
# Exception cause dictionary to get translation of exccause register
# From 4.4.1.5 table 4-64 Exception Causes of Xtensa
# Instruction Set Architecture (ISA) Reference Manual
xtensa_exception_cause_dict = {
0: ("IllegalInstructionCause", "Illegal instruction"),
1: ("SyscallCause", "SYSCALL instruction"),
2: ("InstructionFetchErrorCause", "Processor internal physical address or data error during instruction fetch. (See EXCVADDR for more information)"),
3: ("LoadStoreErrorCause", "Processor internal physical address or data error during load or store. (See EXCVADDR for more information)"),
4: ("Level1InterruptCause", "Level-1 interrupt as indicated by set level-1 bits in the INTERRUPT register"),
5: ("AllocaCause", "MOVSP instruction, if caller`s registers are not in the register file"),
6: ("IntegerDivideByZeroCause", "QUOS: QUOU, REMS: or REMU divisor operand is zero"),
8: ("PrivilegedCause", "Attempt to execute a privileged operation when CRING ? 0"),
9: ("LoadStoreAlignmentCause", "Load or store to an unaligned address. (See EXCVADDR for more information)"),
12: ("InstrPIFDataErrorCause", "PIF data error during instruction fetch. (See EXCVADDR for more information)"),
13: ("LoadStorePIFDataErrorCause", "Synchronous PIF data error during LoadStore access. (See EXCVADDR for more information)"),
14: ("InstrPIFAddrErrorCause", "PIF address error during instruction fetch. (See EXCVADDR for more information)"),
15: ("LoadStorePIFAddrErrorCause", "Synchronous PIF address error during LoadStore access. (See EXCVADDR for more information)"),
16: ("InstTLBMissCause", "Error during Instruction TLB refill. (See EXCVADDR for more information)"),
17: ("InstTLBMultiHitCause", "Multiple instruction TLB entries matched. (See EXCVADDR for more information)"),
18: ("InstFetchPrivilegeCause", "An instruction fetch referenced a virtual address at a ring level less than CRING. (See EXCVADDR for more information)"),
20: ("InstFetchProhibitedCause", "An instruction fetch referenced a page mapped with an attribute that does not permit instruction fetch (EXCVADDR)."),
24: ("LoadStoreTLBMissCause", "Error during TLB refill for a load or store. (See EXCVADDR for more information)"),
25: ("LoadStoreTLBMultiHitCause", "Multiple TLB entries matched for a load or store. (See EXCVADDR for more information)"),
26: ("LoadStorePrivilegeCause", "A load or store referenced a virtual address at a ring level less than CRING. (See EXCVADDR for more information)"),
28: ("LoadProhibitedCause", "A load referenced a page mapped with an attribute that does not permit loads. (See EXCVADDR for more information)"),
29: ("StoreProhibitedCause", "A store referenced a page mapped with an attribute that does not permit stores [Region Protection Option or MMU Option]."),
32: ("Coprocessor0Disabled", "Coprocessor 0 instruction when cp0 disabled"),
33: ("Coprocessor1Disabled", "Coprocessor 1 instruction when cp1 disabled"),
34: ("Coprocessor2Disabled", "Coprocessor 2 instruction when cp2 disabled"),
35: ("Coprocessor3Disabled", "Coprocessor 3 instruction when cp3 disabled"),
36: ("Coprocessor4Disabled", "Coprocessor 4 instruction when cp4 disabled"),
37: ("Coprocessor5Disabled", "Coprocessor 5 instruction when cp5 disabled"),
38: ("Coprocessor6Disabled", "Coprocessor 6 instruction when cp6 disabled"),
39: ("Coprocessor7Disabled", "Coprocessor 7 instruction when cp7 disabled"),
INVALID_CAUSE_VALUE: ("InvalidCauseRegister", "Invalid EXCCAUSE register value or current task is broken and was skipped"),
# ESP panic pseudo reasons
XCHAL_EXCCAUSE_NUM + 0: ("UnknownException", "Unknown exception"),
XCHAL_EXCCAUSE_NUM + 1: ("DebugException", "Unhandled debug exception"),
XCHAL_EXCCAUSE_NUM + 2: ("DoubleException", "Double exception"),
XCHAL_EXCCAUSE_NUM + 3: ("KernelException", "Unhandled kernel exception"),
XCHAL_EXCCAUSE_NUM + 4: ("CoprocessorException", "Coprocessor exception"),
XCHAL_EXCCAUSE_NUM + 5: ("InterruptWDTTimoutCPU0", "Interrupt wdt timeout on CPU0"),
XCHAL_EXCCAUSE_NUM + 6: ("InterruptWDTTimoutCPU1", "Interrupt wdt timeout on CPU1"),
XCHAL_EXCCAUSE_NUM + 7: ("CacheError", "Cache disabled but cached memory region accessed"),
}
class ESPCoreDumpError(RuntimeError):
"""Core dump runtime error class
"""
def __init__(self, message):
"""Constructor for core dump error
"""
super(ESPCoreDumpError, self).__init__(message)
class BinStruct(object):
"""Binary structure representation
Subclasses must specify actual structure layout using 'fields' and 'format' members.
For example, the following subclass represents structure with two fields:
f1 of size 2 bytes and 4 bytes f2. Little endian.
class SomeStruct(BinStruct):
fields = ("f1",
"f2")
format = "<HL"
Then subclass can be used to initialize fields of underlaying structure and convert it to binary representation:
f = open('some_struct.bin', 'wb')
s = SomeStruct()
s.f1 = 1
s.f2 = 10
f.write(s.dump())
f.close()
"""
def __init__(self, buf=None):
"""Base constructor for binary structure objects
"""
if buf is None:
buf = b'\0' * self.sizeof()
fields = struct.unpack(self.__class__.format, buf[:self.sizeof()])
self.__dict__.update(zip(self.__class__.fields, fields))
def sizeof(self):
"""Returns the size of the structure represented by specific subclass
"""
return struct.calcsize(self.__class__.format)
def dump(self):
"""Returns binary representation of structure
"""
keys = self.__class__.fields
return struct.pack(self.__class__.format, *(self.__dict__[k] for k in keys))
class Elf32FileHeader(BinStruct):
"""ELF32 file header
"""
fields = ("e_ident",
"e_type",
"e_machine",
"e_version",
"e_entry",
"e_phoff",
"e_shoff",
"e_flags",
"e_ehsize",
"e_phentsize",
"e_phnum",
"e_shentsize",
"e_shnum",
"e_shstrndx")
format = "<16sHHLLLLLHHHHHH"
def __init__(self, buf=None):
"""Constructor for ELF32 file header structure
"""
super(Elf32FileHeader, self).__init__(buf)
if buf is None:
# Fill in sane ELF header for LSB32
self.e_ident = b"\x7fELF\1\1\1\0\0\0\0\0\0\0\0\0"
self.e_version = ESPCoreDumpElfFile.EV_CURRENT
self.e_ehsize = self.sizeof()
class Elf32ProgramHeader(BinStruct):
"""ELF32 program header
"""
fields = ("p_type",
"p_offset",
"p_vaddr",
"p_paddr",
"p_filesz",
"p_memsz",
"p_flags",
"p_align")
format = "<LLLLLLLL"
class Elf32NoteDesc(object):
"""ELF32 note descriptor
"""
def __init__(self, name, type, desc):
"""Constructor for ELF32 note descriptor
"""
self.name = name
self.type = type
self.desc = desc
def dump(self):
"""Returns binary representation of ELF32 note descriptor
"""
nm_buf = bytearray(self.name, encoding='ascii') + b'\0'
hdr = struct.pack("<LLL", len(nm_buf), len(self.desc), self.type)
# pad for 4 byte alignment
name = nm_buf + ((4 - len(nm_buf)) % 4) * b'\0'
desc = self.desc + ((4 - len(self.desc)) % 4) * b'\0'
return hdr + name + desc
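# Worked example of the note layout produced by dump() (name/desc are illustrative values only):
# for name "CORE" the name buffer is b"CORE\0" (5 bytes) padded to 8 bytes, preceded by the
# 12-byte "<LLL" header; desc is likewise zero-padded up to the next 4-byte boundary.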
def read(self, data):
"""Reads ELF32 note descriptor
"""
hdr_sz = struct.calcsize("<LLL")
nm_len,desc_len,self.type = struct.unpack("<LLL", data[:hdr_sz])
nm_len_a = nm_len + ((4 - nm_len) % 4)
self.name = struct.unpack("<%ds" % (nm_len - 1), data[hdr_sz:hdr_sz + nm_len - 1])[0].decode('ascii')
self.desc = data[hdr_sz + nm_len_a:hdr_sz + nm_len_a + desc_len]
desc_len_a = desc_len + ((4 - desc_len) % 4)
return hdr_sz + nm_len_a + desc_len_a
class XtensaPrStatus(BinStruct):
"""Xtensa program status structure"""
fields = ("si_signo", "si_code", "si_errno",
"pr_cursig",
"pr_pad0",
"pr_sigpend",
"pr_sighold",
"pr_pid",
"pr_ppid",
"pr_pgrp",
"pr_sid",
"pr_utime",
"pr_stime",
"pr_cutime",
"pr_cstime")
format = "<3LHHLLLLLLQQQQ"
class EspCoreDumpTaskStatus(BinStruct):
"""Core dump status structure"""
# task status flags for note
TASK_STATUS_CORRECT = 0x00
TASK_STATUS_TCB_CORRUPTED = 0x01
TASK_STATUS_STACK_CORRUPTED = 0x02
fields = ("task_index",
"task_flags",
"task_tcb_addr",
"task_stack_start",
"task_stack_len",
"task_name")
format = "<LLLLL16s"
class ESPCoreDumpSegment(esptool.ImageSegment):
""" Wrapper class for a program segment in core ELF file, has a segment
type and flags as well as the common properties of an ImageSegment.
"""
# segment flags
PF_X = 0x1 # Execute
PF_W = 0x2 # Write
PF_R = 0x4 # Read
def __init__(self, addr, data, type, flags):
"""Constructor for program segment
"""
super(ESPCoreDumpSegment, self).__init__(addr, data)
self.flags = flags
self.type = type
def __repr__(self):
"""Returns string representation of program segment
"""
return "%s %s %s" % (self.type, self.attr_str(), super(ESPCoreDumpSegment, self).__repr__())
def attr_str(self):
"""Returns string representation of program segment attributes
"""
str = ''
if self.flags & self.PF_R:
str += 'R'
else:
str += ' '
if self.flags & self.PF_W:
str += 'W'
else:
str += ' '
if self.flags & self.PF_X:
str += 'X'
else:
str += ' '
return str
class ESPCoreDumpSection(esptool.ELFSection):
""" Wrapper class for a section in core ELF file, has a section
flags as well as the common properties of an esptool.ELFSection.
"""
# section flags
SHF_WRITE = 0x1
SHF_ALLOC = 0x2
SHF_EXECINSTR = 0x4
def __init__(self, name, addr, data, flags):
"""Constructor for section
"""
super(ESPCoreDumpSection, self).__init__(name, addr, data)
self.flags = flags
def __repr__(self):
"""Returns string representation of section
"""
return "%s %s" % (super(ESPCoreDumpSection, self).__repr__(), self.attr_str())
def attr_str(self):
"""Returns string representation of section attributes
"""
str = "R"
if self.flags & self.SHF_WRITE:
str += 'W'
else:
str += ' '
if self.flags & self.SHF_EXECINSTR:
str += 'X'
else:
str += ' '
if self.flags & self.SHF_ALLOC:
str += 'A'
else:
str += ' '
return str
class ESPCoreDumpElfFile(esptool.ELFFile):
""" Wrapper class for core dump ELF file
"""
# extra regs IDs used in EXTRA_INFO note
REG_EXCCAUSE_IDX = 0
REG_EXCVADDR_IDX = 1
REG_EPC1_IDX = 177
REG_EPC2_IDX = 178
REG_EPC3_IDX = 179
REG_EPC4_IDX = 180
REG_EPC5_IDX = 181
REG_EPC6_IDX = 182
REG_EPC7_IDX = 183
REG_EPS2_IDX = 194
REG_EPS3_IDX = 195
REG_EPS4_IDX = 196
REG_EPS5_IDX = 197
REG_EPS6_IDX = 198
REG_EPS7_IDX = 199
# ELF file type
ET_NONE = 0x0 # No file type
ET_REL = 0x1 # Relocatable file
ET_EXEC = 0x2 # Executable file
ET_DYN = 0x3 # Shared object file
ET_CORE = 0x4 # Core file
# ELF file version
EV_NONE = 0x0
EV_CURRENT = 0x1
# ELF file machine type
EM_NONE = 0x0
EM_XTENSA = 0x5E
# section types
SEC_TYPE_PROGBITS = 0x01
SEC_TYPE_STRTAB = 0x03
# special section index
SHN_UNDEF = 0x0
# program segment types
PT_NULL = 0x0
PT_LOAD = 0x1
PT_DYNAMIC = 0x2
PT_INTERP = 0x3
PT_NOTE = 0x4
PT_SHLIB = 0x5
PT_PHDR = 0x6
def __init__(self, name=None):
"""Constructor for core dump ELF file
"""
if name:
super(ESPCoreDumpElfFile, self).__init__(name)
else:
self.sections = []
self.program_segments = []
self.aux_segments = []
self.e_type = self.ET_NONE
self.e_machine = self.EM_NONE
def _read_elf_file(self, f):
"""Reads core dump from ELF file
"""
# read the ELF file header
LEN_FILE_HEADER = 0x34
try:
header = f.read(LEN_FILE_HEADER)
(ident,type,machine,_version,
self.entrypoint,phoff,shoff,_flags,
_ehsize, phentsize,phnum,_shentsize,
shnum,shstrndx) = struct.unpack("<16sHHLLLLLHHHHHH", header)
except struct.error as e:
raise ESPCoreDumpError("Failed to read a valid ELF header from %s: %s" % (f.name, e))
if bytearray([ident[0]]) != b'\x7f' or ident[1:4] != b'ELF':
raise ESPCoreDumpError("%s has invalid ELF magic header" % f.name)
if machine != self.EM_XTENSA:
raise ESPCoreDumpError("%s does not appear to be an Xtensa ELF file. e_machine=%04x" % (f.name, machine))
self.e_type = type
self.e_machine = machine
self.sections = []
self.program_segments = []
self.aux_segments = []
if shnum > 0:
self._read_sections(f, shoff, shstrndx)
if phnum > 0:
self._read_program_segments(f, phoff, phentsize, phnum)
def _read_sections(self, f, section_header_offs, shstrndx):
"""Reads core dump sections from ELF file
"""
f.seek(section_header_offs)
section_header = f.read()
LEN_SEC_HEADER = 0x28
if len(section_header) == 0:
raise ESPCoreDumpError("No section header found at offset %04x in ELF file." % section_header_offs)
if len(section_header) % LEN_SEC_HEADER != 0:
logging.warning('Unexpected ELF section header length %04x is not mod-%02x' % (len(section_header),LEN_SEC_HEADER))
# walk through the section header and extract all sections
section_header_offsets = range(0, len(section_header), LEN_SEC_HEADER)
def read_section_header(offs):
name_offs,sec_type,flags,lma,sec_offs,size = struct.unpack_from("<LLLLLL", section_header[offs:])
return (name_offs, sec_type, flags, lma, size, sec_offs)
all_sections = [read_section_header(offs) for offs in section_header_offsets]
prog_sections = [s for s in all_sections if s[1] == esptool.ELFFile.SEC_TYPE_PROGBITS]
# search for the string table section
if not shstrndx * LEN_SEC_HEADER in section_header_offsets:
raise ESPCoreDumpError("ELF file has no STRTAB section at shstrndx %d" % shstrndx)
_,sec_type,_,_,sec_size,sec_offs = read_section_header(shstrndx * LEN_SEC_HEADER)
if sec_type != esptool.ELFFile.SEC_TYPE_STRTAB:
logging.warning('ELF file has incorrect STRTAB section type 0x%02x' % sec_type)
f.seek(sec_offs)
string_table = f.read(sec_size)
# build the real list of ELFSections by reading the actual section names from the
# string table section, and actual data for each section from the ELF file itself
def lookup_string(offs):
raw = string_table[offs:]
return raw[:raw.index(b'\x00')]
def read_data(offs,size):
f.seek(offs)
return f.read(size)
prog_sections = [ESPCoreDumpSection(lookup_string(n_offs), lma, read_data(offs, size), flags)
for (n_offs, _type, flags, lma, size, offs) in prog_sections if lma != 0]
self.sections = prog_sections
def _read_program_segments(self, f, seg_table_offs, entsz, num):
"""Reads core dump program segments from ELF file
"""
f.seek(seg_table_offs)
seg_table = f.read(entsz * num)
LEN_SEG_HEADER = 0x20
if len(seg_table) == 0:
raise ESPCoreDumpError("No program header table found at offset %04x in ELF file." % seg_table_offs)
if len(seg_table) % LEN_SEG_HEADER != 0:
logging.warning('Unexpected ELF program header table length %04x is not mod-%02x' % (len(seg_table),LEN_SEG_HEADER))
# walk through the program segment table and extract all segments
seg_table_offs = range(0, len(seg_table), LEN_SEG_HEADER)
def read_program_header(offs):
type,offset,vaddr,_paddr,filesz,_memsz,flags,_align = struct.unpack_from("<LLLLLLLL", seg_table[offs:])
return (type,offset,vaddr,filesz,flags)
prog_segments = [read_program_header(offs) for offs in seg_table_offs]
# build the real list of ImageSegment by reading actual data for each segment from the ELF file itself
def read_data(offs,size):
f.seek(offs)
return f.read(size)
# read loadable segments
self.program_segments = [ESPCoreDumpSegment(vaddr, read_data(offset, filesz), type, flags)
for (type, offset, vaddr, filesz,flags) in prog_segments if vaddr != 0]
self.aux_segments = [ESPCoreDumpSegment(vaddr, read_data(offset, filesz), type, flags)
for (type, offset, vaddr, filesz, flags) in prog_segments if type == ESPCoreDumpElfFile.PT_NOTE and vaddr == 0]
def add_program_segment(self, addr, data, type, flags):
"""Adds new program segment
"""
# TODO: currently merging with existing segments is not supported
data_sz = len(data)
# check for overlapping and merge if needed
if addr != 0 and data_sz != 0:
for ps in self.program_segments:
seg_len = len(ps.data)
if addr >= ps.addr and addr < (ps.addr + seg_len):
raise ESPCoreDumpError("Can not add overlapping region [%x..%x] to ELF file. Conflict with existing [%x..%x]." %
(addr, addr + data_sz - 1, ps.addr, ps.addr + seg_len - 1))
if (addr + data_sz) > ps.addr and (addr + data_sz) <= (ps.addr + seg_len):
raise ESPCoreDumpError("Can not add overlapping region [%x..%x] to ELF file. Conflict with existing [%x..%x]." %
(addr, addr + data_sz - 1, ps.addr, ps.addr + seg_len - 1))
# append
self.program_segments.append(ESPCoreDumpSegment(addr, data, type, flags))
def add_aux_segment(self, data, type, flags):
"""Adds new note segment
"""
self.aux_segments.append(ESPCoreDumpSegment(0, data, type, flags))
def write_program_headers(self, f, off, segs):
for seg in segs:
phdr = Elf32ProgramHeader()
phdr.p_type = seg.type
phdr.p_offset = off
phdr.p_vaddr = seg.addr
phdr.p_paddr = phdr.p_vaddr # TODO
phdr.p_filesz = len(seg.data)
phdr.p_memsz = phdr.p_filesz # TODO
phdr.p_flags = seg.flags
phdr.p_align = 0 # TODO
f.write(phdr.dump())
off += phdr.p_filesz
return off
def dump(self, f):
"""Write core dump contents to file
"""
# TODO: currently dumps only program segments.
# dumping sections is not supported yet
# write ELF header
ehdr = Elf32FileHeader()
ehdr.e_type = self.e_type
ehdr.e_machine = self.e_machine
ehdr.e_entry = 0
ehdr.e_phoff = ehdr.sizeof()
ehdr.e_shoff = 0
ehdr.e_flags = 0
ehdr.e_phentsize = Elf32ProgramHeader().sizeof()
ehdr.e_phnum = len(self.program_segments) + len(self.aux_segments)
ehdr.e_shentsize = 0
ehdr.e_shnum = 0
ehdr.e_shstrndx = self.SHN_UNDEF
f.write(ehdr.dump())
# write program header table
cur_off = ehdr.e_ehsize + ehdr.e_phnum * ehdr.e_phentsize
cur_off = self.write_program_headers(f, cur_off, self.program_segments)
cur_off = self.write_program_headers(f, cur_off, self.aux_segments)
# write program segments
for segment in self.program_segments:
f.write(segment.data)
# write aux program segments
for segment in self.aux_segments:
f.write(segment.data)
class ESPCoreDumpLoaderError(ESPCoreDumpError):
"""Core dump loader error class
"""
def __init__(self, message):
"""Constructor for core dump loader error
"""
super(ESPCoreDumpLoaderError, self).__init__(message)
class ESPCoreDumpVersion(object):
"""Core dump version class
"""
# This class contains all version-dependent params
ESP_CORE_DUMP_CHIP_ESP32 = 0
ESP_CORE_DUMP_CHIP_ESP32S2 = 2
def __init__(self, version=None):
"""Constructor for core dump version
"""
super(ESPCoreDumpVersion, self).__init__()
if version is None:
self.version = 0
else:
self.set_version(version)
@staticmethod
def make_dump_ver(maj, min):
return (((maj & 0xFF) << 8) | ((min & 0xFF) << 0))
def set_version(self, version):
self.version = version
@property
def chip_ver(self):
return ((self.version & 0xFFFF0000) >> 16)
@property
def dump_ver(self):
return (self.version & 0x0000FFFF)
@property
def major(self):
return ((self.version & 0x0000FF00) >> 8)
@property
def minor(self):
return (self.version & 0x000000FF)
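# Illustrative sketch (not part of the original tool): the 32-bit version word
# keeps the chip ID in the upper 16 bits and the dump version (major.minor) in
# the lower 16 bits, which is exactly what the properties above decode.
def _example_decode_version_word(version_word=0x00020101):
    """Hypothetical helper: 0x00020101 -> chip 2 (ESP32-S2), dump version 1.1."""
    v = ESPCoreDumpVersion(version_word)
    return {'chip': v.chip_ver, 'dump': v.dump_ver, 'major': v.major, 'minor': v.minor}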
class ESPCoreDumpLoader(ESPCoreDumpVersion):
"""Core dump loader base class
"""
# "legacy" stands for core dumps v0.1 (before IDF v4.1)
ESP_COREDUMP_VERSION_BIN_V1 = ESPCoreDumpVersion.make_dump_ver(0, 1)
ESP_COREDUMP_VERSION_BIN_V2 = ESPCoreDumpVersion.make_dump_ver(0, 2)
ESP_COREDUMP_VERSION_ELF_CRC32 = ESPCoreDumpVersion.make_dump_ver(1, 0)
ESP_COREDUMP_VERSION_ELF_SHA256 = ESPCoreDumpVersion.make_dump_ver(1, 1)
ESP_CORE_DUMP_INFO_TYPE = 8266
ESP_CORE_DUMP_TASK_INFO_TYPE = 678
ESP_CORE_DUMP_EXTRA_INFO_TYPE = 677
ESP_COREDUMP_CURR_TASK_MARKER = 0xdeadbeef
ESP_COREDUMP_BIN_V1_HDR_FMT = '<4L'
ESP_COREDUMP_BIN_V1_HDR_SZ = struct.calcsize(ESP_COREDUMP_BIN_V1_HDR_FMT)
ESP_COREDUMP_HDR_FMT = '<5L'
ESP_COREDUMP_HDR_SZ = struct.calcsize(ESP_COREDUMP_HDR_FMT)
ESP_COREDUMP_TSK_HDR_FMT = '<3L'
ESP_COREDUMP_TSK_HDR_SZ = struct.calcsize(ESP_COREDUMP_TSK_HDR_FMT)
ESP_COREDUMP_MEM_SEG_HDR_FMT = '<2L'
ESP_COREDUMP_MEM_SEG_HDR_SZ = struct.calcsize(ESP_COREDUMP_MEM_SEG_HDR_FMT)
ESP_COREDUMP_NOTE_HDR_FMT = '<3L'
ESP_COREDUMP_NOTE_HDR_SZ = struct.calcsize(ESP_COREDUMP_NOTE_HDR_FMT)
ESP_COREDUMP_CRC_FMT = '<L'
ESP_COREDUMP_CRC_SZ = struct.calcsize(ESP_COREDUMP_CRC_FMT)
ESP_COREDUMP_SHA256_FMT = '32c'
ESP_COREDUMP_SHA256_SZ = struct.calcsize(ESP_COREDUMP_SHA256_FMT)
def __init__(self):
"""Base constructor for core dump loader
"""
super(ESPCoreDumpLoader, self).__init__()
self.fcore = None
self.hdr = {}
def _get_registers_from_stack(self, data, grows_down):
"""Returns list of registers (in GDB format) from xtensa stack frame
"""
# from "gdb/xtensa-tdep.h"
# typedef struct
# {
# 0 xtensa_elf_greg_t pc;
# 1 xtensa_elf_greg_t ps;
# 2 xtensa_elf_greg_t lbeg;
# 3 xtensa_elf_greg_t lend;
# 4 xtensa_elf_greg_t lcount;
# 5 xtensa_elf_greg_t sar;
# 6 xtensa_elf_greg_t windowstart;
# 7 xtensa_elf_greg_t windowbase;
# 8..63 xtensa_elf_greg_t reserved[8+48];
# 64 xtensa_elf_greg_t ar[64];
# } xtensa_elf_gregset_t;
REG_PC_IDX = 0
REG_PS_IDX = 1
REG_LB_IDX = 2
REG_LE_IDX = 3
REG_LC_IDX = 4
REG_SAR_IDX = 5
# REG_WS_IDX = 6
# REG_WB_IDX = 7
REG_AR_START_IDX = 64
# REG_AR_NUM = 64
        # FIXME: according to xtensa_elf_gregset_t the number of regs must be 128,
        # but gdb complains when it is less than 129
REG_NUM = 129
# XT_SOL_EXIT = 0
XT_SOL_PC = 1
XT_SOL_PS = 2
# XT_SOL_NEXT = 3
XT_SOL_AR_START = 4
XT_SOL_AR_NUM = 4
# XT_SOL_FRMSZ = 8
XT_STK_EXIT = 0
XT_STK_PC = 1
XT_STK_PS = 2
XT_STK_AR_START = 3
XT_STK_AR_NUM = 16
XT_STK_SAR = 19
XT_STK_EXCCAUSE = 20
XT_STK_EXCVADDR = 21
XT_STK_LBEG = 22
XT_STK_LEND = 23
XT_STK_LCOUNT = 24
XT_STK_FRMSZ = 25
extra_regs = {ESPCoreDumpElfFile.REG_EPS2_IDX: 0, ESPCoreDumpElfFile.REG_EPS3_IDX: 0,
ESPCoreDumpElfFile.REG_EPS4_IDX: 0, ESPCoreDumpElfFile.REG_EPS5_IDX: 0,
ESPCoreDumpElfFile.REG_EPS6_IDX: 0, ESPCoreDumpElfFile.REG_EPS7_IDX: 0,
ESPCoreDumpElfFile.REG_EPC1_IDX: 0, ESPCoreDumpElfFile.REG_EPC2_IDX: 0,
ESPCoreDumpElfFile.REG_EPC3_IDX: 0, ESPCoreDumpElfFile.REG_EPC4_IDX: 0,
ESPCoreDumpElfFile.REG_EPC5_IDX: 0, ESPCoreDumpElfFile.REG_EPC6_IDX: 0,
ESPCoreDumpElfFile.REG_EPC7_IDX: 0}
regs = [0] * REG_NUM
# TODO: support for growing up stacks
if not grows_down:
raise ESPCoreDumpLoaderError("Growing up stacks are not supported for now!")
ex_struct = "<%dL" % XT_STK_FRMSZ
if len(data) < struct.calcsize(ex_struct):
raise ESPCoreDumpLoaderError("Too small stack to keep frame: %d bytes!" % len(data))
stack = struct.unpack(ex_struct, data[:struct.calcsize(ex_struct)])
# Stack frame type indicator is always the first item
rc = stack[XT_STK_EXIT]
if rc != 0:
regs[REG_PC_IDX] = stack[XT_STK_PC]
regs[REG_PS_IDX] = stack[XT_STK_PS]
for i in range(XT_STK_AR_NUM):
regs[REG_AR_START_IDX + i] = stack[XT_STK_AR_START + i]
regs[REG_SAR_IDX] = stack[XT_STK_SAR]
regs[REG_LB_IDX] = stack[XT_STK_LBEG]
regs[REG_LE_IDX] = stack[XT_STK_LEND]
regs[REG_LC_IDX] = stack[XT_STK_LCOUNT]
# FIXME: crashed and some running tasks (e.g. prvIdleTask) have EXCM bit set
            # and GDB cannot unwind the call stack properly (it implies non-windowed call0)
if regs[REG_PS_IDX] & (1 << 5):
regs[REG_PS_IDX] &= ~(1 << 4)
if stack[XT_STK_EXCCAUSE] in xtensa_exception_cause_dict:
extra_regs[ESPCoreDumpElfFile.REG_EXCCAUSE_IDX] = stack[XT_STK_EXCCAUSE]
else:
extra_regs[ESPCoreDumpElfFile.REG_EXCCAUSE_IDX] = INVALID_CAUSE_VALUE
extra_regs[ESPCoreDumpElfFile.REG_EXCVADDR_IDX] = stack[XT_STK_EXCVADDR]
else:
regs[REG_PC_IDX] = stack[XT_SOL_PC]
regs[REG_PS_IDX] = stack[XT_SOL_PS]
for i in range(XT_SOL_AR_NUM):
regs[REG_AR_START_IDX + i] = stack[XT_SOL_AR_START + i]
# nxt = stack[XT_SOL_NEXT]
return regs,extra_regs
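    # Illustrative note (not part of the original tool): the first word of the
    # saved frame (XT_STK_EXIT) selects the layout used above - non-zero means a
    # full exception frame (XT_STK_*), zero means a solicited/yield frame
    # (XT_SOL_*) that carries only PC, PS and four address registers.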
def tcb_is_sane(self, tcb_addr, tcb_size):
"""Check tcb address if it is correct
"""
return not (tcb_addr < 0x3ffae000 or (tcb_addr + tcb_size) > 0x40000000)
def stack_is_sane(self, sp):
"""Check stack address if it is correct
"""
return not(sp < 0x3ffae010 or sp > 0x3fffffff)
def addr_is_fake(self, addr):
"""Check if address is in fake area
"""
return ((addr < 0x3f3fffff and addr >= 0x20000000) or addr >= 0x80000000)
def remove_tmp_file(self, fname):
"""Silently removes temporary file
"""
try:
os.remove(fname)
except OSError as e:
if e.errno != errno.ENOENT:
logging.warning("Failed to remove temp file '%s' (%d)!" % (fname, e.errno))
def cleanup(self):
"""Cleans up loader resources
"""
if self.fcore:
self.fcore.close()
if self.fcore_name:
self.remove_tmp_file(self.fcore_name)
def _extract_elf_corefile(self, core_fname=None, off=0, exe_name=None):
""" Reads the ELF formatted core dump image and parse it
"""
core_off = off
self.set_version(self.hdr['ver'])
if self.dump_ver == self.ESP_COREDUMP_VERSION_ELF_CRC32:
checksum_len = self.ESP_COREDUMP_CRC_SZ
elif self.dump_ver == self.ESP_COREDUMP_VERSION_ELF_SHA256:
checksum_len = self.ESP_COREDUMP_SHA256_SZ
else:
raise ESPCoreDumpLoaderError("Core dump version '%d' is not supported!" % self.dump_ver)
core_elf = ESPCoreDumpElfFile()
data = self.read_data(core_off, self.hdr['tot_len'] - checksum_len - self.ESP_COREDUMP_HDR_SZ)
with open(core_fname, 'w+b') as fce:
try:
fce.write(data)
fce.flush()
fce.seek(0)
core_elf._read_elf_file(fce)
if exe_name:
exe_elf = ESPCoreDumpElfFile(exe_name)
                # Read note segments from the core file which belong to tasks (TCB or stack)
for ns in core_elf.aux_segments:
if ns.type != ESPCoreDumpElfFile.PT_NOTE:
continue
note_read = 0
while note_read < len(ns.data):
note = Elf32NoteDesc("", 0, None)
note_read += note.read(ns.data[note_read:])
# Check for version info note
if 'ESP_CORE_DUMP_INFO' == note.name and note.type == self.ESP_CORE_DUMP_INFO_TYPE and exe_name:
app_sha256 = binascii.hexlify(exe_elf.sha256())
n_ver_len = struct.calcsize("<L")
n_sha256_len = self.ESP_COREDUMP_SHA256_SZ * 2 # SHA256 as hex string
n_ver,coredump_sha256 = struct.unpack("<L%ds" % (n_sha256_len), note.desc[:n_ver_len + n_sha256_len])
if coredump_sha256 != app_sha256 or ESPCoreDumpVersion(n_ver).dump_ver != self.dump_ver:
raise ESPCoreDumpError("Invalid application image for coredump: app_SHA256(%s) != coredump_SHA256(%s)." %
(app_sha256, coredump_sha256))
except ESPCoreDumpError as e:
logging.warning("Failed to extract ELF core dump image into file %s. (Reason: %s)" % (core_fname, e))
return core_fname
def _extract_bin_corefile(self, core_fname=None, rom_elf=None, off=0):
"""Creates core dump ELF file
"""
core_off = off
with open(core_fname, 'w+b') as fce:
tcbsz_aligned = self.hdr['tcbsz']
if tcbsz_aligned % 4:
tcbsz_aligned = 4 * (old_div(tcbsz_aligned,4) + 1)
core_elf = ESPCoreDumpElfFile()
notes = b''
core_dump_info_notes = b''
task_info_notes = b''
task_status = EspCoreDumpTaskStatus()
for i in range(self.hdr['task_num']):
task_status.task_index = i
task_status.task_flags = EspCoreDumpTaskStatus.TASK_STATUS_CORRECT
data = self.read_data(core_off, self.ESP_COREDUMP_TSK_HDR_SZ)
tcb_addr,stack_top,stack_end = struct.unpack_from(self.ESP_COREDUMP_TSK_HDR_FMT, data)
if stack_end > stack_top:
stack_len = stack_end - stack_top
stack_base = stack_top
else:
stack_len = stack_top - stack_end
stack_base = stack_end
stack_len_aligned = stack_len
if stack_len_aligned % 4:
stack_len_aligned = 4 * (old_div(stack_len_aligned,4) + 1)
core_off += self.ESP_COREDUMP_TSK_HDR_SZ
logging.debug("Read TCB %d bytes @ 0x%x" % (tcbsz_aligned, tcb_addr))
data = self.read_data(core_off, tcbsz_aligned)
task_status.task_tcb_addr = tcb_addr
try:
if self.tcb_is_sane(tcb_addr, tcbsz_aligned):
if self.hdr['tcbsz'] != tcbsz_aligned:
core_elf.add_program_segment(tcb_addr, data[:self.hdr['tcbsz'] - tcbsz_aligned],
ESPCoreDumpElfFile.PT_LOAD, ESPCoreDumpSegment.PF_R | ESPCoreDumpSegment.PF_W)
else:
core_elf.add_program_segment(tcb_addr, data, ESPCoreDumpElfFile.PT_LOAD, ESPCoreDumpSegment.PF_R | ESPCoreDumpSegment.PF_W)
# task_status.task_name = bytearray("%s\0" % task_name_str, encoding='ascii')
elif tcb_addr and self.addr_is_fake(tcb_addr):
task_status.task_flags |= EspCoreDumpTaskStatus.TASK_STATUS_TCB_CORRUPTED
except ESPCoreDumpError as e:
logging.warning("Skip TCB %d bytes @ 0x%x. (Reason: %s)" % (tcbsz_aligned, tcb_addr, e))
core_off += tcbsz_aligned
logging.debug("Read stack %d bytes @ 0x%x" % (stack_len_aligned, stack_base))
data = self.read_data(core_off, stack_len_aligned)
if stack_len != stack_len_aligned:
data = data[:stack_len - stack_len_aligned]
task_status.task_stack_start = stack_base
task_status.task_stack_len = stack_len_aligned
try:
if self.stack_is_sane(stack_base):
core_elf.add_program_segment(stack_base, data, ESPCoreDumpElfFile.PT_LOAD, ESPCoreDumpSegment.PF_R | ESPCoreDumpSegment.PF_W)
elif stack_base and self.addr_is_fake(stack_base):
task_status.task_flags |= EspCoreDumpTaskStatus.TASK_STATUS_STACK_CORRUPTED
core_elf.add_program_segment(stack_base, data, ESPCoreDumpElfFile.PT_LOAD, ESPCoreDumpSegment.PF_R | ESPCoreDumpSegment.PF_W)
except ESPCoreDumpError as e:
logging.warning("Skip task's (%x) stack %d bytes @ 0x%x. (Reason: %s)" % (tcb_addr, stack_len_aligned, stack_base, e))
core_off += stack_len_aligned
try:
logging.debug("Stack start_end: 0x%x @ 0x%x" % (stack_top, stack_end))
task_regs,extra_regs = self._get_registers_from_stack(data, stack_end > stack_top)
except Exception as e:
logging.error(e)
return None
task_info_notes += Elf32NoteDesc("TASK_INFO", self.ESP_CORE_DUMP_TASK_INFO_TYPE, task_status.dump()).dump()
prstatus = XtensaPrStatus()
prstatus.pr_cursig = 0 # TODO: set sig only for current/failed task
prstatus.pr_pid = tcb_addr
note = Elf32NoteDesc("CORE", 1, prstatus.dump() + struct.pack("<%dL" % len(task_regs), *task_regs)).dump()
notes += note
if ESPCoreDumpElfFile.REG_EXCCAUSE_IDX in extra_regs and len(core_dump_info_notes) == 0:
# actually there will be only one such note - for crashed task
core_dump_info_notes += Elf32NoteDesc("ESP_CORE_DUMP_INFO", self.ESP_CORE_DUMP_INFO_TYPE, struct.pack("<L", self.hdr['ver'])).dump()
exc_regs = []
for reg_id in extra_regs:
exc_regs.extend([reg_id, extra_regs[reg_id]])
core_dump_info_notes += Elf32NoteDesc("EXTRA_INFO", self.ESP_CORE_DUMP_EXTRA_INFO_TYPE,
struct.pack("<%dL" % (1 + len(exc_regs)), tcb_addr, *exc_regs)).dump()
self.set_version(self.hdr['ver'])
if self.dump_ver == self.ESP_COREDUMP_VERSION_BIN_V2:
for i in range(self.hdr['segs_num']):
data = self.read_data(core_off, self.ESP_COREDUMP_MEM_SEG_HDR_SZ)
core_off += self.ESP_COREDUMP_MEM_SEG_HDR_SZ
mem_start,mem_sz = struct.unpack_from(self.ESP_COREDUMP_MEM_SEG_HDR_FMT, data)
logging.debug("Read memory segment %d bytes @ 0x%x" % (mem_sz, mem_start))
data = self.read_data(core_off, mem_sz)
core_elf.add_program_segment(mem_start, data, ESPCoreDumpElfFile.PT_LOAD, ESPCoreDumpSegment.PF_R | ESPCoreDumpSegment.PF_W)
core_off += mem_sz
# add notes
try:
core_elf.add_aux_segment(notes, ESPCoreDumpElfFile.PT_NOTE, 0)
except ESPCoreDumpError as e:
logging.warning("Skip NOTES segment %d bytes @ 0x%x. (Reason: %s)" % (len(notes), 0, e))
# add core dump info notes
try:
core_elf.add_aux_segment(core_dump_info_notes, ESPCoreDumpElfFile.PT_NOTE, 0)
except ESPCoreDumpError as e:
logging.warning("Skip core dump info NOTES segment %d bytes @ 0x%x. (Reason: %s)" % (len(core_dump_info_notes), 0, e))
try:
core_elf.add_aux_segment(task_info_notes, ESPCoreDumpElfFile.PT_NOTE, 0)
except ESPCoreDumpError as e:
logging.warning("Skip failed tasks info NOTES segment %d bytes @ 0x%x. (Reason: %s)" % (len(task_info_notes), 0, e))
# add ROM text sections
if rom_elf:
for ps in rom_elf.program_segments:
if (ps.flags & ESPCoreDumpSegment.PF_X) == 0:
continue
try:
core_elf.add_program_segment(ps.addr, ps.data, ESPCoreDumpElfFile.PT_LOAD, ps.flags)
except ESPCoreDumpError as e:
logging.warning("Skip ROM segment %d bytes @ 0x%x. (Reason: %s)" % (len(ps.data), ps.addr, e))
# dump core ELF
core_elf.e_type = ESPCoreDumpElfFile.ET_CORE
core_elf.e_machine = ESPCoreDumpElfFile.EM_XTENSA
core_elf.dump(fce)
return core_fname
def create_corefile(self, core_fname=None, exe_name=None, rom_elf=None, off=0):
"""Creates core dump ELF file
"""
data = self.read_data(off, self.ESP_COREDUMP_HDR_SZ)
vals = struct.unpack_from(self.ESP_COREDUMP_HDR_FMT, data)
self.hdr = dict(zip(('tot_len', 'ver', 'task_num', 'tcbsz', 'segs_num'), vals))
if not core_fname:
fce = tempfile.NamedTemporaryFile(mode='w+b', delete=False)
core_fname = fce.name
self.set_version(self.hdr['ver'])
if self.chip_ver == ESPCoreDumpVersion.ESP_CORE_DUMP_CHIP_ESP32S2 or self.chip_ver == ESPCoreDumpVersion.ESP_CORE_DUMP_CHIP_ESP32:
if self.dump_ver == self.ESP_COREDUMP_VERSION_ELF_CRC32 or self.dump_ver == self.ESP_COREDUMP_VERSION_ELF_SHA256:
return self._extract_elf_corefile(core_fname, off + self.ESP_COREDUMP_HDR_SZ, exe_name)
elif self.dump_ver == self.ESP_COREDUMP_VERSION_BIN_V2:
return self._extract_bin_corefile(core_fname, rom_elf, off + self.ESP_COREDUMP_HDR_SZ)
elif self.dump_ver == self.ESP_COREDUMP_VERSION_BIN_V1:
return self._extract_bin_corefile(core_fname, rom_elf, off + self.ESP_COREDUMP_BIN_V1_HDR_SZ)
raise ESPCoreDumpLoaderError("Core dump version '0x%x' is not supported!" % (self.dump_ver))
else:
raise ESPCoreDumpLoaderError("Core dump chip '0x%x' is not supported!" % (self.chip_ver))
def read_data(self, off, sz):
"""Reads data from raw core dump got from flash or UART
"""
self.fcore.seek(off)
data = self.fcore.read(sz)
return data
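# Illustrative sketch (not part of the original tool): the binary core dump
# starts with five 32-bit little-endian words which create_corefile() above
# maps to tot_len/ver/task_num/tcbsz/segs_num.
def _example_parse_coredump_header(raw_bytes):
    """Hypothetical helper decoding the leading core dump header words."""
    fields = struct.unpack_from(ESPCoreDumpLoader.ESP_COREDUMP_HDR_FMT, raw_bytes)
    return dict(zip(('tot_len', 'ver', 'task_num', 'tcbsz', 'segs_num'), fields))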
class ESPCoreDumpFileLoader(ESPCoreDumpLoader):
"""Core dump file loader class
"""
def __init__(self, path, b64=False):
"""Constructor for core dump file loader
"""
super(ESPCoreDumpFileLoader, self).__init__()
self.fcore = self._load_coredump(path, b64)
def _load_coredump(self, path, b64):
"""Loads core dump from (raw binary or base64-encoded) file
"""
logging.debug("Load core dump from '%s'", path)
self.fcore_name = None
if b64:
fhnd,self.fcore_name = tempfile.mkstemp()
fcore = os.fdopen(fhnd, 'wb')
fb64 = open(path, 'rb')
try:
while True:
line = fb64.readline()
if len(line) == 0:
break
data = base64.standard_b64decode(line.rstrip(b'\r\n'))
fcore.write(data)
fcore.close()
fcore = open(self.fcore_name, 'rb')
except Exception as e:
if self.fcore_name:
self.remove_tmp_file(self.fcore_name)
raise e
finally:
fb64.close()
else:
fcore = open(path, 'rb')
return fcore
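# Illustrative sketch (not part of the original tool): hypothetical use of the
# file loader above to turn a base64-encoded dump into an ELF core file, the
# same sequence dbg_corefile()/info_corefile() perform further below.
def _example_convert_b64_dump(dump_path, prog_elf):
    """Hypothetical helper wrapping ESPCoreDumpFileLoader.create_corefile()."""
    loader = ESPCoreDumpFileLoader(dump_path, b64=True)
    try:
        return loader.create_corefile(exe_name=prog_elf)
    finally:
        loader.cleanup()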
class ESPCoreDumpFlashLoader(ESPCoreDumpLoader):
"""Core dump flash loader class
"""
ESP_COREDUMP_FLASH_LEN_FMT = '<L'
ESP_COREDUMP_FLASH_LEN_SZ = struct.calcsize(ESP_COREDUMP_FLASH_LEN_FMT)
ESP_COREDUMP_PART_TABLE_OFF = 0x8000
def __init__(self, off, tool_path=None, chip='esp32', port=None, baud=None):
"""Constructor for core dump flash loader
"""
super(ESPCoreDumpFlashLoader, self).__init__()
self.port = port
self.baud = baud
self.chip = chip
self.dump_sz = 0
self.fcore = self._load_coredump(off)
def get_tool_path(self, use_esptool=None):
"""Get tool path
"""
if use_esptool:
tool_path = os.path.join(idf_path, 'components', 'esptool_py', 'esptool') + os.path.sep
else:
tool_path = os.path.join(idf_path, 'components', 'partition_table') + os.path.sep
return tool_path
def get_core_dump_partition_info(self, part_off=None, tool_path=None):
"""Get core dump partition info using parttool
"""
logging.info("Retrieving core dump partition offset and size...")
if not tool_path:
tool_path = self.get_tool_path(use_esptool=False)
if not part_off:
part_off = self.ESP_COREDUMP_PART_TABLE_OFF
size = None
offset = None
try:
tool_args = [sys.executable, tool_path + 'parttool.py', "-q", "--partition-table-offset", str(part_off)]
if self.port:
tool_args.extend(['--port', self.port])
invoke_args = tool_args + ["get_partition_info", "--partition-type", "data", "--partition-subtype", "coredump", "--info", "offset", "size"]
(offset_str, size_str) = subprocess.check_output(invoke_args).strip().split(b" ")
size = int(size_str, 16)
offset = int(offset_str, 16)
logging.info("Core dump partition offset=%d, size=%d", offset, size)
except subprocess.CalledProcessError as e:
logging.error("parttool get partition info failed with err %d" % e.returncode)
logging.debug("Command ran: '%s'" % e.cmd)
logging.debug("Command out:")
logging.debug(e.output)
logging.error("Check if the coredump partition exists in partition table.")
raise e
return (offset, size)
def invoke_parttool(self, tool_path=None):
"""Loads core dump from flash using parttool
"""
part_tool_args = [sys.executable, tool_path + 'parttool.py']
if self.port:
part_tool_args.extend(['--port', self.port])
part_tool_args.extend(['read_partition', '--partition-type', 'data', '--partition-subtype', 'coredump', '--output'])
self.fcore_name = None
f = tempfile.NamedTemporaryFile(mode='w+b', delete=False)
try:
part_tool_args.append(f.name)
self.fcore_name = f.name
# read core dump partition
et_out = subprocess.check_output(part_tool_args)
if len(et_out):
logging.info(et_out.decode('utf-8'))
self.dump_sz = self._read_core_dump_length(f)
f.seek(self.dump_sz)
# cut free space of the partition
f.truncate()
f.seek(0)
except subprocess.CalledProcessError as e:
logging.error("parttool script execution failed with err %d" % e.returncode)
logging.debug("Command ran: '%s'" % e.cmd)
logging.debug("Command out:")
logging.debug(e.output)
if self.fcore_name:
f.close()
self.remove_tmp_file(self.fcore_name)
raise e
return f
def invoke_esptool(self, tool_path=None, off=None):
"""Loads core dump from flash using elftool
"""
tool_args = [sys.executable, tool_path + 'esptool.py', '-c', self.chip]
if self.port:
tool_args.extend(['-p', self.port])
if self.baud:
tool_args.extend(['-b', str(self.baud)])
f = tempfile.NamedTemporaryFile(mode='w+b', delete=False)
self.fcore_name = None
try:
(part_offset, part_size) = self.get_core_dump_partition_info(tool_path='')
if not off:
off = part_offset # set default offset if not specified
logging.warning("The core dump image offset is not specified. Use partition offset: %d.", part_offset)
if part_offset != off:
logging.warning("Predefined image offset: %d does not match core dump partition offset: %d", off, part_offset)
tool_args.extend(['read_flash', str(off), str(self.ESP_COREDUMP_FLASH_LEN_SZ)])
tool_args.append(f.name)
self.fcore_name = f.name
# read core dump length
et_out = subprocess.check_output(tool_args)
if len(et_out):
logging.info(et_out.decode('utf-8'))
self.dump_sz = self._read_core_dump_length(f)
if self.dump_sz == 0 or self.dump_sz > part_size:
logging.error("Incorrect size of core dump image: %d, use partition size instead: %d", self.dump_sz, part_size)
self.dump_sz = part_size
# set actual size of core dump image and read it from flash
tool_args[-2] = str(self.dump_sz)
et_out = subprocess.check_output(tool_args)
if len(et_out):
logging.info(et_out.decode('utf-8'))
except subprocess.CalledProcessError as e:
logging.error("esptool script execution failed with err %d" % e.returncode)
logging.debug("Command ran: '%s'" % e.cmd)
logging.debug("Command out:")
logging.debug(e.output)
if self.fcore_name:
f.close()
self.remove_tmp_file(self.fcore_name)
raise e
return f
def _load_coredump(self, off=None):
"""Loads core dump from flash using parttool or elftool (if offset is set)
"""
tool_path = None
try:
if off:
tool_path = ''
logging.info("Invoke esptool to read image.")
f = self.invoke_esptool(tool_path=tool_path, off=off)
else:
tool_path = ''
logging.info("Invoke parttool to read image.")
f = self.invoke_parttool(tool_path=tool_path)
except subprocess.CalledProcessError as e:
if len(e.output):
logging.info(e.output)
logging.warning("System path is not set. Try to use predefined path.")
if off:
tool_path = self.get_tool_path(use_esptool=True)
f = self.invoke_esptool(tool_path=tool_path, off=off)
else:
tool_path = self.get_tool_path(use_esptool=False)
f = self.invoke_parttool(tool_path=tool_path)
return f
def _read_core_dump_length(self, f):
"""Reads core dump length
"""
data = f.read(self.ESP_COREDUMP_FLASH_LEN_SZ)
tot_len, = struct.unpack_from(self.ESP_COREDUMP_FLASH_LEN_FMT, data)
return tot_len
def create_corefile(self, core_fname=None, exe_name=None, rom_elf=None):
"""Checks flash coredump data integrity and creates ELF file
"""
data = self.read_data(0, self.ESP_COREDUMP_HDR_SZ)
self.checksum_len = 0
_,coredump_ver_data,_,_,_ = struct.unpack_from(self.ESP_COREDUMP_HDR_FMT, data)
self.set_version(coredump_ver_data)
if self.chip_ver != ESPCoreDumpVersion.ESP_CORE_DUMP_CHIP_ESP32S2 and self.chip_ver != ESPCoreDumpVersion.ESP_CORE_DUMP_CHIP_ESP32:
raise ESPCoreDumpLoaderError("Invalid core dump chip version: '%s', should be <= '0x%x'" % (self.chip_ver, self.ESP_CORE_DUMP_CHIP_ESP32S2))
if self.dump_ver == self.ESP_COREDUMP_VERSION_ELF_CRC32 or self.dump_ver == self.ESP_COREDUMP_VERSION_BIN_V1 \
or self.dump_ver == self.ESP_COREDUMP_VERSION_BIN_V2:
logging.debug("Dump size = %d, crc off = 0x%x", self.dump_sz, self.dump_sz - self.ESP_COREDUMP_CRC_SZ)
data = self.read_data(self.dump_sz - self.ESP_COREDUMP_CRC_SZ, self.ESP_COREDUMP_CRC_SZ)
dump_crc, = struct.unpack_from(self.ESP_COREDUMP_CRC_FMT, data)
data = self.read_data(0, self.dump_sz - self.ESP_COREDUMP_CRC_SZ)
data_crc = binascii.crc32(data) & 0xffffffff
if dump_crc != data_crc:
raise ESPCoreDumpLoaderError("Invalid core dump CRC %x, should be %x" % (data_crc, dump_crc))
elif self.dump_ver == self.ESP_COREDUMP_VERSION_ELF_SHA256:
dump_sha256 = self.read_data(self.dump_sz - self.ESP_COREDUMP_SHA256_SZ, self.ESP_COREDUMP_SHA256_SZ)
data = self.read_data(0, self.dump_sz - self.ESP_COREDUMP_SHA256_SZ)
data_sha256 = sha256(data)
data_sha256_str = data_sha256.hexdigest()
dump_sha256_str = binascii.hexlify(dump_sha256).decode('ascii')
if dump_sha256_str != data_sha256_str:
raise ESPCoreDumpLoaderError("Invalid core dump SHA256 '%s', should be '%s'" % (dump_sha256_str, data_sha256_str))
return super(ESPCoreDumpFlashLoader, self).create_corefile(core_fname, exe_name)
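# Illustrative sketch (not part of the original tool): for CRC32-protected dumps
# create_corefile() above compares the trailing 32-bit word of the image against
# crc32 of everything that precedes it, roughly as follows.
def _example_check_crc(image_bytes):
    """Hypothetical helper reproducing the CRC32 integrity check."""
    stored_crc, = struct.unpack_from('<L', image_bytes[-4:])
    computed_crc = binascii.crc32(image_bytes[:-4]) & 0xffffffff
    return stored_crc == computed_crc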
class GDBMIOutRecordHandler(object):
"""GDB/MI output record handler base class
"""
TAG = ''
def __init__(self, f, verbose=False):
"""Base constructor for GDB/MI output record handler
"""
self.verbose = verbose
def execute(self, ln):
"""Base method to execute GDB/MI output record handler function
"""
if self.verbose:
logging.debug("%s.execute: [[%s]]" % (self.__class__.__name__, ln))
class GDBMIOutStreamHandler(GDBMIOutRecordHandler):
"""GDB/MI output stream handler class
"""
def __init__(self, f, verbose=False):
"""Constructor for GDB/MI output stream handler
"""
super(GDBMIOutStreamHandler, self).__init__(None, verbose)
self.func = f
def execute(self, ln):
"""Executes GDB/MI output stream handler function
"""
GDBMIOutRecordHandler.execute(self, ln)
if self.func:
# remove TAG / quotes and replace c-string \n with actual NL
self.func(ln[1:].strip('"').replace('\\n', '\n').replace('\\t', '\t'))
class GDBMIResultHandler(GDBMIOutRecordHandler):
"""GDB/MI result handler class
"""
TAG = '^'
RC_DONE = 'done'
RC_RUNNING = 'running'
RC_CONNECTED = 'connected'
RC_ERROR = 'error'
RC_EXIT = 'exit'
def __init__(self, verbose=False):
"""Constructor for GDB/MI result handler
"""
super(GDBMIResultHandler, self).__init__(None, verbose)
self.result_class = ''
self.result_str = ''
def _parse_rc(self, ln, rc):
"""Parses result code
"""
rc_str = "{0}{1}".format(self.TAG, rc)
if not ln.startswith(rc_str):
return False
self.result_class = rc
if len(ln) > len(rc_str):
self.result_str = ln[len(rc_str):]
if self.result_str.startswith(','):
self.result_str = self.result_str[1:]
else:
logging.error("Invalid result format: '%s'" % ln)
else:
self.result_str = ''
return True
def execute(self, ln):
"""Executes GDB/MI result handler function
"""
GDBMIOutRecordHandler.execute(self, ln)
if self._parse_rc(ln, self.RC_DONE):
return
if self._parse_rc(ln, self.RC_RUNNING):
return
if self._parse_rc(ln, self.RC_CONNECTED):
return
if self._parse_rc(ln, self.RC_ERROR):
return
if self._parse_rc(ln, self.RC_EXIT):
return
logging.error("Unknown GDB/MI result: '%s'" % ln)
class GDBMIThreadListIdsHandler(GDBMIResultHandler):
"""GDB/MI thread-list-ids handler class
"""
def __init__(self, verbose=False):
"""Constructor for GDB/MI result handler
"""
super(GDBMIThreadListIdsHandler, self).__init__(verbose)
self.threads = []
self.current_thread = ''
def execute(self, ln):
"""Executes GDB/MI thread-list-ids handler function
"""
GDBMIResultHandler.execute(self, ln)
if self.result_class != self.RC_DONE:
return
# simple parsing method
result = re.search(r'thread-ids\s*=\s*\{([^\{\}]*)\}', self.result_str)
if result:
for tid in re.finditer(r'thread-id="(\d+)"', result.group(1)):
self.threads.append(tid.group(1))
result = re.search(r'current-thread-id="(\d+)"', self.result_str)
if result:
self.current_thread = result.group(1)
class GDBMIThreadSelectHandler(GDBMIResultHandler):
"""GDB/MI thread-select handler class
"""
def execute(self, ln):
"""Executes GDB/MI thread-select handler function
"""
GDBMIResultHandler.execute(self, ln)
if self.result_class != self.RC_DONE:
return
class GDBMIThreadInfoHandler(GDBMIResultHandler):
"""GDB/MI thread-info handler class
"""
def __init__(self, verbose=False):
"""Constructor for GDB/MI result handler
"""
super(GDBMIThreadInfoHandler, self).__init__(verbose)
self.current = False
self.id = ''
self.target_id = ''
self.details = ''
self.name = ''
self.frame = ''
self.state = ''
self.core = ''
def execute(self, ln):
"""Executes GDB/MI thread-info handler function
"""
GDBMIResultHandler.execute(self, ln)
if self.result_class != self.RC_DONE:
return
# simple parsing method
result = re.search(r'id="(\d+)"', self.result_str)
if result:
self.id = result.group(1)
result = re.search(r'current="\*"', self.result_str)
if result:
self.current = True
result = re.search(r'target-id="([^"]+)"', self.result_str)
if result:
self.target_id = result.group(1)
class GDBMIDataEvalHandler(GDBMIResultHandler):
"""GDB/MI data-evaluate-expression handler class
"""
def __init__(self, verbose=False):
"""Constructor for GDB/MI result handler
"""
super(GDBMIDataEvalHandler, self).__init__(verbose)
self.value = ''
def execute(self, ln):
"""Executes GDB/MI data-evaluate-expression handler function
"""
GDBMIResultHandler.execute(self, ln)
if self.result_class != self.RC_DONE:
return
# simple parsing method
if self.verbose:
logging.debug("GDBMIDataEvalHandler: result '%s'", self.result_str)
pos = 0
r = re.compile(r'([a-zA-Z_]+)=(.+)\,')
while True:
m = r.search(self.result_str, pos=pos)
if not m:
break
if m.group(1) == 'value':
if self.verbose:
logging.debug("GDBMIDataEvalHandler: found value = '%s'", m.group(2))
                self.value = m.group(2)
return
pos = m.end(2) + 1
res_str = self.result_str[pos:]
res_str = res_str.replace(r'\"', '\'')
m = re.search(r'value="([^"]+)"', res_str)
if m:
if self.verbose:
logging.debug("GDBMIDataEvalHandler: found value = '%s'", m.group(1))
self.value = m.group(1)
class GDBMIStreamConsoleHandler(GDBMIOutStreamHandler):
"""GDB/MI console stream handler class
"""
TAG = '~'
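# Illustrative sketch (not part of the original tool): console stream records
# start with '~'; the handler above strips the tag and quotes and expands the
# escaped newlines/tabs before invoking its callback.
def _example_handle_console_record(line='~"backtrace:\\n"'):
    """Hypothetical helper exercising GDBMIStreamConsoleHandler in isolation."""
    out = []
    handler = GDBMIStreamConsoleHandler(out.append, verbose=False)
    handler.execute(line)
    return out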
def load_aux_elf(elf_path):
""" Loads auxilary ELF file and composes GDB command to read its symbols
"""
elf = None
sym_cmd = ''
if os.path.exists(elf_path):
elf = ESPCoreDumpElfFile(elf_path)
for s in elf.sections:
if s.name == '.text':
sym_cmd = 'add-symbol-file %s 0x%x' % (elf_path, s.addr)
return (elf, sym_cmd)
def dbg_corefile(args):
""" Command to load core dump from file or flash and run GDB debug session with it
"""
global CLOSE_FDS
loader = None
rom_elf,rom_sym_cmd = load_aux_elf(args.rom_elf)
if not args.core:
loader = ESPCoreDumpFlashLoader(args.off, port=args.port, baud=args.baud)
core_fname = loader.create_corefile(args.save_core, exe_name=args.prog, rom_elf=rom_elf)
if not core_fname:
logging.error("Failed to create corefile!")
loader.cleanup()
return
else:
core_fname = args.core
if args.core_format and args.core_format != 'elf':
loader = ESPCoreDumpFileLoader(core_fname, args.core_format == 'b64')
core_fname = loader.create_corefile(args.save_core, exe_name=args.prog, rom_elf=rom_elf)
if not core_fname:
logging.error("Failed to create corefile!")
loader.cleanup()
return
p = subprocess.Popen(bufsize=0,
args=[args.gdb,
'--nw', # ignore .gdbinit
'--core=%s' % core_fname, # core file,
'-ex', rom_sym_cmd,
args.prog
],
stdin=None, stdout=None, stderr=None,
close_fds=CLOSE_FDS
)
p.wait()
if loader:
if not args.core and not args.save_core:
loader.remove_tmp_file(core_fname)
loader.cleanup()
print('Done!')
def info_corefile(args):
""" Command to load core dump from file or flash and print it's data in user friendly form
"""
global CLOSE_FDS
def gdbmi_console_stream_handler(ln):
sys.stdout.write(ln)
sys.stdout.flush()
def gdbmi_read2prompt(f, out_handlers=None):
while True:
ln = f.readline().decode('utf-8').rstrip(' \r\n')
if ln == '(gdb)':
break
elif len(ln) == 0:
break
elif out_handlers:
for h in out_handlers:
if ln.startswith(out_handlers[h].TAG):
out_handlers[h].execute(ln)
break
def gdbmi_start(handlers, gdb_cmds):
gdb_args = [args.gdb,
'--quiet', # inhibit dumping info at start-up
'--nx', # inhibit window interface
'--nw', # ignore .gdbinit
'--interpreter=mi2', # use GDB/MI v2
'--core=%s' % core_fname] # core file
for c in gdb_cmds:
gdb_args += ['-ex', c]
gdb_args.append(args.prog)
p = subprocess.Popen(bufsize=0,
args=gdb_args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=CLOSE_FDS)
gdbmi_read2prompt(p.stdout, handlers)
return p
def gdbmi_cmd_exec(p, handlers, gdbmi_cmd):
for t in handlers:
handlers[t].result_class = None
p.stdin.write(bytearray("%s\n" % gdbmi_cmd, encoding='utf-8'))
gdbmi_read2prompt(p.stdout, handlers)
if not handlers[GDBMIResultHandler.TAG].result_class or handlers[GDBMIResultHandler.TAG].result_class == GDBMIResultHandler.RC_EXIT:
logging.error("GDB exited (%s / %s)!" % (handlers[GDBMIResultHandler.TAG].result_class, handlers[GDBMIResultHandler.TAG].result_str))
p.wait()
logging.error("Problem occured! GDB exited, restart it.")
p = gdbmi_start(handlers, [])
elif handlers[GDBMIResultHandler.TAG].result_class != GDBMIResultHandler.RC_DONE:
logging.error("GDB/MI command failed (%s / %s)!" % (handlers[GDBMIResultHandler.TAG].result_class, handlers[GDBMIResultHandler.TAG].result_str))
return p
def gdbmi_getinfo(p, handlers, gdb_cmd):
return gdbmi_cmd_exec(p, handlers, "-interpreter-exec console \"%s\"" % gdb_cmd)
def gdbmi_get_thread_ids(p):
handlers = {}
result = GDBMIThreadListIdsHandler(verbose=False)
handlers[GDBMIResultHandler.TAG] = result
handlers[GDBMIStreamConsoleHandler.TAG] = GDBMIStreamConsoleHandler(None, verbose=False)
p = gdbmi_cmd_exec(p, handlers, "-thread-list-ids")
return p,result.threads,result.current_thread
def gdbmi_switch_thread(p, thr_id):
handlers = {}
result = GDBMIThreadSelectHandler(verbose=False)
handlers[GDBMIResultHandler.TAG] = result
handlers[GDBMIStreamConsoleHandler.TAG] = GDBMIStreamConsoleHandler(None, verbose=False)
return gdbmi_cmd_exec(p, handlers, "-thread-select %s" % thr_id)
def gdbmi_get_thread_info(p, thr_id):
handlers = {}
result = GDBMIThreadInfoHandler(verbose=False)
handlers[GDBMIResultHandler.TAG] = result
handlers[GDBMIStreamConsoleHandler.TAG] = GDBMIStreamConsoleHandler(None, verbose=False)
if thr_id:
cmd = "-thread-info %s" % thr_id
else:
cmd = "-thread-info"
p = gdbmi_cmd_exec(p, handlers, cmd)
return p,result
def gdbmi_data_evaluate_expression(p, expr):
handlers = {}
result = GDBMIDataEvalHandler(verbose=False)
handlers[GDBMIResultHandler.TAG] = result
handlers[GDBMIStreamConsoleHandler.TAG] = GDBMIStreamConsoleHandler(None, verbose=False)
p = gdbmi_cmd_exec(p, handlers, "-data-evaluate-expression \"%s\"" % expr)
return p,result
def gdbmi_freertos_get_task_name(p, tcb_addr):
p,res = gdbmi_data_evaluate_expression(p, "(char*)((TCB_t *)0x%x)->pcTaskName" % tcb_addr)
result = re.match("0x[a-fA-F0-9]+[^']*'([^']*)'", res.value)
if result:
return p,result.group(1)
return p,''
def gdb2freertos_thread_id(gdb_thread_id):
return int(gdb_thread_id.replace("process ", ""), 0)
loader = None
rom_elf,rom_sym_cmd = load_aux_elf(args.rom_elf)
if not args.core:
loader = ESPCoreDumpFlashLoader(args.off, port=args.port, baud=args.baud)
core_fname = loader.create_corefile(args.save_core, exe_name=args.prog, rom_elf=rom_elf)
if not core_fname:
logging.error("Failed to create corefile!")
loader.cleanup()
return
else:
core_fname = args.core
if args.core_format and args.core_format != 'elf':
loader = ESPCoreDumpFileLoader(core_fname, args.core_format == 'b64')
core_fname = loader.create_corefile(args.save_core, exe_name=args.prog, rom_elf=rom_elf)
if not core_fname:
logging.error("Failed to create corefile!")
loader.cleanup()
return
exe_elf = ESPCoreDumpElfFile(args.prog)
core_elf = ESPCoreDumpElfFile(core_fname)
merged_segs = []
core_segs = core_elf.program_segments
for s in exe_elf.sections:
merged = False
for ps in core_segs:
if ps.addr <= s.addr and ps.addr + len(ps.data) >= s.addr:
# sec: |XXXXXXXXXX|
# seg: |...XXX.............|
seg_addr = ps.addr
if ps.addr + len(ps.data) <= s.addr + len(s.data):
# sec: |XXXXXXXXXX|
# seg: |XXXXXXXXXXX...|
# merged: |XXXXXXXXXXXXXX|
seg_len = len(s.data) + (s.addr - ps.addr)
else:
# sec: |XXXXXXXXXX|
# seg: |XXXXXXXXXXXXXXXXX|
# merged: |XXXXXXXXXXXXXXXXX|
seg_len = len(ps.data)
merged_segs.append((s.name, seg_addr, seg_len, s.attr_str(), True))
core_segs.remove(ps)
merged = True
elif ps.addr >= s.addr and ps.addr <= s.addr + len(s.data):
# sec: |XXXXXXXXXX|
# seg: |...XXX.............|
seg_addr = s.addr
if (ps.addr + len(ps.data)) >= (s.addr + len(s.data)):
# sec: |XXXXXXXXXX|
# seg: |..XXXXXXXXXXX|
# merged: |XXXXXXXXXXXXX|
seg_len = len(s.data) + (ps.addr + len(ps.data)) - (s.addr + len(s.data))
else:
# sec: |XXXXXXXXXX|
# seg: |XXXXXX|
# merged: |XXXXXXXXXX|
seg_len = len(s.data)
merged_segs.append((s.name, seg_addr, seg_len, s.attr_str(), True))
core_segs.remove(ps)
merged = True
if not merged:
merged_segs.append((s.name, s.addr, len(s.data), s.attr_str(), False))
handlers = {}
handlers[GDBMIResultHandler.TAG] = GDBMIResultHandler(verbose=False)
handlers[GDBMIStreamConsoleHandler.TAG] = GDBMIStreamConsoleHandler(None, verbose=False)
p = gdbmi_start(handlers, [rom_sym_cmd])
extra_note = None
task_info = []
for seg in core_elf.aux_segments:
if seg.type != ESPCoreDumpElfFile.PT_NOTE:
continue
note_read = 0
while note_read < len(seg.data):
note = Elf32NoteDesc("", 0, None)
note_read += note.read(seg.data[note_read:])
if note.type == ESPCoreDumpLoader.ESP_CORE_DUMP_EXTRA_INFO_TYPE and 'EXTRA_INFO' in note.name:
extra_note = note
if note.type == ESPCoreDumpLoader.ESP_CORE_DUMP_TASK_INFO_TYPE and 'TASK_INFO' in note.name:
task_info_struct = EspCoreDumpTaskStatus(buf=note.desc)
task_info.append(task_info_struct)
print("===============================================================")
print("==================== ESP32 CORE DUMP START ====================")
handlers[GDBMIResultHandler.TAG].result_class = None
handlers[GDBMIStreamConsoleHandler.TAG].func = gdbmi_console_stream_handler
if extra_note:
extra_info = struct.unpack("<%dL" % (len(extra_note.desc) / struct.calcsize("<L")), extra_note.desc)
if extra_info[0] == ESPCoreDumpLoader.ESP_COREDUMP_CURR_TASK_MARKER:
print("\nCrashed task has been skipped.")
else:
p,task_name = gdbmi_freertos_get_task_name(p, extra_info[0])
print("\nCrashed task handle: 0x%x, name: '%s', GDB name: 'process %d'" % (extra_info[0], task_name, extra_info[0]))
print("\n================== CURRENT THREAD REGISTERS ===================")
if extra_note:
exccause = extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EXCCAUSE_IDX + 1]
exccause_str = xtensa_exception_cause_dict.get(exccause)
if not exccause_str:
exccause_str = ("Invalid EXCCAUSE code", "Invalid EXCAUSE description or not found.")
print("exccause 0x%x (%s)" % (exccause, exccause_str[0]))
print("excvaddr 0x%x" % extra_info[1 + 2 * ESPCoreDumpElfFile.REG_EXCVADDR_IDX + 1])
# skip crashed_task_tcb, exccause, and excvaddr
for i in range(5, len(extra_info), 2):
if (extra_info[i] >= ESPCoreDumpElfFile.REG_EPC1_IDX and extra_info[i] <= ESPCoreDumpElfFile.REG_EPC7_IDX):
print('epc%d 0x%x' % ((extra_info[i] - ESPCoreDumpElfFile.REG_EPC1_IDX + 1), extra_info[i + 1]))
# skip crashed_task_tcb, exccause, and excvaddr
for i in range(5, len(extra_info), 2):
if (extra_info[i] >= ESPCoreDumpElfFile.REG_EPS2_IDX and extra_info[i] <= ESPCoreDumpElfFile.REG_EPS7_IDX):
print('eps%d 0x%x' % ((extra_info[i] - ESPCoreDumpElfFile.REG_EPS2_IDX + 2), extra_info[i + 1]))
else:
print("Exception registers have not been found!")
p = gdbmi_getinfo(p, handlers, "info registers")
print("\n==================== CURRENT THREAD STACK =====================")
p = gdbmi_getinfo(p, handlers, "bt")
if task_info and task_info[0].task_flags != EspCoreDumpTaskStatus.TASK_STATUS_CORRECT:
print("The current crashed task is corrupted.")
print("Task #%d info: flags, tcb, stack (%x, %x, %x)." % (task_info[0].task_index,
task_info[0].task_flags,
task_info[0].task_tcb_addr,
task_info[0].task_stack_start))
print("\n======================== THREADS INFO =========================")
p = gdbmi_getinfo(p, handlers, "info threads")
# THREADS STACKS
p,threads,cur_thread = gdbmi_get_thread_ids(p)
print()
for thr_id in threads:
task_index = int(thr_id) - 1
p = gdbmi_switch_thread(p, thr_id)
p,thr_info_res = gdbmi_get_thread_info(p, thr_id)
if not thr_info_res.target_id:
print("WARNING: Unable to switch to thread %s\n" % thr_id)
continue
tcb_addr = gdb2freertos_thread_id(thr_info_res.target_id)
p,task_name = gdbmi_freertos_get_task_name(p, tcb_addr)
print("==================== THREAD %s (TCB: 0x%x, name: '%s') =====================" % (thr_id, tcb_addr, task_name))
p = gdbmi_getinfo(p, handlers, "bt")
if task_info and task_info[task_index].task_flags != EspCoreDumpTaskStatus.TASK_STATUS_CORRECT:
print("The task '%s' is corrupted." % thr_id)
print("Task #%d info: flags, tcb, stack (%x, %x, %x)." % (task_info[task_index].task_index,
task_info[task_index].task_flags,
task_info[task_index].task_tcb_addr,
task_info[task_index].task_stack_start))
print()
print("\n======================= ALL MEMORY REGIONS ========================")
print("Name Address Size Attrs")
for ms in merged_segs:
print("%s 0x%x 0x%x %s" % (ms[0], ms[1], ms[2], ms[3]))
for cs in core_segs:
        # core dump exec segments are from ROM, others belong to tasks (TCB or stack)
if cs.flags & ESPCoreDumpSegment.PF_X:
seg_name = 'rom.text'
else:
seg_name = 'tasks.data'
print(".coredump.%s 0x%x 0x%x %s" % (seg_name, cs.addr, len(cs.data), cs.attr_str()))
if args.print_mem:
print("\n====================== CORE DUMP MEMORY CONTENTS ========================")
for cs in core_elf.program_segments:
            # core dump exec segments are from ROM, others belong to tasks (TCB or stack)
if cs.flags & ESPCoreDumpSegment.PF_X:
seg_name = 'rom.text'
else:
seg_name = 'tasks.data'
print(".coredump.%s 0x%x 0x%x %s" % (seg_name, cs.addr, len(cs.data), cs.attr_str()))
p = gdbmi_getinfo(p, handlers, "x/%dx 0x%x" % (old_div(len(cs.data),4), cs.addr))
print("\n===================== ESP32 CORE DUMP END =====================")
print("===============================================================")
p.stdin.write(b'q\n')
p.wait()
p.stdin.close()
p.stdout.close()
if loader:
if not args.core and not args.save_core:
loader.remove_tmp_file(core_fname)
loader.cleanup()
print('Done!')
def main():
parser = argparse.ArgumentParser(description='espcoredump.py v%s - ESP32 Core Dump Utility' % __version__, prog='espcoredump')
parser.add_argument('--chip', '-c',
help='Target chip type',
choices=['auto', 'esp32'],
default=os.environ.get('ESPTOOL_CHIP', 'auto'))
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', esptool.ESPLoader.DEFAULT_PORT))
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate used when flashing/reading',
type=int,
default=os.environ.get('ESPTOOL_BAUD', esptool.ESPLoader.ESP_ROM_BAUD))
subparsers = parser.add_subparsers(
dest='operation',
help='Run coredumper {command} -h for additional help')
parser_debug_coredump = subparsers.add_parser(
'dbg_corefile',
help='Starts GDB debugging session with specified corefile')
parser_debug_coredump.add_argument('--debug', '-d', help='Log level (0..3)', type=int, default=3)
parser_debug_coredump.add_argument('--gdb', '-g', help='Path to gdb', default='xtensa-esp32-elf-gdb')
parser_debug_coredump.add_argument('--core', '-c', help='Path to core dump file (if skipped core dump will be read from flash)', type=str)
parser_debug_coredump.add_argument('--core-format', '-t', help='(elf, raw or b64). File specified with "-c" is an ELF ("elf"), '
'raw (raw) or base64-encoded (b64) binary',
choices=['b64', 'elf', 'raw'], type=str, default='elf')
    parser_debug_coredump.add_argument('--off', '-o', help='Offset of coredump partition in flash '
'(type "make partition_table" to see).', type=int, default=None)
    parser_debug_coredump.add_argument('--save-core', '-s', help='Save core to file. Otherwise the temporary core file will be deleted. '
'Ignored with "-c"', type=str)
parser_debug_coredump.add_argument('--rom-elf', '-r', help='Path to ROM ELF file.', type=str, default='esp32_rom.elf')
parser_debug_coredump.add_argument('prog', help='Path to program\'s ELF binary', type=str)
parser_info_coredump = subparsers.add_parser(
'info_corefile',
help='Print core dump info from file')
parser_info_coredump.add_argument('--debug', '-d', help='Log level (0..3)', type=int, default=3)
parser_info_coredump.add_argument('--gdb', '-g', help='Path to gdb', default='xtensa-esp32-elf-gdb')
parser_info_coredump.add_argument('--core', '-c', help='Path to core dump file (if skipped core dump will be read from flash)', type=str)
parser_info_coredump.add_argument('--core-format', '-t', help='(elf, raw or b64). File specified with "-c" is an ELF ("elf"), '
'raw (raw) or base64-encoded (b64) binary',
choices=['b64', 'elf', 'raw'], type=str, default='elf')
parser_info_coredump.add_argument('--off', '-o', help='Offset of coredump partition in flash (type '
'"make partition_table" to see).', type=int, default=None)
    parser_info_coredump.add_argument('--save-core', '-s', help='Save core to file. Otherwise the temporary core file will be deleted. '
'Does not work with "-c"', type=str)
parser_info_coredump.add_argument('--rom-elf', '-r', help='Path to ROM ELF file.', type=str, default='esp32_rom.elf')
parser_info_coredump.add_argument('--print-mem', '-m', help='Print memory dump', action='store_true')
parser_info_coredump.add_argument('prog', help='Path to program\'s ELF binary', type=str)
# internal sanity check - every operation matches a module function of the same name
for operation in subparsers.choices:
assert operation in globals(), "%s should be a module function" % operation
args = parser.parse_args()
log_level = logging.CRITICAL
if args.debug == 0:
log_level = logging.CRITICAL
elif args.debug == 1:
log_level = logging.ERROR
elif args.debug == 2:
log_level = logging.WARNING
elif args.debug == 3:
log_level = logging.INFO
else:
log_level = logging.DEBUG
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
print('espcoredump.py v%s' % __version__)
operation_func = globals()[args.operation]
operation_func(args)
if __name__ == '__main__':
try:
main()
except ESPCoreDumpError as e:
print('\nA fatal error occurred: %s' % e)
sys.exit(2)
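# Illustrative usage note (not part of the original tool), assuming default tool
# paths and placeholder file names: `espcoredump.py info_corefile -t b64 -c
# coredump.b64 app.elf` prints a summary of a base64-encoded dump, while
# `espcoredump.py dbg_corefile app.elf` reads the dump from flash and opens a
# GDB session on it.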
| []
| []
| [
"IDF_PATH",
"ESPTOOL_BAUD",
"ESPTOOL_PORT",
"ESPTOOL_CHIP"
]
| [] | ["IDF_PATH", "ESPTOOL_BAUD", "ESPTOOL_PORT", "ESPTOOL_CHIP"] | python | 4 | 0 | |
meshcon/meshconnectord/snigate.go | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package meshconnectord
import (
"context"
"log"
"net"
"os"
"strings"
"time"
"github.com/GoogleCloudPlatform/cloud-run-mesh/pkg/hbone"
"github.com/GoogleCloudPlatform/cloud-run-mesh/pkg/mesh"
"github.com/GoogleCloudPlatform/cloud-run-mesh/pkg/sts"
"golang.org/x/net/http2"
corev1 "k8s.io/api/core/v1"
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type MeshConnector struct {
SNIListener net.Listener
H2RListener net.Listener
Auth *hbone.Auth
HBone *hbone.HBone
Mesh *mesh.KRun
Namespace string
ConfigMapName string
stop chan struct{}
Services map[string]*corev1.Service
EP map[string]*discoveryv1beta1.EndpointSlice
}
func New(kr *mesh.KRun) *MeshConnector {
return &MeshConnector{
Mesh: kr,
Namespace: "istio-system",
ConfigMapName: "mesh-env",
EP: map[string]*discoveryv1beta1.EndpointSlice{},
Services: map[string]*corev1.Service{},
stop: make(chan struct{}),
}
}
// InitSNIGate will start the mesh gateway, with a special SNI router port.
// The h2rPort is experimental, for dev/debug, for users running/debugging apps locally.
func (sg *MeshConnector) InitSNIGate(ctx context.Context, sniPort string, h2rPort string) error {
kr := sg.Mesh
// Locate a k8s cluster, load configs from env and from existing mesh-env.
err := kr.LoadConfig(ctx)
if err != nil {
return err
}
// If not explicitly disabled, attempt to find MCP tenant ID and enable MCP
if kr.MeshTenant != "-" {
err = sg.FindTenant(ctx)
if err != nil {
return err
}
}
// Default the XDSAddr for this instance to the service created by the hgate install.
// istiod.istio-system may not be created if 'revision install' is used.
if kr.XDSAddr == "" &&
(kr.MeshTenant == "" || kr.MeshTenant == "-") {
// Explicitly set XDSAddr, the gate should run in the same cluster
// with istiod (to forward to istiod), but will use the local in-cluster address.
kr.XDSAddr = "hgate-istiod.istio-system.svc:15012"
log.Println("MCP not detected, using hgate-istiod service", kr.MeshTenant)
}
if kr.MeshConnectorAddr == "" {
		// We'll need to wait for it - it is used when updating the config
err := sg.WaitService(ctx)
if err != nil {
return err
}
}
if kr.MeshConnectorInternalAddr == "" {
err := sg.WaitInternalService(ctx)
if err != nil {
return err
}
}
citadelRoot, err := sg.GetCARoot(ctx)
if err != nil {
return err
}
if citadelRoot != "" {
kr.CitadelRoot = citadelRoot
}
sg.NewWatcher()
if kr.Gateway == "" {
kr.Gateway = "hgate"
}
err = kr.StartIstioAgent()
if err != nil {
log.Fatal("Failed to start istio agent and envoy", err)
}
// Will use istio-agent created certs for now. WIP: run the
// gate without pilot-agent/envoy, will use built-in CA providers.
if sg.Auth == nil {
auth, err := hbone.NewAuthFromDir(kr.BaseDir + "/var/run/secrets/istio.io/")
if err != nil {
return err
}
// All namespaces are allowed to connect.
auth.AllowedNamespaces = []string{"*"}
sg.Auth = auth
}
h2r := hbone.New(sg.Auth)
sg.HBone = h2r
stsc, err := sts.NewSTS(kr)
if err != nil {
return err
}
tcache := sts.NewTokenCache(kr, stsc)
h2r.TokenCallback = tcache.Token
sg.updateMeshEnv(ctx)
h2r.EndpointResolver = func(sni string) *hbone.Endpoint {
// Current Istio SNI looks like:
//
// outbound_.9090_._.prometheus-1-prometheus.mon.svc.cluster.local
// We need to map it to a cloudrun external address, add token based on the audience, and make the call using
// the tunnel.
//
// Also supports the 'natural' form
//
//
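		// For example (hypothetical service name), an SNI of
		// "outbound_.9090_._.fortio-cr.fortio.svc.cluster.local" would yield
		// remoteService "fortio-cr", and the tunnel endpoint becomes
		// "https://fortio-cr.a.run.app/_hbone/15003" with SNI "fortio-cr.a.run.app".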
parts := strings.Split(sni, ".")
remoteService := parts[0]
if parts[0] == "outbound_" {
remoteService = parts[3]
// TODO: extract 'version' from URL, convert it to cloudrun revision ?
// TODO: watcher on Service or ServiceEntry ( k8s or XDS ) to get annotation, allowing service name to be different
}
base := remoteService + ".a.run.app"
h2c := h2r.NewClient(sni)
//ep := h2c.NewEndpoint("https://" + base + "/_hbone/mtls")
ep := h2c.NewEndpoint("https://" + base + "/_hbone/15003")
ep.SNI = base
return ep
}
h2r.H2RCallback = func(s string, conn *http2.ClientConn) {
if s == "" {
return
}
log.Println("H2R connection event", s, conn)
		// TODO: save a WorkloadInstance of EndpointSlice
}
sg.SNIListener, err = hbone.ListenAndServeTCP(sniPort, h2r.HandleSNIConn)
if err != nil {
return err
}
sg.H2RListener, err = hbone.ListenAndServeTCP(h2rPort, h2r.HandlerH2RConn)
if err != nil {
return err
}
return nil
}
func (sg *MeshConnector) GetCARoot(ctx context.Context) (string, error) {
// TODO: depending on error, move on or report a real error
kr := sg.Mesh
cm, err := kr.GetCM(ctx, "istio-system", "istio-ca-root-cert")
if err != nil {
if mesh.Is404(err) {
return "", nil
}
return "", err
} else {
// normally mounted to /var/run/secrets/istio
rootCert := cm["root-cert.pem"]
if rootCert == "" {
return "", nil
} else {
return rootCert, nil
}
}
}
// FindTenant will try to find the XDSAddr using in-cluster info.
// This is called after K8S client has been initialized.
//
// For MCP, will expect a config map named 'env-asm-managed'
// For in-cluster, we'll lookup the connector's LB, which points to istio.istio-system.svc
//
// This depends on MCP and Istiod internal configs - the config map may be set with the XDS_ADDR and associated configs, in
// which case this will not be called.
func (sg *MeshConnector) FindTenant(ctx context.Context) error {
kr := sg.Mesh
if kr.ProjectNumber == "" {
log.Println("MCP requires PROJECT_NUMBER, attempting to use in-cluster")
return nil
}
cmname := os.Getenv("MCP_CONFIG")
if cmname == "" {
cmname = "env-asm-managed"
}
// TODO: find default tag, label, etc.
// Current code is written for MCP, use XDS_ADDR explicitly
// otherwise.
s, err := kr.Client.CoreV1().ConfigMaps("istio-system").Get(ctx,
cmname, metav1.GetOptions{})
if err != nil {
if mesh.Is404(err) {
return nil
}
return err
}
kr.MeshTenant = s.Data["CLOUDRUN_ADDR"]
log.Println("Istiod MCP discovered ", kr.MeshTenant, kr.XDSAddr,
kr.ProjectId, kr.ProjectNumber, kr.TrustDomain)
return nil
}
func (sg *MeshConnector) updateMeshEnv(ctx context.Context) error {
cmAPI := sg.Mesh.Client.CoreV1().ConfigMaps(sg.Namespace)
cm, err := cmAPI.Get(ctx, "mesh-env", metav1.GetOptions{})
if err != nil {
if !mesh.Is404(err) {
return err
}
// Not found, create:
cm = &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "mesh-env",
Namespace: "istio-system",
},
Data: map[string]string{},
}
sg.Mesh.SaveToMap(cm.Data)
_, err = cmAPI.Create(ctx, cm, metav1.CreateOptions{})
if err != nil {
log.Println("Failed to update config map, skipping ", err)
}
return nil
}
if !sg.Mesh.SaveToMap(cm.Data) {
return nil
}
_, err = cmAPI.Update(ctx, cm, metav1.UpdateOptions{})
if err != nil {
log.Println("Failed to update config map, skipping ", err)
} else {
log.Println("Update mesh env with defaults")
}
return nil
}
// Wait for the hgate and internal hgate service, set the config
func (sg *MeshConnector) WaitService(ctx context.Context) error {
for {
if ctx.Err() != nil {
return ctx.Err()
}
kr := sg.Mesh
ts, err := kr.Client.CoreV1().Services("istio-system").Get(ctx, "hgate", metav1.GetOptions{})
if err != nil {
if !mesh.Is404(err) {
log.Println("Error getting service hgate ", err)
return err
}
}
if ts != nil && len(ts.Status.LoadBalancer.Ingress) > 0 {
sg.Mesh.MeshConnectorAddr = ts.Status.LoadBalancer.Ingress[0].IP
return nil
}
time.Sleep(200 * time.Millisecond)
}
}
func (sg *MeshConnector) WaitInternalService(ctx context.Context) error {
for {
if ctx.Err() != nil {
return ctx.Err()
}
kr := sg.Mesh
ts, err := kr.Client.CoreV1().Services("istio-system").Get(ctx, "internal-hgate", metav1.GetOptions{})
if err != nil {
if !mesh.Is404(err) {
log.Println("Error getting service hgate ", err)
return err
}
}
if ts != nil && len(ts.Status.LoadBalancer.Ingress) > 0 {
sg.Mesh.MeshConnectorInternalAddr = ts.Status.LoadBalancer.Ingress[0].IP
return nil
}
time.Sleep(200 * time.Millisecond)
}
}
| [
"\"MCP_CONFIG\""
]
| []
| [
"MCP_CONFIG"
]
| [] | ["MCP_CONFIG"] | go | 1 | 0 | |
mindmeld/components/_config.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the Config class.
"""
import copy
import imp
import logging
import os
import warnings
from .schemas import validate_language_code, validate_locale_code
from .. import path
from ..constants import CURRENCY_SYMBOLS
logger = logging.getLogger(__name__)
DUCKLING_SERVICE_NAME = "duckling"
DEFAULT_DUCKLING_URL = "http://localhost:7151/parse"
CONFIG_DEPRECATION_MAPPING = {
"DOMAIN_CLASSIFIER_CONFIG": "DOMAIN_MODEL_CONFIG",
"INTENT_CLASSIFIER_CONFIG": "INTENT_MODEL_CONFIG",
"ENTITY_RECOGNIZER_CONFIG": "ENTITY_MODEL_CONFIG",
"ROLE_CLASSIFIER_CONFIG": "ROLE_MODEL_CONFIG",
"ENTITY_RESOLVER_CONFIG": "ENTITY_RESOLUTION_CONFIG",
"QUESTION_ANSWERER_CONFIG": "QUESTION_ANSWERING_CONFIG",
"get_entity_recognizer_config": "get_entity_model_config",
"get_intent_classifier_config": "get_intent_model_config",
"get_entity_resolver_config": "get_entity_resolution_model_config",
"get_role_classifier_config": "get_role_model_config",
}
DEFAULT_DOMAIN_CLASSIFIER_CONFIG = {
"model_type": "text",
"model_settings": {"classifier_type": "logreg"},
"param_selection": {
"type": "k-fold",
"k": 10,
"grid": {"fit_intercept": [True, False], "C": [10, 100, 1000, 10000, 100000]},
},
"features": {"bag-of-words": {"lengths": [1]}, "freq": {"bins": 5}, "in-gaz": {}},
}
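# Illustrative sketch (not part of this module): an application would typically
# override these defaults with a dict of the same shape (e.g. a
# DOMAIN_CLASSIFIER_CONFIG defined in its own config.py); the dict below is only
# a hypothetical example of such an override, not a recommended setting.
EXAMPLE_DOMAIN_CLASSIFIER_OVERRIDE = {
    "model_type": "text",
    "model_settings": {"classifier_type": "logreg"},
    "params": {"C": 10},
    "features": {"bag-of-words": {"lengths": [1, 2]}, "in-gaz": {}},
}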
DEFAULT_INTENT_CLASSIFIER_CONFIG = {
"model_type": "text",
"model_settings": {"classifier_type": "logreg"},
"param_selection": {
"type": "k-fold",
"k": 10,
"grid": {
"fit_intercept": [True, False],
"C": [0.01, 1, 100, 10000, 1000000],
"class_bias": [1, 0.7, 0.3, 0],
},
},
"features": {
"bag-of-words": {"lengths": [1]},
"in-gaz": {},
"freq": {"bins": 5},
"length": {},
},
}
DEFAULT_ENTITY_RECOGNIZER_CONFIG = {
"model_type": "tagger",
"label_type": "entities",
"model_settings": {
"classifier_type": "memm",
"tag_scheme": "IOB",
"feature_scaler": "max-abs",
},
"param_selection": {
"type": "k-fold",
"k": 5,
"scoring": "accuracy",
"grid": {
"penalty": ["l1", "l2"],
"C": [0.01, 1, 100, 10000, 1000000, 100000000],
},
},
"features": {
"bag-of-words-seq": {
"ngram_lengths_to_start_positions": {
1: [-2, -1, 0, 1, 2],
2: [-2, -1, 0, 1],
}
},
"in-gaz-span-seq": {},
"sys-candidates-seq": {"start_positions": [-1, 0, 1]},
},
}
DEFAULT_ENTITY_RESOLVER_CONFIG = {
"model_type": "resolver",
"model_settings": {"resolver_type": "text_relevance", },
}
DEFAULT_ROLE_CLASSIFIER_CONFIG = {
"model_type": "text",
"model_settings": {"classifier_type": "logreg"},
"params": {"C": 100, "penalty": "l1"},
"features": {
"bag-of-words-before": {
"ngram_lengths_to_start_positions": {1: [-2, -1], 2: [-2, -1]}
},
"bag-of-words-after": {
"ngram_lengths_to_start_positions": {1: [0, 1], 2: [0, 1]}
},
"other-entities": {},
},
}
DEFAULT_QUESTION_ANSWERER_CONFIG = {
"model_type": "elasticsearch",
"model_settings": {
"query_type": "keyword"
}
}
ENGLISH_LANGUAGE_CODE = "en"
ENGLISH_US_LOCALE = "en_US"
DEFAULT_LANGUAGE_CONFIG = {
"language": ENGLISH_LANGUAGE_CODE,
"locale": ENGLISH_US_LOCALE,
}
# ElasticSearch mapping to define text analysis settings for text fields.
# It defines the index configuration specific to synonym indices. The common index
# configuration is in the default index template.
DEFAULT_ES_SYNONYM_MAPPING = {
"mappings": {
"properties": {
"sort_factor": {"type": "double"},
"whitelist": {
"type": "nested",
"properties": {
"name": {
"type": "text",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer",
},
},
"analyzer": "default_analyzer",
}
},
},
}
}
}
PHONETIC_ES_SYNONYM_MAPPING = {
"mappings": {
"properties": {
"sort_factor": {"type": "double"},
"whitelist": {
"type": "nested",
"properties": {
"name": {
"type": "text",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer",
},
"double_metaphone": {
"type": "text",
"analyzer": "phonetic_analyzer",
},
},
"analyzer": "default_analyzer",
}
},
},
"cname": {
"type": "text",
"analyzer": "default_analyzer",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"char_ngram": {"type": "text", "analyzer": "char_ngram_analyzer", },
"double_metaphone": {
"type": "text",
"analyzer": "phonetic_analyzer",
},
},
},
}
},
"settings": {
"analysis": {
"filter": {
"phonetic_filter": {
"type": "phonetic",
"encoder": "doublemetaphone",
"replace": True,
"max_code_len": 7,
}
},
"analyzer": {
"phonetic_analyzer": {
"filter": [
"lowercase",
"asciifolding",
"token_shingle",
"phonetic_filter",
],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3",
"remove_dot",
],
"type": "custom",
"tokenizer": "whitespace",
}
},
}
},
}
DEFAULT_ES_INDEX_TEMPLATE_NAME = "mindmeld_default"
# Default ES index template that contains the base index configuration shared across different
# types of indices. Currently all ES indices will be created using this template.
# - custom text analysis settings such as custom analyzers, token filters and character filters.
# - dynamic field mapping template for text fields
# - common fields, e.g. id.
DEFAULT_ES_INDEX_TEMPLATE = {
"template": "*",
"mappings": {
"dynamic_templates": [
{
"default_text": {
"match": "*",
"match_mapping_type": "string",
"mapping": {
"type": "text",
"analyzer": "default_analyzer",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"processed_text": {"type": "text", "analyzer": "english", },
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer",
},
},
},
}
}
],
"properties": {"id": {"type": "keyword"}},
},
"settings": {
"analysis": {
"char_filter": {
"remove_loose_apostrophes": {
"pattern": " '|' ",
"type": "pattern_replace",
"replacement": "",
},
"space_possessive_apostrophes": {
"pattern": "([^\\p{N}\\s]+)'s ",
"type": "pattern_replace",
"replacement": "$1 's ",
},
"remove_special_beginning": {
"pattern": "^[^\\p{L}\\p{N}\\p{Sc}&']+",
"type": "pattern_replace",
"replacement": "",
},
"remove_special_end": {
"pattern": "[^\\p{L}\\p{N}&']+$",
"type": "pattern_replace",
"replacement": "",
},
"remove_special1": {
"pattern": "([\\p{L}]+)[^\\p{L}\\p{N}&']+(?=[\\p{N}\\s]+)",
"type": "pattern_replace",
"replacement": "$1 ",
},
"remove_special2": {
"pattern": "([\\p{N}]+)[^\\p{L}\\p{N}&']+(?=[\\p{L}\\s]+)",
"type": "pattern_replace",
"replacement": "$1 ",
},
"remove_special3": {
"pattern": "([\\p{L}]+)[^\\p{L}\\p{N}&']+(?=[\\p{L}]+)",
"type": "pattern_replace",
"replacement": "$1 ",
},
"remove_comma": {
"pattern": ",",
"type": "pattern_replace",
"replacement": "",
},
"remove_tm_and_r": {
"pattern": "™|®",
"type": "pattern_replace",
"replacement": "",
},
"remove_dot": {
"pattern": "([\\p{L}]+)[.]+(?=[\\p{L}\\s]+)",
"type": "pattern_replace",
"replacement": "$1",
},
},
"filter": {
"token_shingle": {
"max_shingle_size": "4",
"min_shingle_size": "2",
"output_unigrams": "true",
"type": "shingle",
},
"ngram_filter": {"type": "ngram", "min_gram": "3", "max_gram": "3"},
},
"analyzer": {
"default_analyzer": {
"filter": ["lowercase", "asciifolding", "token_shingle"],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3",
],
"type": "custom",
"tokenizer": "whitespace",
},
"keyword_match_analyzer": {
"filter": ["lowercase", "asciifolding"],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3",
],
"type": "custom",
"tokenizer": "keyword",
},
"char_ngram_analyzer": {
"filter": ["lowercase", "asciifolding", "ngram_filter"],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3",
],
"type": "custom",
"tokenizer": "whitespace",
},
},
}
},
}
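# Illustrative note (not part of the original module): a hedged sketch of how the
# custom char_filters in `default_analyzer` above transform a hypothetical field
# value before tokenization. The sample text and intermediate results are examples
# only, assuming Elasticsearch applies the filters in the order listed.
#
#   "Dennis's Deli, Inc.(TM)"
#     remove_comma / remove_tm_and_r   -> "Dennis's Deli Inc."
#     space_possessive_apostrophes     -> "Dennis 's Deli Inc."
#     remove_special_end               -> "Dennis 's Deli Inc"
#
# The whitespace tokenizer plus the lowercase/asciifolding/shingle filters then
# produce the final index terms.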
# Elasticsearch mapping to define knowledge-base-index-specific configuration:
# - dynamic field mapping to index all synonym whitelists in fields with a "$whitelist" suffix.
# - location field
#
# The common configuration is defined in the default index template.
DEFAULT_ES_QA_MAPPING = {
"mappings": {
"dynamic_templates": [
{
"synonym_whitelist_text": {
"match": "*$whitelist",
"match_mapping_type": "object",
"mapping": {
"type": "nested",
"properties": {
"name": {
"type": "text",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer",
},
},
"analyzer": "default_analyzer",
}
},
},
}
}
],
"properties": {"location": {"type": "geo_point"}},
}
}
DEFAULT_PARSER_DEPENDENT_CONFIG = {
"left": True,
"right": True,
"min_instances": 0,
"max_instances": None,
"precedence": "left",
"linking_words": frozenset(),
}
DEFAULT_RANKING_CONFIG = {"query_clauses_operator": "or"}
DEFAULT_NLP_CONFIG = {
"resolve_entities_using_nbest_transcripts": [],
"system_entity_recognizer": {
"type": DUCKLING_SERVICE_NAME,
"url": DEFAULT_DUCKLING_URL,
},
}
DEFAULT_AUGMENTATION_CONFIG = {
"augmentor_class": "EnglishParaphraser",
"batch_size": 8,
"paths": [{"domains": ".*", "intents": ".*", "files": ".*", }],
"path_suffix": "-augment.txt",
}
DEFAULT_AUTO_ANNOTATOR_CONFIG = {
"annotator_class": "MultiLingualAnnotator",
"overwrite": False,
"annotation_rules": [
{"domains": ".*", "intents": ".*", "files": ".*", "entities": ".*", }
],
"unannotate_supported_entities_only": True,
"unannotation_rules": None,
"translator": "NoOpTranslator",
}
DEFAULT_TOKENIZER_CONFIG = {
# populated in the `get_tokenizer_config` func
"allowed_patterns": [],
"tokenizer": "WhiteSpaceTokenizer",
"normalizer": "ASCIIFold",
}
DEFAULT_ACTIVE_LEARNING_CONFIG = {
"output_folder": None,
"pre_tuning": {
"train_pattern": ".*train.*.txt",
"test_pattern": ".*test.*.txt",
"train_seed_pct": 0.20,
},
"tuning": {
"n_classifiers": 3,
"n_epochs": 5,
"batch_size": 100,
"tuning_level": "domain",
"tuning_strategies": [
"LeastConfidenceSampling",
"MarginSampling",
"EntropySampling",
"RandomSampling",
"DisagreementSampling",
"EnsembleSampling",
"KLDivergenceSampling",
],
},
"tuning_output": {
"save_sampled_queries": True,
"aggregate_statistic": "accuracy",
"class_level_statistic": "f_beta",
},
"query_selection": {
"selection_strategy": "EntropySampling",
"log_usage_pct": 1.00,
"labeled_logs_pattern": None,
"unlabeled_logs_path": "logs.txt",
},
}
class NlpConfigError(Exception):
pass
def get_custom_action_config(app_path):
if not app_path:
return None
try:
custom_action_config = getattr(
_get_config_module(app_path), "CUSTOM_ACTION_CONFIG", None
)
return custom_action_config
except (OSError, IOError):
logger.info("No app configuration file found.")
return None
def get_max_history_len(app_path):
if not app_path:
return None
try:
custom_action_config = getattr(
_get_config_module(app_path), "MAX_HISTORY_LEN", None
)
return custom_action_config
except (OSError, IOError):
logger.info("No app configuration file found.")
return None
def get_language_config(app_path):
if not app_path:
return ENGLISH_LANGUAGE_CODE, ENGLISH_US_LOCALE
try:
language_config = getattr(
_get_config_module(app_path), "LANGUAGE_CONFIG", DEFAULT_LANGUAGE_CONFIG
)
locale = language_config.get("locale")
language = language_config.get("language")
resolved_language = resolve_language(language, locale)
return resolved_language, locale
except (OSError, IOError):
logger.info(
"No app configuration file found. Using default language and locale."
)
return ENGLISH_LANGUAGE_CODE, ENGLISH_US_LOCALE
def resolve_language(language=None, locale=None):
"""
Resolves to a language given a locale.
"""
locale = validate_locale_code(locale)
language = validate_language_code(language)
# Locale overrides language
if locale:
language = locale.split("_")[0]
if not language:
language = ENGLISH_LANGUAGE_CODE
return language.lower()
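# Illustrative usage sketch (not part of the original module); the argument values
# are hypothetical and assumed to pass the schema validators imported above.
#
#   resolve_language(language="es", locale="en_US")  # -> "en"  (locale overrides language)
#   resolve_language(language="FR")                  # -> "fr"  (lower-cased)
#   resolve_language()                               # -> "en"  (defaults to English)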
def get_app_namespace(app_path):
"""Returns the namespace of the application at app_path"""
try:
_app_namespace = _get_config_module(app_path).APP_NAMESPACE
if "JUPYTER_USER" in os.environ:
_app_namespace = "{}_{}".format(os.environ["JUPYTER_USER"], _app_namespace)
return _app_namespace
except (OSError, IOError):
logger.debug("No app configuration file found")
except AttributeError:
logger.debug("App namespace not set in app configuration")
# If a relative path is passed in, we resolve to its abspath
app_path = os.path.abspath(app_path) if not os.path.isabs(app_path) else app_path
_app_namespace = os.path.split(app_path)[1]
if "JUPYTER_USER" in os.environ:
_app_namespace = "{jupyter_user}_{app_namespace}".format(
jupyter_user=os.environ["JUPYTER_USER"], app_namespace=_app_namespace
)
return _app_namespace
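# Illustrative note (not part of the original module): with a hypothetical
# app_path of "/apps/food_ordering", no APP_NAMESPACE defined in config.py, and
# JUPYTER_USER="alice", the fallback above resolves to "alice_food_ordering".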
def is_duckling_configured(app_path):
"""Returns True if the app config specifies that duckling should be run
as a system entity recognizer
Args:
        app_path (str): An application path
Returns:
(bool): True if the app config specifies that the numerical parsing
should be run
"""
if not app_path:
raise NlpConfigError("Application path is not valid")
config = get_nlp_config(app_path).get("system_entity_recognizer")
if isinstance(config, dict):
# We get into this conditional when the app has specified the system_entity_recognizer
# nlp config
return config.get("type") == DUCKLING_SERVICE_NAME
else:
# We get into this conditional when the app has not specified the system_entity_recognizer
# nlp config, in which case, we default to the duckling API
return True
def get_system_entity_url_config(app_path):
"""
Get system entity url from the application's config. If the application does not define the url,
return the default duckling url.
"""
if not app_path:
raise NlpConfigError("Application path is not valid")
return (
get_nlp_config(app_path)
.get("system_entity_recognizer", {})
.get("url", DEFAULT_DUCKLING_URL)
)
def get_classifier_config(
clf_type, app_path=None, domain=None, intent=None, entity=None
):
"""Returns the config for the specified classifier, with the
following order of precedence.
If the application contains a config.py file:
- Return the response from the get_*_model_config function in
config.py for the specified classifier type. E.g.
`get_intent_model_config`.
    - If the function does not exist or raises an exception, return the
config specified by *_MODEL_CONFIG in config.py, e.g.
INTENT_MODEL_CONFIG.
Otherwise, use the MindMeld default config for the classifier type
Args:
clf_type (str): The type of the classifier. One of 'domain',
'intent', 'entity', 'entity_resolution', or 'role'.
app_path (str, optional): The location of the app
domain (str, optional): The domain of the classifier
intent (str, optional): The intent of the classifier
entity (str, optional): The entity type of the classifier
Returns:
dict: A classifier config
"""
try:
module_conf = _get_config_module(app_path)
except (TypeError, OSError, IOError):
logger.info(
"No app configuration file found. Using default %s model configuration",
clf_type,
)
return _get_default_classifier_config(clf_type)
func_name = {
"intent": "get_intent_classifier_config",
"entity": "get_entity_recognizer_config",
"entity_resolution": "get_entity_resolver_config",
"role": "get_role_classifier_config",
}.get(clf_type)
func_args = {
"intent": ("domain",),
"entity": ("domain", "intent"),
"entity_resolution": ("domain", "intent", "entity"),
"role": ("domain", "intent", "entity"),
}.get(clf_type)
if func_name:
func = None
try:
func = getattr(module_conf, func_name)
except AttributeError:
try:
func = getattr(module_conf, CONFIG_DEPRECATION_MAPPING[func_name])
msg = (
"%s config key is deprecated. Please use the equivalent %s config "
"key" % (CONFIG_DEPRECATION_MAPPING[func_name], func_name)
)
warnings.warn(msg, DeprecationWarning)
except AttributeError:
pass
if func:
try:
raw_args = {"domain": domain, "intent": intent, "entity": entity}
args = {k: raw_args[k] for k in func_args}
return copy.deepcopy(func(**args))
except Exception as exc: # pylint: disable=broad-except
# Note: this is intentionally broad -- provider could raise any exception
logger.warning(
"%r configuration provider raised exception: %s", clf_type, exc
)
attr_name = {
"domain": "DOMAIN_CLASSIFIER_CONFIG",
"intent": "INTENT_CLASSIFIER_CONFIG",
"entity": "ENTITY_RECOGNIZER_CONFIG",
"entity_resolution": "ENTITY_RESOLVER_CONFIG",
"role": "ROLE_CLASSIFIER_CONFIG",
"question_answering": "QUESTION_ANSWERER_CONFIG",
}[clf_type]
try:
return copy.deepcopy(getattr(module_conf, attr_name))
except AttributeError:
try:
result = copy.deepcopy(
getattr(module_conf, CONFIG_DEPRECATION_MAPPING[attr_name])
)
msg = (
"%s config is deprecated. Please use the equivalent %s config "
"key" % (CONFIG_DEPRECATION_MAPPING[attr_name], attr_name)
)
warnings.warn(msg, DeprecationWarning)
return result
except AttributeError:
logger.info("No %s model configuration set. Using default.", clf_type)
return _get_default_classifier_config(clf_type)
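# Illustrative note (not part of the original module): a hedged sketch of what an
# app-level config.py might define so that the provider-function branch above is
# used. The domain name and parameter values are hypothetical examples.
#
#   # <app>/config.py
#   def get_intent_model_config(domain):
#       if domain == "store_info":
#           return {
#               "model_type": "text",
#               "model_settings": {"classifier_type": "logreg"},
#               "params": {"C": 10},
#           }
#       raise ValueError("no override")  # falls through to *_MODEL_CONFIG / defaults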
def _get_default_classifier_config(clf_type):
return copy.deepcopy(
{
"domain": DEFAULT_DOMAIN_CLASSIFIER_CONFIG,
"intent": DEFAULT_INTENT_CLASSIFIER_CONFIG,
"entity": DEFAULT_ENTITY_RECOGNIZER_CONFIG,
"entity_resolution": DEFAULT_ENTITY_RESOLVER_CONFIG,
"role": DEFAULT_ROLE_CLASSIFIER_CONFIG,
"language_config": DEFAULT_LANGUAGE_CONFIG,
"question_answering": DEFAULT_QUESTION_ANSWERER_CONFIG,
}[clf_type]
)
def get_parser_config(app_path=None, config=None, domain=None, intent=None):
"""Gets the fully specified parser configuration for the app at the
given path.
Args:
app_path (str, optional): The location of the MindMeld app
config (dict, optional): A config object to use. This will
override the config specified by the app's config.py file.
If necessary, this object will be expanded to a fully
specified config object.
domain (str, optional): The domain of the parser
intent (str, optional): The intent of the parser
Returns:
        dict: A fully specified parser configuration
"""
if config:
return _expand_parser_config(config)
if not app_path:
raise NlpConfigError("Application path is not valid")
try:
module_conf = _get_config_module(app_path)
except (OSError, IOError):
logger.info("No app configuration file found. Not configuring parser.")
return _get_default_parser_config()
# Try provider first
config_provider = None
try:
config_provider = module_conf.get_parser_config
except AttributeError:
pass
if config_provider:
try:
config = config or config_provider(domain, intent)
return _expand_parser_config(config)
except Exception as exc: # pylint: disable=broad-except
# Note: this is intentionally broad -- provider could raise any exception
logger.warning("Parser configuration provider raised exception: %s", exc)
# Try object second
try:
config = config or module_conf.PARSER_CONFIG
return _expand_parser_config(config)
except AttributeError:
pass
return _get_default_parser_config()
def _get_default_parser_config():
return None
def _expand_parser_config(config):
# Replace with -- since | has a special meaning for parser
return {
head.replace("|", "--"): _expand_group_config(group)
for head, group in config.items()
}
def _expand_group_config(group_config):
"""Expands a parser group configuration.
A group config can either be a list of dependents or a dictionary with a
field for each dependent.
In the list a dependent can be a string containing the name of the
entity-role type identifier or a dictionary with at least a type field.
In the dictionary the dependent must be another dictionary.
Some example parser configs follow below.
A very simple configuration:
{
'head': ['dependent']
}
A more realistic simple config:
{
'product|beverage': ['size', 'quantity', 'option|beverage'],
'product|baked-good': ['size', 'quantity', 'option|baked-good'],
'store': ['location'],
'option': ['size']
}
A fully specified config:
{
'product': {
'quantity': {
'left': True,
'right': True,
'precedence': 'left',
'min_instances': 0,
'max_instances': 3
},
'size': {
'left': True,
'right': True,
'precedence': 'left',
'min_instances': 0,
'max_instances': 1
},
'option': {
'left': True,
'right': True,
'precedence': 'left',
'min_instances': 0,
'max_instances': 1
}
},
'store': {
'location': {
'left': True,
'right': True,
'precedence': 'left',
'min_instances': 0,
'max_instances': 1
}
},
'option': {
'size': {
'left': True,
'right': True,
'precedence': 'left',
'min_instances': 0,
'max_instances': 1
}
}
}
"""
group_config = copy.deepcopy(group_config)
expanded = {}
if isinstance(group_config, (tuple, list, set)):
for dependent in group_config:
config = copy.copy(DEFAULT_PARSER_DEPENDENT_CONFIG)
try:
dep_type = dependent.pop("type")
config.update(dependent)
except (AttributeError, ValueError):
# simple style config -- dependent is a str
dep_type = dependent
# Replace with -- since | has a special meaning for parser
expanded[dep_type.replace("|", "--")] = config
else:
for dep_type, dep_config in group_config.items():
config = copy.copy(DEFAULT_PARSER_DEPENDENT_CONFIG)
dep_config.pop("type", None)
config.update(dep_config)
# Replace with -- since | has a special meaning for parser
expanded[dep_type.replace("|", "--")] = config
return expanded
def _get_config_module(app_path):
module_path = path.get_config_module_path(app_path)
config_module = imp.load_source(
"config_module_" + os.path.basename(app_path), module_path
)
return config_module
def _get_default_nlp_config():
return copy.deepcopy(DEFAULT_NLP_CONFIG)
def get_nlp_config(app_path=None, config=None):
"""Gets the fully specified processor configuration for the app at the
given path.
Args:
app_path (str, optional): The location of the MindMeld app
config (dict, optional): A config object to use. This will
override the config specified by the app's config.py file.
If necessary, this object will be expanded to a fully
specified config object.
Returns:
dict: The nbest inference configuration
"""
if config:
return config
try:
module_conf = _get_config_module(app_path)
except (OSError, IOError):
logger.info("No app configuration file found. Using default nlp config.")
return _get_default_nlp_config()
# Try provider first
try:
return copy.deepcopy(module_conf.get_nlp_config())
except AttributeError:
pass
# Try object second
try:
config = config or module_conf.NLP_CONFIG
return config
except AttributeError:
pass
return _get_default_nlp_config()
def get_augmentation_config(app_path=None):
"""Gets the augmentation config for the app.
Args:
app_path (str, optional): The location of the MindMeld app.
Returns:
dict: The augmentation config.
"""
try:
augmentation_config = getattr(
_get_config_module(app_path),
"AUGMENTATION_CONFIG",
DEFAULT_AUGMENTATION_CONFIG,
)
return augmentation_config
except (OSError, IOError, AttributeError):
logger.info(
"No app configuration file found. Using the default augmentation config."
)
return DEFAULT_AUGMENTATION_CONFIG
def get_auto_annotator_config(app_path=None):
"""Gets the automatic annotator config for the app at the
given path.
Args:
app_path (str, optional): The location of the MindMeld app
Returns:
dict: The automatic annotator config.
"""
try:
auto_annotator_config = getattr(
_get_config_module(app_path),
"AUTO_ANNOTATOR_CONFIG",
DEFAULT_AUTO_ANNOTATOR_CONFIG,
)
return auto_annotator_config
except (OSError, IOError):
logger.info(
"No app configuration file found. Using the default automatic annotator config."
)
return DEFAULT_AUTO_ANNOTATOR_CONFIG
def _get_default_regex(exclude_from_norm):
"""Gets the default special character regex for the Tokenizer config.
Args:
exclude_from_norm (optional) - list of chars to exclude from normalization
Returns:
list: default special character regex list
"""
# List of regex's for matching and tokenizing when keep_special_chars=True
keep_special_regex_list = []
exception_chars = "\@\[\]\|\{\}'" # noqa: W605
to_exclude = CURRENCY_SYMBOLS + "".join(exclude_from_norm or [])
letter_pattern_str = "[^\W\d_]+" # noqa: W605
# Make keep special regex list
keep_special_regex_list.append(
"?P<start>^[^\w\d&" + to_exclude + exception_chars + "]+" # noqa: W605
)
keep_special_regex_list.append(
"?P<end>[^\w\d&" + to_exclude + exception_chars + "]+$" # noqa: W605
)
keep_special_regex_list.append(
"?P<pattern1>(?P<pattern1_replace>" # noqa: W605
+ letter_pattern_str
+ ")"
+ "[^\w\d\s&" # noqa: W605
+ exception_chars
+ "]+(?=[\d]+)" # noqa: W605
)
keep_special_regex_list.append(
"?P<pattern2>(?P<pattern2_replace>[\d]+)[^\w\d\s&" # noqa: W605
+ exception_chars
+ "]+"
+ "u(?="
+ letter_pattern_str
+ ")"
)
keep_special_regex_list.append(
"?P<pattern3>(?P<pattern3_replace>"
+ letter_pattern_str
+ ")" # noqa: W605
+ "[^\w\d\s&" # noqa: W605
+ exception_chars
+ "]+"
+ "(?=" # noqa: W605
+ letter_pattern_str
+ ")"
)
keep_special_regex_list.append(
"?P<escape1>(?P<escape1_replace>[\w\d]+)" # noqa: W605
+ "[^\w\d\s" # noqa: W605
+ exception_chars
+ "]+"
+ "(?=\|)" # noqa: W605
)
keep_special_regex_list.append(
"?P<escape2>(?P<escape2_replace>[\]\}]+)" # noqa: W605
+ "[^\w\d\s" # noqa: W605
+ exception_chars
+ "]+(?=s)"
)
keep_special_regex_list.append("?P<underscore>_") # noqa: W605
keep_special_regex_list.append("?P<begspace>^\s+") # noqa: W605
keep_special_regex_list.append("?P<trailspace>\s+$") # noqa: W605
keep_special_regex_list.append("?P<spaceplus>\s+") # noqa: W605
keep_special_regex_list.append("?P<apos_space> '|' ") # noqa: W605
keep_special_regex_list.append("?P<apos_s>(?<=[^\\s])'[sS]") # noqa: W605
# handle the apostrophes used at the end of a possessive form, e.g. dennis'
keep_special_regex_list.append("?P<apos_poss>(^'(?=\S)|(?<=\S)'$)") # noqa: W605
return keep_special_regex_list
def get_tokenizer_config(app_path=None, exclude_from_norm=None):
"""Gets the tokenizer configuration for the app at the specified path.
Args:
app_path (str, optional): The location of the MindMeld app
exclude_from_norm (list, optional): chars to exclude from normalization
Returns:
dict: The tokenizer configuration.
"""
DEFAULT_TOKENIZER_CONFIG["default_allowed_patterns"] = _get_default_regex(
exclude_from_norm
)
if not app_path:
return DEFAULT_TOKENIZER_CONFIG
try:
tokenizer_config = getattr(
_get_config_module(app_path), "TOKENIZER_CONFIG", DEFAULT_TOKENIZER_CONFIG
)
if not tokenizer_config.get("allowed_patterns"):
# If allowed_patterns are not provided, use default
tokenizer_config["allowed_patterns"] = []
tokenizer_config["default_allowed_patterns"] = _get_default_regex(
exclude_from_norm
)
return tokenizer_config
except (OSError, IOError, AttributeError):
logger.info("No app configuration file found. Using default tokenizer config.")
return DEFAULT_TOKENIZER_CONFIG
def get_active_learning_config(app_path=None):
"""Gets the active learning configuration for the app at the specified path.
Args:
app_path (str, optional): The location of the MindMeld app
Returns:
dict: The active learning configuration.
"""
if not app_path:
return DEFAULT_ACTIVE_LEARNING_CONFIG
try:
active_learning_config = getattr(
_get_config_module(app_path),
"ACTIVE_LEARNING_CONFIG",
DEFAULT_ACTIVE_LEARNING_CONFIG,
)
return active_learning_config
except (OSError, IOError, AttributeError):
logger.info("No app configuration file found.")
return DEFAULT_ACTIVE_LEARNING_CONFIG
| [] | [] | ["JUPYTER_USER"] | [] | ["JUPYTER_USER"] | python | 1 | 0 |
main.go | // Package main provides all the functionality for GoThanks.
// GoThanks automatically stars Go's official repository and your go.mod github dependencies,
// providing a simple way to to say thanks to the maintainers of the modules you use and the contributors of Go itself.
//
// Usage:
//
// In order to run GoThanks you need to have a valid Github Access Token.
// You can pass the token as an argument to GoThanks or store it in an environment variable named GITHUB_TOKEN.
//
// Inside the folder where your go.mod lives run:
//
// $ ./gothanks -github-token=xxxxxx
//
// or
//
// $ export GITHUB_TOKEN=xxxxx
// $ ./gothanks
package main
import (
"bufio"
"context"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"strings"
"sync"
"github.com/google/go-github/v28/github"
"github.com/sirkon/goproxy/gomod"
"golang.org/x/oauth2"
)
func main() {
githubToken := flag.String("github-token", os.Getenv("GITHUB_TOKEN"), "Github access token")
assumeYes := flag.Bool("y", false, "Automatic yes to prompts and run non-interactively.")
flag.Parse()
if *githubToken == "" {
fmt.Println("no Github token found")
os.Exit(-1)
}
dir, err := os.Getwd()
if err != nil {
fmt.Println(err)
os.Exit(-1)
}
input, err := ioutil.ReadFile(dir + "/go.mod")
if err != nil {
fmt.Println(err)
os.Exit(-1)
}
fmt.Println("Welcome to GoThanks :)")
	// Ask the user whether they want to continue
	if !*assumeYes {
		fmt.Println("\nYou are about to star your beloved dependencies.\n\nPress y to continue or n to abort")
confirmed, err := confirm(os.Stdin)
if err != nil {
fmt.Println(err)
os.Exit(-1)
}
if !confirmed {
fmt.Println("Aborting.")
os.Exit(0)
}
}
parseResult, err := gomod.Parse("", input)
if err != nil {
fmt.Println(err)
os.Exit(-1)
}
ctx := context.Background()
client := githubClient(ctx, *githubToken)
// Always send your love to Go!
repos := []string{"github.com/golang/go"}
for dep := range parseResult.Require {
repos = append(repos, dep)
}
fmt.Print("\nSending your love..\n\n")
var wg sync.WaitGroup
for _, dep := range repos {
wg.Add(1)
go func(dep string, wg *sync.WaitGroup) {
defer wg.Done()
if rep, ok := isGithubRepo(dep); ok {
x, _, err := client.Activity.IsStarred(ctx, rep.owner, rep.repo)
if err != nil {
fmt.Println(err)
return
}
if x {
fmt.Printf("Repository %s is already starred!\n", rep.path)
return
}
fmt.Printf("Sending a star to %s\n", rep.path)
_, err = client.Activity.Star(ctx, rep.owner, rep.repo)
if err != nil {
fmt.Printf("Could not star %s %s\n", rep.path, err)
}
}
}(dep, &wg)
}
wg.Wait()
fmt.Println("\nThank you!")
}
type githubRepo struct {
path string
owner string
repo string
}
func githubClient(ctx context.Context, token string) *github.Client {
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: token},
)
tc := oauth2.NewClient(ctx, ts)
return github.NewClient(tc)
}
func confirm(in io.Reader) (bool, error) {
reader := bufio.NewReader(in)
char, _, err := reader.ReadRune()
if err != nil {
return false, err
}
if i := strings.ToLower(strings.TrimSpace(string(char))); i == "y" {
return true, nil
}
return false, nil
}
func isGithubRepo(path string) (githubRepo, bool) {
// Make sure we do not forget to star the Github mirrors of Go's subpackages
path = strings.Replace(path, "golang.org/x/", "github.com/golang/", -1)
re := regexp.MustCompile(`^github\.com\/[a-zA-Z\d-]+\/[a-zA-Z\d-_\.]+`)
repoPath := re.FindString(path)
if repoPath != "" {
parts := strings.Split(repoPath, "/")
res := githubRepo{
path: repoPath,
owner: parts[1],
repo: parts[2],
}
return res, true
}
return githubRepo{}, false
}
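// Illustrative examples (not part of the original file) of how isGithubRepo maps
// module paths; the inputs are hypothetical and assume the regexp above.
//
//	isGithubRepo("golang.org/x/oauth2")             -> {github.com/golang/oauth2, golang, oauth2}, true
//	isGithubRepo("github.com/sirkon/goproxy/gomod") -> {github.com/sirkon/goproxy, sirkon, goproxy}, true
//	isGithubRepo("gopkg.in/yaml.v2")                -> zero githubRepo, false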
| ["\"GITHUB_TOKEN\""] | [] | ["GITHUB_TOKEN"] | [] | ["GITHUB_TOKEN"] | go | 1 | 0 |
fairseq/examples/speech_recognition/new/infer.py | #!/usr/bin/env python -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import hashlib
import logging
import os
import shutil
import sys
from dataclasses import dataclass, field, is_dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import editdistance
import torch
import torch.distributed as dist
from examples.speech_recognition.new.decoders.decoder_config import (
DecoderConfig,
FlashlightDecoderConfig,
)
from examples.speech_recognition.new.decoders.decoder import Decoder
from fairseq import checkpoint_utils, distributed_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
FairseqDataclass,
)
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.logging.progress_bar import BaseProgressBar
from fairseq.models.fairseq_model import FairseqModel
from omegaconf import OmegaConf
import hydra
from hydra.core.config_store import ConfigStore
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
config_path = Path(__file__).resolve().parent / "conf"
@dataclass
class DecodingConfig(DecoderConfig, FlashlightDecoderConfig):
unique_wer_file: bool = field(
default=False,
metadata={"help": "If set, use a unique file for storing WER"},
)
results_path: Optional[str] = field(
default=None,
metadata={
"help": "If set, write hypothesis and reference sentences into this directory"
},
)
@dataclass
class InferConfig(FairseqDataclass):
task: Any = None
decoding: DecodingConfig = DecodingConfig()
common: CommonConfig = CommonConfig()
common_eval: CommonEvalConfig = CommonEvalConfig()
checkpoint: CheckpointConfig = CheckpointConfig()
distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
dataset: DatasetConfig = DatasetConfig()
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
def reset_logging():
root = logging.getLogger()
for handler in root.handlers:
root.removeHandler(handler)
root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
root.addHandler(handler)
class InferenceProcessor:
cfg: InferConfig
def __init__(self, cfg: InferConfig) -> None:
self.cfg = cfg
self.task = tasks.setup_task(cfg.task)
self.tgt_dict = self.task.target_dictionary
models, saved_cfg = self.load_model_ensemble()
self.models = models
self.saved_cfg = saved_cfg
self.task.load_dataset(
self.cfg.dataset.gen_subset,
task_cfg=saved_cfg.task,
)
self.generator = Decoder(cfg.decoding, self.tgt_dict)
self.gen_timer = StopwatchMeter()
self.wps_meter = TimeMeter()
self.num_sentences = 0
self.total_errors = 0
self.total_length = 0
self.hypo_words_file = None
self.hypo_units_file = None
self.ref_words_file = None
self.ref_units_file = None
self.progress_bar = self.build_progress_bar()
def __enter__(self) -> "InferenceProcessor":
if self.cfg.decoding.results_path is not None:
self.hypo_words_file = self.get_res_file("hypo.word")
self.hypo_units_file = self.get_res_file("hypo.units")
self.ref_words_file = self.get_res_file("ref.word")
self.ref_units_file = self.get_res_file("ref.units")
return self
def __exit__(self, *exc) -> bool:
if self.cfg.decoding.results_path is not None:
self.hypo_words_file.close()
self.hypo_units_file.close()
self.ref_words_file.close()
self.ref_units_file.close()
return False
def __iter__(self) -> Any:
for sample in self.progress_bar:
if not self.cfg.common.cpu:
sample = utils.move_to_cuda(sample)
# Happens on the last batch.
if "net_input" not in sample:
continue
yield sample
def log(self, *args, **kwargs):
self.progress_bar.log(*args, **kwargs)
def print(self, *args, **kwargs):
self.progress_bar.print(*args, **kwargs)
def get_res_file(self, fname: str) -> None:
fname = os.path.join(self.cfg.decoding.results_path, fname)
if self.data_parallel_world_size > 1:
fname = f"{fname}.{self.data_parallel_rank}"
return open(fname, "w", buffering=1)
def merge_shards(self) -> None:
"""Merges all shard files into shard 0, then removes shard suffix."""
shard_id = self.data_parallel_rank
num_shards = self.data_parallel_world_size
if self.data_parallel_world_size > 1:
def merge_shards_with_root(fname: str) -> None:
fname = os.path.join(self.cfg.decoding.results_path, fname)
logger.info("Merging %s on shard %d", fname, shard_id)
base_fpath = Path(f"{fname}.0")
with open(base_fpath, "a") as out_file:
for s in range(1, num_shards):
shard_fpath = Path(f"{fname}.{s}")
with open(shard_fpath, "r") as in_file:
for line in in_file:
out_file.write(line)
shard_fpath.unlink()
shutil.move(f"{fname}.0", fname)
dist.barrier() # ensure all shards finished writing
if shard_id == (0 % num_shards):
merge_shards_with_root("hypo.word")
if shard_id == (1 % num_shards):
merge_shards_with_root("hypo.units")
if shard_id == (2 % num_shards):
merge_shards_with_root("ref.word")
if shard_id == (3 % num_shards):
merge_shards_with_root("ref.units")
dist.barrier()
def optimize_model(self, model: FairseqModel) -> None:
model.make_generation_fast_()
if self.cfg.common.fp16:
model.half()
if not self.cfg.common.cpu:
model.cuda()
def load_model_ensemble(self) -> Tuple[List[FairseqModel], FairseqDataclass]:
arg_overrides = ast.literal_eval(self.cfg.common_eval.model_overrides)
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(self.cfg.common_eval.path, separator="\\"),
arg_overrides=arg_overrides,
task=self.task,
suffix=self.cfg.checkpoint.checkpoint_suffix,
strict=(self.cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=self.cfg.checkpoint.checkpoint_shard_count,
)
for model in models:
self.optimize_model(model)
return models, saved_cfg
def get_dataset_itr(self, disable_iterator_cache: bool = False) -> None:
return self.task.get_batch_iterator(
dataset=self.task.dataset(self.cfg.dataset.gen_subset),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.cfg.dataset.num_workers,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
).next_epoch_itr(shuffle=False)
def build_progress_bar(
self,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
default_log_format: str = "tqdm",
) -> BaseProgressBar:
return progress_bar.progress_bar(
iterator=self.get_dataset_itr(),
log_format=self.cfg.common.log_format,
log_interval=self.cfg.common.log_interval,
epoch=epoch,
prefix=prefix,
tensorboard_logdir=self.cfg.common.tensorboard_logdir,
default_log_format=default_log_format,
)
@property
def data_parallel_world_size(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 1
return distributed_utils.get_data_parallel_world_size()
@property
def data_parallel_rank(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 0
return distributed_utils.get_data_parallel_rank()
def process_sentence(
self,
sample: Dict[str, Any],
hypo: Dict[str, Any],
sid: int,
batch_id: int,
) -> Tuple[int, int]:
speaker = None # Speaker can't be parsed from dataset.
if "target_label" in sample:
toks = sample["target_label"]
else:
toks = sample["target"]
toks = toks[batch_id, :]
# Processes hypothesis.
hyp_pieces = self.tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, self.cfg.common_eval.post_process)
# Processes target.
target_tokens = utils.strip_pad(toks, self.tgt_dict.pad())
tgt_pieces = self.tgt_dict.string(target_tokens.int().cpu())
tgt_words = post_process(tgt_pieces, self.cfg.common_eval.post_process)
if self.cfg.decoding.results_path is not None:
print(f"{hyp_pieces} ({speaker}-{sid})", file=self.hypo_units_file)
print(f"{hyp_words} ({speaker}-{sid})", file=self.hypo_words_file)
print(f"{tgt_pieces} ({speaker}-{sid})", file=self.ref_units_file)
print(f"{tgt_words} ({speaker}-{sid})", file=self.ref_words_file)
if not self.cfg.common_eval.quiet:
logger.info(f"HYPO: {hyp_words}")
logger.info(f"REF: {tgt_words}")
logger.info("---------------------")
hyp_words, tgt_words = hyp_words.split(), tgt_words.split()
return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
def process_sample(self, sample: Dict[str, Any]) -> None:
self.gen_timer.start()
hypos = self.task.inference_step(
generator=self.generator,
models=self.models,
sample=sample,
)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
self.gen_timer.stop(num_generated_tokens)
self.wps_meter.update(num_generated_tokens)
for batch_id, sample_id in enumerate(sample["id"].tolist()):
errs, length = self.process_sentence(
sample=sample,
sid=sample_id,
batch_id=batch_id,
hypo=hypos[batch_id][0],
)
self.total_errors += errs
self.total_length += length
self.log({"wps": round(self.wps_meter.avg)})
if "nsentences" in sample:
self.num_sentences += sample["nsentences"]
else:
self.num_sentences += sample["id"].numel()
def log_generation_time(self) -> None:
logger.info(
"Processed %d sentences (%d tokens) in %.1fs %.2f "
"sentences per second, %.2f tokens per second)",
self.num_sentences,
self.gen_timer.n,
self.gen_timer.sum,
self.num_sentences / self.gen_timer.sum,
1.0 / self.gen_timer.avg,
)
def parse_wer(wer_file: Path) -> float:
with open(wer_file, "r") as f:
return float(f.readline().strip().split(" ")[1])
def get_wer_file(cfg: InferConfig) -> Path:
"""Hashes the decoding parameters to a unique file ID."""
base_path = "wer"
if cfg.decoding.results_path is not None:
base_path = os.path.join(cfg.decoding.results_path, base_path)
if cfg.decoding.unique_wer_file:
yaml_str = OmegaConf.to_yaml(cfg.decoding)
fid = int(hashlib.md5(yaml_str.encode("utf-8")).hexdigest(), 16)
return Path(f"{base_path}.{fid % 1000000}")
else:
return Path(base_path)
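# Illustrative note (not part of the original file): with decoding.unique_wer_file=True,
# runs that share the same decoding config hash to the same file, e.g. a hypothetical
# "<results_path>/wer.348291"; with the flag off, every run writes to the plain "wer"
# file (prefixed by results_path when that option is set).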
def main(cfg: InferConfig) -> float:
"""Entry point for main processing logic.
Args:
        cfg: The inference configuration to use.
    Returns:
        The final WER.
"""
yaml_str, wer_file = OmegaConf.to_yaml(cfg.decoding), get_wer_file(cfg)
# Validates the provided configuration.
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.max_tokens = 4000000
if not cfg.common.cpu and not torch.cuda.is_available():
raise ValueError("CUDA not found; set `cpu=True` to run without CUDA")
with InferenceProcessor(cfg) as processor:
for sample in processor:
processor.process_sample(sample)
processor.log_generation_time()
if cfg.decoding.results_path is not None:
processor.merge_shards()
errs_t, leng_t = processor.total_errors, processor.total_length
if cfg.common.cpu:
logger.warning("Merging WER requires CUDA.")
elif processor.data_parallel_world_size > 1:
stats = torch.LongTensor([errs_t, leng_t]).cuda()
dist.all_reduce(stats, op=dist.ReduceOp.SUM)
errs_t, leng_t = stats[0].item(), stats[1].item()
wer = errs_t * 100.0 / leng_t
if distributed_utils.is_master(cfg.distributed_training):
with open(wer_file, "w") as f:
f.write(
(
f"WER: {wer}\n"
f"err / num_ref_words = {errs_t} / {leng_t}\n\n"
f"{yaml_str}"
)
)
return wer
@hydra.main(config_path=config_path, config_name="infer")
def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]:
container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
cfg = OmegaConf.create(container)
OmegaConf.set_struct(cfg, True)
if cfg.common.reset_logging:
reset_logging()
# logger.info("Config:\n%s", OmegaConf.to_yaml(cfg))
wer = float("inf")
try:
if cfg.common.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
wer = parse_wer(get_wer_file(cfg))
except BaseException as e: # pylint: disable=broad-except
if not cfg.common.suppress_crashes:
raise
else:
logger.error("Crashed! %s", str(e))
logger.info("Word error rate: %.4f", wer)
if cfg.is_ax:
return wer, None
return wer
def cli_main() -> None:
try:
from hydra._internal.utils import (
get_args,
) # pylint: disable=import-outside-toplevel
cfg_name = get_args().config_name or "infer"
except ImportError:
logger.warning("Failed to get config name from hydra args")
cfg_name = "infer"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=InferConfig)
for k in InferConfig.__dataclass_fields__:
if is_dataclass(InferConfig.__dataclass_fields__[k].type):
v = InferConfig.__dataclass_fields__[k].default
cs.store(name=k, node=v)
hydra_main() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
cli_main()
| [] | [] | ["LOGLEVEL"] | [] | ["LOGLEVEL"] | python | 1 | 0 |
imaginary.go | package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"runtime"
d "runtime/debug"
"strconv"
"strings"
"time"
"github.com/h2non/bimg"
)
var (
aAddr = flag.String("a", "", "Bind address")
aPort = flag.Int("p", 8088, "Port to listen")
aVers = flag.Bool("v", false, "Show version")
aVersl = flag.Bool("version", false, "Show version")
aHelp = flag.Bool("h", false, "Show help")
aHelpl = flag.Bool("help", false, "Show help")
aPathPrefix = flag.String("path-prefix", "/", "Url path prefix to listen to")
aCors = flag.Bool("cors", false, "Enable CORS support")
aGzip = flag.Bool("gzip", false, "Enable gzip compression (deprecated)")
aAuthForwarding = flag.Bool("enable-auth-forwarding", false, "Forwards X-Forward-Authorization or Authorization header to the image source server. -enable-url-source flag must be defined. Tip: secure your server from public access to prevent attack vectors")
aEnableURLSource = flag.Bool("enable-url-source", false, "Enable remote HTTP URL image source processing")
aEnablePlaceholder = flag.Bool("enable-placeholder", false, "Enable image response placeholder to be used in case of error")
aEnableURLSignature = flag.Bool("enable-url-signature", false, "Enable URL signature (URL-safe Base64-encoded HMAC digest)")
aURLSignatureKey = flag.String("url-signature-key", "", "The URL signature key (32 characters minimum)")
aAllowedOrigins = flag.String("allowed-origins", "", "Restrict remote image source processing to certain origins (separated by commas). Note: Origins are validated against host *AND* path.")
aMaxAllowedSize = flag.Int("max-allowed-size", 0, "Restrict maximum size of http image source (in bytes)")
aKey = flag.String("key", "", "Define API key for authorization")
aMount = flag.String("mount", "", "Mount server local directory")
aCertFile = flag.String("certfile", "", "TLS certificate file path")
aKeyFile = flag.String("keyfile", "", "TLS private key file path")
aAuthorization = flag.String("authorization", "", "Defines a constant Authorization header value passed to all the image source servers. -enable-url-source flag must be defined. This overwrites authorization headers forwarding behavior via X-Forward-Authorization")
aForwardHeaders = flag.String("forward-headers", "", "Forwards custom headers to the image source server. -enable-url-source flag must be defined.")
aPlaceholder = flag.String("placeholder", "", "Image path to image custom placeholder to be used in case of error. Recommended minimum image size is: 1200x1200")
aPlaceholderStatus = flag.Int("placeholder-status", 0, "HTTP status returned when use -placeholder flag")
aDisableEndpoints = flag.String("disable-endpoints", "", "Comma separated endpoints to disable. E.g: form,crop,rotate,health")
aHTTPCacheTTL = flag.Int("http-cache-ttl", -1, "The TTL in seconds")
aReadTimeout = flag.Int("http-read-timeout", 60, "HTTP read timeout in seconds")
aWriteTimeout = flag.Int("http-write-timeout", 60, "HTTP write timeout in seconds")
aConcurrency = flag.Int("concurrency", 0, "Throttle concurrency limit per second")
aBurst = flag.Int("burst", 100, "Throttle burst max cache size")
aMRelease = flag.Int("mrelease", 30, "OS memory release interval in seconds")
aCpus = flag.Int("cpus", runtime.GOMAXPROCS(-1), "Number of cpu cores to use")
aLogLevel = flag.String("log-level", "info", "Define log level for http-server. E.g: info,warning,error")
aReturnSize = flag.Bool("return-size", false, "Return the image size in the HTTP headers")
)
const usage = `imaginary %s
Usage:
imaginary -p 80
imaginary -cors
imaginary -concurrency 10
imaginary -path-prefix /api/v1
imaginary -enable-url-source
imaginary -disable-endpoints form,health,crop,rotate
imaginary -enable-url-source -allowed-origins http://localhost,http://server.com
imaginary -enable-url-source -enable-auth-forwarding
imaginary -enable-url-source -authorization "Basic AwDJdL2DbwrD=="
imaginary -enable-placeholder
imaginary -enable-url-source -placeholder ./placeholder.jpg
imaginary -enable-url-signature -url-signature-key 4f46feebafc4b5e988f131c4ff8b5997
imaginary -enable-url-source -forward-headers X-Custom,X-Token
imaginary -h | -help
imaginary -v | -version
Options:
-a <addr> Bind address [default: *]
-p <port> Bind port [default: 8088]
-h, -help Show help
-v, -version Show version
-path-prefix <value> Url path prefix to listen to [default: "/"]
-cors Enable CORS support [default: false]
-gzip Enable gzip compression (deprecated) [default: false]
-disable-endpoints Comma separated endpoints to disable. E.g: form,crop,rotate,health [default: ""]
-key <key> Define API key for authorization
-mount <path> Mount server local directory
-http-cache-ttl <num> The TTL in seconds. Adds caching headers to locally served files.
  -http-read-timeout <num> HTTP read timeout in seconds [default: 60]
  -http-write-timeout <num> HTTP write timeout in seconds [default: 60]
-enable-url-source Enable remote HTTP URL image source processing
-enable-placeholder Enable image response placeholder to be used in case of error [default: false]
-enable-auth-forwarding Forwards X-Forward-Authorization or Authorization header to the image source server. -enable-url-source flag must be defined. Tip: secure your server from public access to prevent attack vectors
-forward-headers Forwards custom headers to the image source server. -enable-url-source flag must be defined.
-enable-url-signature Enable URL signature (URL-safe Base64-encoded HMAC digest) [default: false]
-url-signature-key The URL signature key (32 characters minimum)
-allowed-origins <urls> Restrict remote image source processing to certain origins (separated by commas)
-max-allowed-size <bytes> Restrict maximum size of http image source (in bytes)
-certfile <path> TLS certificate file path
-keyfile <path> TLS private key file path
-authorization <value> Defines a constant Authorization header value passed to all the image source servers. -enable-url-source flag must be defined. This overwrites authorization headers forwarding behavior via X-Forward-Authorization
-placeholder <path> Image path to image custom placeholder to be used in case of error. Recommended minimum image size is: 1200x1200
-placeholder-status <code> HTTP status returned when use -placeholder flag
-concurrency <num> Throttle concurrency limit per second [default: disabled]
-burst <num> Throttle burst max cache size [default: 100]
-mrelease <num> OS memory release interval in seconds [default: 30]
-cpus <num> Number of used cpu cores.
(default for current machine is %d cores)
-log-level Set log level for http-server. E.g: info,warning,error [default: info].
Or can use the environment variable GOLANG_LOG=info.
-return-size Return the image size with X-Width and X-Height HTTP header. [default: disabled].
`
type URLSignature struct {
Key string
}
func main() {
flag.Usage = func() {
_, _ = fmt.Fprintf(os.Stderr, usage, Version, runtime.NumCPU())
}
flag.Parse()
if *aHelp || *aHelpl {
showUsage()
}
if *aVers || *aVersl {
showVersion()
}
// Only required in Go < 1.5
runtime.GOMAXPROCS(*aCpus)
port := getPort(*aPort)
urlSignature := getURLSignature(*aURLSignatureKey)
opts := ServerOptions{
Port: port,
Address: *aAddr,
CORS: *aCors,
AuthForwarding: *aAuthForwarding,
EnableURLSource: *aEnableURLSource,
EnablePlaceholder: *aEnablePlaceholder,
EnableURLSignature: *aEnableURLSignature,
URLSignatureKey: urlSignature.Key,
PathPrefix: *aPathPrefix,
APIKey: *aKey,
Concurrency: *aConcurrency,
Burst: *aBurst,
Mount: *aMount,
CertFile: *aCertFile,
KeyFile: *aKeyFile,
Placeholder: *aPlaceholder,
PlaceholderStatus: *aPlaceholderStatus,
HTTPCacheTTL: *aHTTPCacheTTL,
HTTPReadTimeout: *aReadTimeout,
HTTPWriteTimeout: *aWriteTimeout,
Authorization: *aAuthorization,
ForwardHeaders: parseForwardHeaders(*aForwardHeaders),
AllowedOrigins: parseOrigins(*aAllowedOrigins),
MaxAllowedSize: *aMaxAllowedSize,
LogLevel: getLogLevel(*aLogLevel),
ReturnSize: *aReturnSize,
}
// Show warning if gzip flag is passed
if *aGzip {
fmt.Println("warning: -gzip flag is deprecated and will not have effect")
}
// Create a memory release goroutine
if *aMRelease > 0 {
memoryRelease(*aMRelease)
}
// Check if the mount directory exists, if present
if *aMount != "" {
checkMountDirectory(*aMount)
}
// Validate HTTP cache param, if present
if *aHTTPCacheTTL != -1 {
checkHTTPCacheTTL(*aHTTPCacheTTL)
}
// Parse endpoint names to disabled, if present
if *aDisableEndpoints != "" {
opts.Endpoints = parseEndpoints(*aDisableEndpoints)
}
// Read placeholder image, if required
if *aPlaceholder != "" {
buf, err := ioutil.ReadFile(*aPlaceholder)
if err != nil {
exitWithError("cannot start the server: %s", err)
}
imageType := bimg.DetermineImageType(buf)
if !bimg.IsImageTypeSupportedByVips(imageType).Load {
exitWithError("Placeholder image type is not supported. Only JPEG, PNG or WEBP are supported")
}
opts.PlaceholderImage = buf
} else if *aEnablePlaceholder {
// Expose default placeholder
opts.PlaceholderImage = placeholder
}
// Check URL signature key, if required
if *aEnableURLSignature {
if urlSignature.Key == "" {
exitWithError("URL signature key is required")
}
if len(urlSignature.Key) < 32 {
exitWithError("URL signature key must be a minimum of 32 characters")
}
}
debug("imaginary server listening on port :%d/%s", opts.Port, strings.TrimPrefix(opts.PathPrefix, "/"))
// Load image source providers
LoadSources(opts)
// Start the server
Server(opts)
}
func getPort(port int) int {
if portEnv := os.Getenv("PORT"); portEnv != "" {
newPort, _ := strconv.Atoi(portEnv)
if newPort > 0 {
port = newPort
}
}
return port
}
func getURLSignature(key string) URLSignature {
if keyEnv := os.Getenv("URL_SIGNATURE_KEY"); keyEnv != "" {
key = keyEnv
}
return URLSignature{key}
}
func getLogLevel(logLevel string) string {
if logLevelEnv := os.Getenv("GOLANG_LOG"); logLevelEnv != "" {
logLevel = logLevelEnv
}
return logLevel
}
func showUsage() {
flag.Usage()
os.Exit(1)
}
func showVersion() {
fmt.Println(Version)
os.Exit(1)
}
func checkMountDirectory(path string) {
src, err := os.Stat(path)
if err != nil {
exitWithError("error while mounting directory: %s", err)
}
if !src.IsDir() {
exitWithError("mount path is not a directory: %s", path)
}
if path == "/" {
exitWithError("cannot mount root directory for security reasons")
}
}
func checkHTTPCacheTTL(ttl int) {
if ttl < 0 || ttl > 31556926 {
exitWithError("The -http-cache-ttl flag only accepts a value from 0 to 31556926")
}
if ttl == 0 {
debug("Adding HTTP cache control headers set to prevent caching.")
}
}
func parseForwardHeaders(forwardHeaders string) []string {
var headers []string
if forwardHeaders == "" {
return headers
}
for _, header := range strings.Split(forwardHeaders, ",") {
if norm := strings.TrimSpace(header); norm != "" {
headers = append(headers, norm)
}
}
return headers
}
func parseOrigins(origins string) []*url.URL {
var urls []*url.URL
if origins == "" {
return urls
}
for _, origin := range strings.Split(origins, ",") {
u, err := url.Parse(origin)
if err != nil {
continue
}
if u.Path != "" {
var lastChar = u.Path[len(u.Path)-1:]
if lastChar == "*" {
u.Path = strings.TrimSuffix(u.Path, "*")
} else if lastChar != "/" {
u.Path += "/"
}
}
urls = append(urls, u)
}
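	// Illustrative examples (not part of the original file), with hypothetical URLs:
	//   "https://cdn.example.com"          -> path stays ""
	//   "https://cdn.example.com/images"   -> path becomes "/images/"
	//   "https://cdn.example.com/images/*" -> path becomes "/images/" (wildcard trimmed)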
return urls
}
func parseEndpoints(input string) Endpoints {
var endpoints Endpoints
for _, endpoint := range strings.Split(input, ",") {
endpoint = strings.ToLower(strings.TrimSpace(endpoint))
if endpoint != "" {
endpoints = append(endpoints, endpoint)
}
}
return endpoints
}
func memoryRelease(interval int) {
ticker := time.NewTicker(time.Duration(interval) * time.Second)
go func() {
for range ticker.C {
debug("FreeOSMemory()")
d.FreeOSMemory()
}
}()
}
func exitWithError(format string, args ...interface{}) {
	_, _ = fmt.Fprintf(os.Stderr, format+"\n", args...)
os.Exit(1)
}
func debug(msg string, values ...interface{}) {
debug := os.Getenv("DEBUG")
if debug == "imaginary" || debug == "*" {
log.Printf(msg, values...)
}
}
| ["\"PORT\"", "\"URL_SIGNATURE_KEY\"", "\"GOLANG_LOG\"", "\"DEBUG\""] | [] | ["PORT", "GOLANG_LOG", "URL_SIGNATURE_KEY", "DEBUG"] | [] | ["PORT", "GOLANG_LOG", "URL_SIGNATURE_KEY", "DEBUG"] | go | 4 | 0 |
src/cmd/link/internal/ld/pcln.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ld
import (
"cmd/internal/goobj"
"cmd/internal/objabi"
"cmd/internal/sys"
"cmd/link/internal/loader"
"cmd/link/internal/sym"
"fmt"
"internal/buildcfg"
"os"
"path/filepath"
)
// pclntab holds the state needed for pclntab generation.
type pclntab struct {
// The size of the func object in the runtime.
funcSize uint32
// The first and last functions found.
firstFunc, lastFunc loader.Sym
// Running total size of pclntab.
size int64
// runtime.pclntab's symbols
carrier loader.Sym
pclntab loader.Sym
pcheader loader.Sym
funcnametab loader.Sym
findfunctab loader.Sym
cutab loader.Sym
filetab loader.Sym
pctab loader.Sym
// The number of functions + number of TEXT sections - 1. This is such an
// unexpected value because platforms that have more than one TEXT section
	// get a dummy function inserted between them because the external linker can place
// functions in those areas. We mark those areas as not covered by the Go
// runtime.
//
// On most platforms this is the number of reachable functions.
nfunc int32
// The number of filenames in runtime.filetab.
nfiles uint32
}
// addGeneratedSym adds a generator symbol to pclntab, returning the new Sym.
// It is the caller's responsibility to save the symbol in state.
func (state *pclntab) addGeneratedSym(ctxt *Link, name string, size int64, f generatorFunc) loader.Sym {
size = Rnd(size, int64(ctxt.Arch.PtrSize))
state.size += size
s := ctxt.createGeneratorSymbol(name, 0, sym.SPCLNTAB, size, f)
ctxt.loader.SetAttrReachable(s, true)
ctxt.loader.SetCarrierSym(s, state.carrier)
ctxt.loader.SetAttrNotInSymbolTable(s, true)
return s
}
// makePclntab makes a pclntab object, and assembles all the compilation units
// we'll need to write pclntab. Returns the pclntab structure, a slice of the
// CompilationUnits we need, and a slice of the function symbols we need to
// generate pclntab.
func makePclntab(ctxt *Link, container loader.Bitmap) (*pclntab, []*sym.CompilationUnit, []loader.Sym) {
ldr := ctxt.loader
state := &pclntab{
// This is the size of the _func object in runtime/runtime2.go.
funcSize: uint32(ctxt.Arch.PtrSize + 9*4),
}
// Gather some basic stats and info.
seenCUs := make(map[*sym.CompilationUnit]struct{})
prevSect := ldr.SymSect(ctxt.Textp[0])
compUnits := []*sym.CompilationUnit{}
funcs := []loader.Sym{}
for _, s := range ctxt.Textp {
if !emitPcln(ctxt, s, container) {
continue
}
funcs = append(funcs, s)
state.nfunc++
if state.firstFunc == 0 {
state.firstFunc = s
}
state.lastFunc = s
ss := ldr.SymSect(s)
if ss != prevSect {
// With multiple text sections, the external linker may
// insert functions between the sections, which are not
// known by Go. This leaves holes in the PC range covered
// by the func table. We need to generate an entry to mark
// the hole.
state.nfunc++
prevSect = ss
}
// We need to keep track of all compilation units we see. Some symbols
// (eg, go.buildid, _cgoexp_, etc) won't have a compilation unit.
cu := ldr.SymUnit(s)
if _, ok := seenCUs[cu]; cu != nil && !ok {
seenCUs[cu] = struct{}{}
cu.PclnIndex = len(compUnits)
compUnits = append(compUnits, cu)
}
}
return state, compUnits, funcs
}
func emitPcln(ctxt *Link, s loader.Sym, container loader.Bitmap) bool {
// We want to generate func table entries only for the "lowest
// level" symbols, not containers of subsymbols.
return !container.Has(s)
}
func computeDeferReturn(ctxt *Link, deferReturnSym, s loader.Sym) uint32 {
ldr := ctxt.loader
target := ctxt.Target
deferreturn := uint32(0)
lastWasmAddr := uint32(0)
relocs := ldr.Relocs(s)
for ri := 0; ri < relocs.Count(); ri++ {
r := relocs.At(ri)
if target.IsWasm() && r.Type() == objabi.R_ADDR {
// Wasm does not have a live variable set at the deferreturn
// call itself. Instead it has one identified by the
// resumption point immediately preceding the deferreturn.
// The wasm code has a R_ADDR relocation which is used to
// set the resumption point to PC_B.
lastWasmAddr = uint32(r.Add())
}
if r.Type().IsDirectCall() && (r.Sym() == deferReturnSym || ldr.IsDeferReturnTramp(r.Sym())) {
if target.IsWasm() {
deferreturn = lastWasmAddr - 1
} else {
// Note: the relocation target is in the call instruction, but
// is not necessarily the whole instruction (for instance, on
// x86 the relocation applies to bytes [1:5] of the 5 byte call
// instruction).
deferreturn = uint32(r.Off())
switch target.Arch.Family {
case sys.AMD64, sys.I386:
deferreturn--
case sys.PPC64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
// no change
case sys.RISCV64:
// TODO(jsing): The JALR instruction is marked with
// R_CALLRISCV, whereas the actual reloc is currently
// one instruction earlier starting with the AUIPC.
deferreturn -= 4
case sys.S390X:
deferreturn -= 2
default:
panic(fmt.Sprint("Unhandled architecture:", target.Arch.Family))
}
}
break // only need one
}
}
return deferreturn
}
// genInlTreeSym generates the InlTree sym for a function with the
// specified FuncInfo.
func genInlTreeSym(ctxt *Link, cu *sym.CompilationUnit, fi loader.FuncInfo, arch *sys.Arch, nameOffsets map[loader.Sym]uint32) loader.Sym {
ldr := ctxt.loader
its := ldr.CreateExtSym("", 0)
inlTreeSym := ldr.MakeSymbolUpdater(its)
// Note: the generated symbol is given a type of sym.SGOFUNC, as a
// signal to the symtab() phase that it needs to be grouped in with
// other similar symbols (gcdata, etc); the dodata() phase will
// eventually switch the type back to SRODATA.
inlTreeSym.SetType(sym.SGOFUNC)
ldr.SetAttrReachable(its, true)
ninl := fi.NumInlTree()
for i := 0; i < int(ninl); i++ {
call := fi.InlTree(i)
val := call.File
nameoff, ok := nameOffsets[call.Func]
if !ok {
panic("couldn't find function name offset")
}
inlTreeSym.SetUint16(arch, int64(i*20+0), uint16(call.Parent))
inlFunc := ldr.FuncInfo(call.Func)
var funcID objabi.FuncID
if inlFunc.Valid() {
funcID = inlFunc.FuncID()
}
inlTreeSym.SetUint8(arch, int64(i*20+2), uint8(funcID))
// byte 3 is unused
inlTreeSym.SetUint32(arch, int64(i*20+4), uint32(val))
inlTreeSym.SetUint32(arch, int64(i*20+8), uint32(call.Line))
inlTreeSym.SetUint32(arch, int64(i*20+12), uint32(nameoff))
inlTreeSym.SetUint32(arch, int64(i*20+16), uint32(call.ParentPC))
}
return its
}
// makeInlSyms returns a map from each function symbol to its newly created inline-tree symbol.
func makeInlSyms(ctxt *Link, funcs []loader.Sym, nameOffsets map[loader.Sym]uint32) map[loader.Sym]loader.Sym {
ldr := ctxt.loader
// Create the inline symbols we need.
inlSyms := make(map[loader.Sym]loader.Sym)
for _, s := range funcs {
if fi := ldr.FuncInfo(s); fi.Valid() {
fi.Preload()
if fi.NumInlTree() > 0 {
inlSyms[s] = genInlTreeSym(ctxt, ldr.SymUnit(s), fi, ctxt.Arch, nameOffsets)
}
}
}
return inlSyms
}
// generatePCHeader creates the runtime.pcheader symbol, setting it up as a
// generator to fill in its data later.
func (state *pclntab) generatePCHeader(ctxt *Link) {
writeHeader := func(ctxt *Link, s loader.Sym) {
ldr := ctxt.loader
header := ctxt.loader.MakeSymbolUpdater(s)
writeSymOffset := func(off int64, ws loader.Sym) int64 {
diff := ldr.SymValue(ws) - ldr.SymValue(s)
if diff <= 0 {
name := ldr.SymName(ws)
panic(fmt.Sprintf("expected runtime.pcheader(%x) to be placed before %s(%x)", ldr.SymValue(s), name, ldr.SymValue(ws)))
}
return header.SetUintptr(ctxt.Arch, off, uintptr(diff))
}
// Write header.
// Keep in sync with runtime/symtab.go:pcHeader.
header.SetUint32(ctxt.Arch, 0, 0xfffffffa)
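		// Bytes 4 and 5 are left as zero padding (the pad1/pad2 fields of the runtime's pcHeader struct).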
header.SetUint8(ctxt.Arch, 6, uint8(ctxt.Arch.MinLC))
header.SetUint8(ctxt.Arch, 7, uint8(ctxt.Arch.PtrSize))
off := header.SetUint(ctxt.Arch, 8, uint64(state.nfunc))
off = header.SetUint(ctxt.Arch, off, uint64(state.nfiles))
off = writeSymOffset(off, state.funcnametab)
off = writeSymOffset(off, state.cutab)
off = writeSymOffset(off, state.filetab)
off = writeSymOffset(off, state.pctab)
off = writeSymOffset(off, state.pclntab)
}
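	// 8 fixed bytes (magic, two pad bytes, minLC, ptrSize) followed by seven
	// pointer-sized fields: nfunc, nfiles and the five table offsets written above.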
size := int64(8 + 7*ctxt.Arch.PtrSize)
state.pcheader = state.addGeneratedSym(ctxt, "runtime.pcheader", size, writeHeader)
}
// walkFuncs iterates over the funcs, calling a function for each unique
// function and inlined function.
func walkFuncs(ctxt *Link, funcs []loader.Sym, f func(loader.Sym)) {
ldr := ctxt.loader
seen := make(map[loader.Sym]struct{})
for _, s := range funcs {
if _, ok := seen[s]; !ok {
f(s)
seen[s] = struct{}{}
}
fi := ldr.FuncInfo(s)
if !fi.Valid() {
continue
}
fi.Preload()
for i, ni := 0, fi.NumInlTree(); i < int(ni); i++ {
call := fi.InlTree(i).Func
if _, ok := seen[call]; !ok {
f(call)
seen[call] = struct{}{}
}
}
}
}
// generateFuncnametab creates the function name table. Returns a map of
// func symbol to the name offset in runtime.funcnametab.
func (state *pclntab) generateFuncnametab(ctxt *Link, funcs []loader.Sym) map[loader.Sym]uint32 {
nameOffsets := make(map[loader.Sym]uint32, state.nfunc)
// Write the null terminated strings.
writeFuncNameTab := func(ctxt *Link, s loader.Sym) {
symtab := ctxt.loader.MakeSymbolUpdater(s)
for s, off := range nameOffsets {
symtab.AddStringAt(int64(off), ctxt.loader.SymName(s))
}
}
// Loop through the CUs, and calculate the size needed.
var size int64
walkFuncs(ctxt, funcs, func(s loader.Sym) {
nameOffsets[s] = uint32(size)
size += int64(ctxt.loader.SymNameLen(s)) + 1 // NULL terminate
})
state.funcnametab = state.addGeneratedSym(ctxt, "runtime.funcnametab", size, writeFuncNameTab)
return nameOffsets
}
// walkFilenames walks funcs, calling a function for each filename used in each
// function's line table.
func walkFilenames(ctxt *Link, funcs []loader.Sym, f func(*sym.CompilationUnit, goobj.CUFileIndex)) {
ldr := ctxt.loader
// Loop through all functions, finding the filenames we need.
for _, s := range funcs {
fi := ldr.FuncInfo(s)
if !fi.Valid() {
continue
}
fi.Preload()
cu := ldr.SymUnit(s)
for i, nf := 0, int(fi.NumFile()); i < nf; i++ {
f(cu, fi.File(i))
}
for i, ninl := 0, int(fi.NumInlTree()); i < ninl; i++ {
call := fi.InlTree(i)
f(cu, call.File)
}
}
}
// generateFilenameTabs creates LUTs needed for filename lookup. Returns a slice
// of the index at which each CU begins in runtime.cutab.
//
// Function objects keep track of the files they reference to print the stack.
// This function creates a per-CU list of filenames. If CU[M] references
// files[1-N], the following is generated:
//
// runtime.cutab:
// CU[M]
// offsetToFilename[0]
// offsetToFilename[1]
// ..
//
// runtime.filetab
// filename[0]
// filename[1]
//
// Looking up a filename then becomes:
// 0) Given a func, and filename index [K]
// 1) Get Func.CUIndex: M := func.cuOffset
// 2) Find filename offset: fileOffset := runtime.cutab[M+K]
// 3) Get the filename: getcstring(runtime.filetab[fileOffset])
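// For example (hypothetical numbers): if this CU's entries start at cutab
// offset 7 and the function refers to file index K=3, the name is read from
// runtime.filetab[runtime.cutab[7+3]].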
func (state *pclntab) generateFilenameTabs(ctxt *Link, compUnits []*sym.CompilationUnit, funcs []loader.Sym) []uint32 {
// On a per-CU basis, keep track of all the filenames we need.
//
// Note that we store the filenames in a separate section in the object
// files, and deduplicate based on the actual value. It would be better to
// store the filenames as symbols, using content addressable symbols (and
// then not loading extra filenames), and just use the hash value of the
// symbol name to do this cataloging.
//
// TODO: Store filenames as symbols. (Note this would be easiest if you
// also move strings to ALWAYS using the larger content addressable hash
// function, and use that hash value for uniqueness testing.)
cuEntries := make([]goobj.CUFileIndex, len(compUnits))
fileOffsets := make(map[string]uint32)
// Walk the filenames.
// We store the total filename string length we need to load, and the max
// file index we've seen per CU so we can calculate how large the
// CU->global table needs to be.
var fileSize int64
walkFilenames(ctxt, funcs, func(cu *sym.CompilationUnit, i goobj.CUFileIndex) {
// Note we use the raw filename for lookup, but use the expanded filename
// when we save the size.
filename := cu.FileTable[i]
if _, ok := fileOffsets[filename]; !ok {
fileOffsets[filename] = uint32(fileSize)
fileSize += int64(len(expandFile(filename)) + 1) // NULL terminate
}
// Find the maximum file index we've seen.
if cuEntries[cu.PclnIndex] < i+1 {
cuEntries[cu.PclnIndex] = i + 1 // Store max + 1
}
})
// Calculate the size of the runtime.cutab variable.
var totalEntries uint32
cuOffsets := make([]uint32, len(cuEntries))
for i, entries := range cuEntries {
// Note, cutab is a slice of uint32, so an offset to a cu's entry is just the
// running total of all cu indices we've needed to store so far, not the
// number of bytes we've stored so far.
cuOffsets[i] = totalEntries
totalEntries += uint32(entries)
}
// Write cutab.
writeCutab := func(ctxt *Link, s loader.Sym) {
sb := ctxt.loader.MakeSymbolUpdater(s)
var off int64
for i, max := range cuEntries {
// Write the per CU LUT.
cu := compUnits[i]
for j := goobj.CUFileIndex(0); j < max; j++ {
fileOffset, ok := fileOffsets[cu.FileTable[j]]
if !ok {
// We're looping through all possible file indices. It's possible a file's
// been deadcode eliminated, and although it's a valid file in the CU, it's
// not needed in this binary. When that happens, use an invalid offset.
fileOffset = ^uint32(0)
}
off = sb.SetUint32(ctxt.Arch, off, fileOffset)
}
}
}
state.cutab = state.addGeneratedSym(ctxt, "runtime.cutab", int64(totalEntries*4), writeCutab)
// Write filetab.
writeFiletab := func(ctxt *Link, s loader.Sym) {
sb := ctxt.loader.MakeSymbolUpdater(s)
// Write the strings.
for filename, loc := range fileOffsets {
sb.AddStringAt(int64(loc), expandFile(filename))
}
}
state.nfiles = uint32(len(fileOffsets))
state.filetab = state.addGeneratedSym(ctxt, "runtime.filetab", fileSize, writeFiletab)
return cuOffsets
}
// generatePctab creates the runtime.pctab variable, holding all the
// deduplicated pcdata.
func (state *pclntab) generatePctab(ctxt *Link, funcs []loader.Sym) {
ldr := ctxt.loader
// Pctab offsets of 0 are considered invalid in the runtime. We respect
// that by just padding a single byte at the beginning of runtime.pctab,
// that way no real offsets can be zero.
size := int64(1)
// Walk the functions, finding offset to store each pcdata.
seen := make(map[loader.Sym]struct{})
saveOffset := func(pcSym loader.Sym) {
if _, ok := seen[pcSym]; !ok {
datSize := ldr.SymSize(pcSym)
if datSize != 0 {
ldr.SetSymValue(pcSym, size)
} else {
// Invalid PC data, record as zero.
ldr.SetSymValue(pcSym, 0)
}
size += datSize
seen[pcSym] = struct{}{}
}
}
for _, s := range funcs {
fi := ldr.FuncInfo(s)
if !fi.Valid() {
continue
}
fi.Preload()
pcSyms := []loader.Sym{fi.Pcsp(), fi.Pcfile(), fi.Pcline()}
for _, pcSym := range pcSyms {
saveOffset(pcSym)
}
for _, pcSym := range fi.Pcdata() {
saveOffset(pcSym)
}
if fi.NumInlTree() > 0 {
saveOffset(fi.Pcinline())
}
}
// TODO: There is no reason we need a generator for this variable, and it
// could be moved to a carrier symbol. However, carrier symbols containing
// carrier symbols don't work yet (as of Aug 2020). Once this is fixed,
// runtime.pctab could just be a carrier sym.
writePctab := func(ctxt *Link, s loader.Sym) {
ldr := ctxt.loader
sb := ldr.MakeSymbolUpdater(s)
for sym := range seen {
sb.SetBytesAt(ldr.SymValue(sym), ldr.Data(sym))
}
}
state.pctab = state.addGeneratedSym(ctxt, "runtime.pctab", size, writePctab)
}
// numPCData returns the number of PCData syms for the FuncInfo.
// NB: Preload must be called on valid FuncInfos before calling this function.
func numPCData(fi loader.FuncInfo) uint32 {
if !fi.Valid() {
return 0
}
numPCData := uint32(len(fi.Pcdata()))
if fi.NumInlTree() > 0 {
if numPCData < objabi.PCDATA_InlTreeIndex+1 {
numPCData = objabi.PCDATA_InlTreeIndex + 1
}
}
return numPCData
}
// Helper types for iterating pclntab.
type pclnSetAddr func(*loader.SymbolBuilder, *sys.Arch, int64, loader.Sym, int64) int64
type pclnSetUint func(*loader.SymbolBuilder, *sys.Arch, int64, uint64) int64
// generateFunctab creates the runtime.functab
//
// runtime.functab contains two things:
//
// - pc->func look up table.
// - array of func objects, interleaved with pcdata and funcdata
//
// Because of timing in the linker, generating this table takes two passes.
// The first pass is executed early in the link, and it creates any needed
// relocations to layout the data. The pieces that need relocations are:
// 1) the PC->func table.
// 2) The entry points in the func objects.
// 3) The funcdata.
// (1) and (2) are handled in writePcToFunc. (3) is handled in writeFuncData.
//
// After relocations, once we know where to write things in the output buffer,
// we execute the second pass, which is actually writing the data.
func (state *pclntab) generateFunctab(ctxt *Link, funcs []loader.Sym, inlSyms map[loader.Sym]loader.Sym, cuOffsets []uint32, nameOffsets map[loader.Sym]uint32) {
// Calculate the size of the table.
size, startLocations := state.calculateFunctabSize(ctxt, funcs)
// If we are internally linking a static executable, the function addresses
// are known, so we can just use them instead of emitting relocations. For
// other cases we still need to emit relocations.
//
// This boolean just helps us figure out which callback to use.
useSymValue := ctxt.IsExe() && ctxt.IsInternal()
writePcln := func(ctxt *Link, s loader.Sym) {
ldr := ctxt.loader
sb := ldr.MakeSymbolUpdater(s)
// Create our callbacks.
var setAddr pclnSetAddr
if useSymValue {
// We need to write the offset.
setAddr = func(s *loader.SymbolBuilder, arch *sys.Arch, off int64, tgt loader.Sym, add int64) int64 {
if v := ldr.SymValue(tgt); v != 0 {
s.SetUint(arch, off, uint64(v+add))
}
return 0
}
} else {
// We already wrote relocations.
setAddr = func(s *loader.SymbolBuilder, arch *sys.Arch, off int64, tgt loader.Sym, add int64) int64 { return 0 }
}
// Write the data.
writePcToFunc(ctxt, sb, funcs, startLocations, setAddr, (*loader.SymbolBuilder).SetUint)
writeFuncs(ctxt, sb, funcs, inlSyms, startLocations, cuOffsets, nameOffsets)
state.writeFuncData(ctxt, sb, funcs, inlSyms, startLocations, setAddr, (*loader.SymbolBuilder).SetUint)
}
state.pclntab = state.addGeneratedSym(ctxt, "runtime.functab", size, writePcln)
// Create the relocations we need.
ldr := ctxt.loader
sb := ldr.MakeSymbolUpdater(state.pclntab)
var setAddr pclnSetAddr
if useSymValue {
// If we should use the symbol value, and we don't have one, write a relocation.
setAddr = func(sb *loader.SymbolBuilder, arch *sys.Arch, off int64, tgt loader.Sym, add int64) int64 {
if v := ldr.SymValue(tgt); v == 0 {
sb.SetAddrPlus(arch, off, tgt, add)
}
return 0
}
} else {
// If we're externally linking, write a relocation.
setAddr = (*loader.SymbolBuilder).SetAddrPlus
}
setUintNOP := func(*loader.SymbolBuilder, *sys.Arch, int64, uint64) int64 { return 0 }
writePcToFunc(ctxt, sb, funcs, startLocations, setAddr, setUintNOP)
if !useSymValue {
// Generate relocations for funcdata when externally linking.
state.writeFuncData(ctxt, sb, funcs, inlSyms, startLocations, setAddr, setUintNOP)
}
}
// funcData returns the funcdata and offsets for the FuncInfo.
// The funcdata and offsets are written into runtime.functab after each func
// object. This is a helper function to make querying the FuncInfo object
// cleaner.
//
// Note, the majority of fdOffsets are 0, meaning there is no offset between
// the compiler's generated symbol, and what the runtime needs. They are
// plumbed through for no loss of generality.
//
// NB: Preload must be called on the FuncInfo before calling.
// NB: fdSyms and fdOffs are used as scratch space.
func funcData(fi loader.FuncInfo, inlSym loader.Sym, fdSyms []loader.Sym, fdOffs []int64) ([]loader.Sym, []int64) {
fdSyms, fdOffs = fdSyms[:0], fdOffs[:0]
if fi.Valid() {
numOffsets := int(fi.NumFuncdataoff())
for i := 0; i < numOffsets; i++ {
fdOffs = append(fdOffs, fi.Funcdataoff(i))
}
fdSyms = fi.Funcdata(fdSyms)
if fi.NumInlTree() > 0 {
if len(fdSyms) < objabi.FUNCDATA_InlTree+1 {
fdSyms = append(fdSyms, make([]loader.Sym, objabi.FUNCDATA_InlTree+1-len(fdSyms))...)
fdOffs = append(fdOffs, make([]int64, objabi.FUNCDATA_InlTree+1-len(fdOffs))...)
}
fdSyms[objabi.FUNCDATA_InlTree] = inlSym
}
}
return fdSyms, fdOffs
}
// calculateFunctabSize calculates the size of the pclntab, and the offsets in
// the output buffer for individual func entries.
func (state pclntab) calculateFunctabSize(ctxt *Link, funcs []loader.Sym) (int64, []uint32) {
ldr := ctxt.loader
startLocations := make([]uint32, len(funcs))
// Allocate space for the pc->func table. This structure consists of a pc
// and an offset to the func structure. After that, we have a single pc
// value that marks the end of the last function in the binary.
size := int64(int(state.nfunc)*2*ctxt.Arch.PtrSize + ctxt.Arch.PtrSize)
// Now find the space for the func objects. We do this in a running manner,
// so that we can find individual starting locations, and because funcdata
// requires alignment.
for i, s := range funcs {
size = Rnd(size, int64(ctxt.Arch.PtrSize))
startLocations[i] = uint32(size)
fi := ldr.FuncInfo(s)
size += int64(state.funcSize)
if fi.Valid() {
fi.Preload()
numFuncData := int(fi.NumFuncdataoff())
if fi.NumInlTree() > 0 {
if numFuncData < objabi.FUNCDATA_InlTree+1 {
numFuncData = objabi.FUNCDATA_InlTree + 1
}
}
size += int64(numPCData(fi) * 4)
if numFuncData > 0 { // Func data is aligned.
size = Rnd(size, int64(ctxt.Arch.PtrSize))
}
size += int64(numFuncData * ctxt.Arch.PtrSize)
}
}
return size, startLocations
}
// writePcToFunc writes the PC->func lookup table.
// This function walks the pc->func lookup table, executing callbacks
// to generate relocations and writing the values for the table.
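// Each function contributes a pair of pointer-sized entries (its PC, then the
// offset of its func object within runtime.functab); holes between text
// sections get a pair with an invalid offset, and a single end-PC entry
// closes the table.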
func writePcToFunc(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, startLocations []uint32, setAddr pclnSetAddr, setUint pclnSetUint) {
ldr := ctxt.loader
var prevFunc loader.Sym
prevSect := ldr.SymSect(funcs[0])
funcIndex := 0
for i, s := range funcs {
if thisSect := ldr.SymSect(s); thisSect != prevSect {
// With multiple text sections, there may be a hole here in the
// address space. We use an invalid funcoff value to mark the hole.
// See also runtime/symtab.go:findfunc
prevFuncSize := int64(ldr.SymSize(prevFunc))
setAddr(sb, ctxt.Arch, int64(funcIndex*2*ctxt.Arch.PtrSize), prevFunc, prevFuncSize)
setUint(sb, ctxt.Arch, int64((funcIndex*2+1)*ctxt.Arch.PtrSize), ^uint64(0))
funcIndex++
prevSect = thisSect
}
prevFunc = s
// TODO: We don't actually need these relocations, provided we go to a
// module->func look-up-table like we do for filenames. We could have a
// single relocation for the module, and have them all laid out as
// offsets from the beginning of that module.
setAddr(sb, ctxt.Arch, int64(funcIndex*2*ctxt.Arch.PtrSize), s, 0)
setUint(sb, ctxt.Arch, int64((funcIndex*2+1)*ctxt.Arch.PtrSize), uint64(startLocations[i]))
funcIndex++
// Write the entry location.
setAddr(sb, ctxt.Arch, int64(startLocations[i]), s, 0)
}
// Final entry of table is just end pc.
setAddr(sb, ctxt.Arch, int64(funcIndex)*2*int64(ctxt.Arch.PtrSize), prevFunc, ldr.SymSize(prevFunc))
}
// writeFuncData writes the funcdata tables.
//
// This function executes a callback for each funcdata needed in
// runtime.functab. It should be called once for internally linked static
// binaries, or twice (once to generate the needed relocations) for other
// build modes.
//
// Note the output of this function is interwoven with writeFuncs, but this is
// a separate function, because it's needed in different passes in
// generateFunctab.
func (state *pclntab) writeFuncData(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, inlSyms map[loader.Sym]loader.Sym, startLocations []uint32, setAddr pclnSetAddr, setUint pclnSetUint) {
ldr := ctxt.loader
funcdata, funcdataoff := []loader.Sym{}, []int64{}
for i, s := range funcs {
fi := ldr.FuncInfo(s)
if !fi.Valid() {
continue
}
fi.Preload()
// funcdata, must be pointer-aligned and we're only int32-aligned.
// Missing funcdata will be 0 (nil pointer).
		// Plain assignment (not ':=') so the scratch slices declared above are
		// reused across iterations, as funcData's contract intends.
		funcdata, funcdataoff = funcData(fi, inlSyms[s], funcdata, funcdataoff)
if len(funcdata) > 0 {
off := int64(startLocations[i] + state.funcSize + numPCData(fi)*4)
off = Rnd(off, int64(ctxt.Arch.PtrSize))
for j := range funcdata {
dataoff := off + int64(ctxt.Arch.PtrSize*j)
if funcdata[j] == 0 {
setUint(sb, ctxt.Arch, dataoff, uint64(funcdataoff[j]))
continue
}
// TODO: Does this need deduping?
setAddr(sb, ctxt.Arch, dataoff, funcdata[j], funcdataoff[j])
}
}
}
}
// writeFuncs writes the func structures and pcdata to runtime.functab.
func writeFuncs(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, inlSyms map[loader.Sym]loader.Sym, startLocations, cuOffsets []uint32, nameOffsets map[loader.Sym]uint32) {
ldr := ctxt.loader
deferReturnSym := ldr.Lookup("runtime.deferreturn", sym.SymVerABIInternal)
funcdata, funcdataoff := []loader.Sym{}, []int64{}
// Write the individual func objects.
for i, s := range funcs {
fi := ldr.FuncInfo(s)
if fi.Valid() {
fi.Preload()
}
		// Note we skip the space for the entry value -- that's handled in
		// writePcToFunc. We don't write it here, because it might require a
// relocation.
off := startLocations[i] + uint32(ctxt.Arch.PtrSize) // entry
// name int32
nameoff, ok := nameOffsets[s]
if !ok {
panic("couldn't find function name offset")
}
off = uint32(sb.SetUint32(ctxt.Arch, int64(off), uint32(nameoff)))
// args int32
// TODO: Move into funcinfo.
args := uint32(0)
if fi.Valid() {
args = uint32(fi.Args())
}
off = uint32(sb.SetUint32(ctxt.Arch, int64(off), args))
// deferreturn
deferreturn := computeDeferReturn(ctxt, deferReturnSym, s)
off = uint32(sb.SetUint32(ctxt.Arch, int64(off), deferreturn))
// pcdata
if fi.Valid() {
off = uint32(sb.SetUint32(ctxt.Arch, int64(off), uint32(ldr.SymValue(fi.Pcsp()))))
off = uint32(sb.SetUint32(ctxt.Arch, int64(off), uint32(ldr.SymValue(fi.Pcfile()))))
off = uint32(sb.SetUint32(ctxt.Arch, int64(off), uint32(ldr.SymValue(fi.Pcline()))))
} else {
off += 12
}
off = uint32(sb.SetUint32(ctxt.Arch, int64(off), uint32(numPCData(fi))))
// Store the offset to compilation unit's file table.
cuIdx := ^uint32(0)
if cu := ldr.SymUnit(s); cu != nil {
cuIdx = cuOffsets[cu.PclnIndex]
}
off = uint32(sb.SetUint32(ctxt.Arch, int64(off), cuIdx))
// funcID uint8
var funcID objabi.FuncID
if fi.Valid() {
funcID = fi.FuncID()
}
off = uint32(sb.SetUint8(ctxt.Arch, int64(off), uint8(funcID)))
// flag uint8
var flag objabi.FuncFlag
if fi.Valid() {
flag = fi.FuncFlag()
}
off = uint32(sb.SetUint8(ctxt.Arch, int64(off), uint8(flag)))
off += 1 // pad
// nfuncdata must be the final entry.
funcdata, funcdataoff = funcData(fi, 0, funcdata, funcdataoff)
off = uint32(sb.SetUint8(ctxt.Arch, int64(off), uint8(len(funcdata))))
// Output the pcdata.
if fi.Valid() {
for j, pcSym := range fi.Pcdata() {
sb.SetUint32(ctxt.Arch, int64(off+uint32(j*4)), uint32(ldr.SymValue(pcSym)))
}
if fi.NumInlTree() > 0 {
sb.SetUint32(ctxt.Arch, int64(off+objabi.PCDATA_InlTreeIndex*4), uint32(ldr.SymValue(fi.Pcinline())))
}
}
}
}
// pclntab generates the pcln table for the link output, initializing the
// runtime.pclntab symbols with runtime function and file name information.
func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab {
// Go 1.2's symtab layout is documented in golang.org/s/go12symtab, but the
// layout and data has changed since that time.
//
// As of August 2020, here's the layout of pclntab:
//
// .gopclntab/__gopclntab [elf/macho section]
// runtime.pclntab
// Carrier symbol for the entire pclntab section.
//
// runtime.pcheader (see: runtime/symtab.go:pcHeader)
// 8-byte magic
// nfunc [thearch.ptrsize bytes]
// offset to runtime.funcnametab from the beginning of runtime.pcheader
// offset to runtime.pclntab_old from beginning of runtime.pcheader
//
// runtime.funcnametab
// []list of null terminated function names
//
// runtime.cutab
// for i=0..#CUs
// for j=0..#max used file index in CU[i]
// uint32 offset into runtime.filetab for the filename[j]
//
// runtime.filetab
// []null terminated filename strings
//
// runtime.pctab
// []byte of deduplicated pc data.
//
// runtime.functab
// function table, alternating PC and offset to func struct [each entry thearch.ptrsize bytes]
// end PC [thearch.ptrsize bytes]
// func structures, pcdata offsets, func data.
state, compUnits, funcs := makePclntab(ctxt, container)
ldr := ctxt.loader
state.carrier = ldr.LookupOrCreateSym("runtime.pclntab", 0)
ldr.MakeSymbolUpdater(state.carrier).SetType(sym.SPCLNTAB)
ldr.SetAttrReachable(state.carrier, true)
setCarrierSym(sym.SPCLNTAB, state.carrier)
state.generatePCHeader(ctxt)
nameOffsets := state.generateFuncnametab(ctxt, funcs)
cuOffsets := state.generateFilenameTabs(ctxt, compUnits, funcs)
state.generatePctab(ctxt, funcs)
inlSyms := makeInlSyms(ctxt, funcs, nameOffsets)
state.generateFunctab(ctxt, funcs, inlSyms, cuOffsets, nameOffsets)
return state
}
func gorootFinal() string {
root := buildcfg.GOROOT
if final := os.Getenv("GOROOT_FINAL"); final != "" {
root = final
}
return root
}
func expandGoroot(s string) string {
const n = len("$GOROOT")
if len(s) >= n+1 && s[:n] == "$GOROOT" && (s[n] == '/' || s[n] == '\\') {
return filepath.ToSlash(filepath.Join(gorootFinal(), s[n:]))
}
return s
}
const (
BUCKETSIZE = 256 * MINFUNC
SUBBUCKETS = 16
SUBBUCKETSIZE = BUCKETSIZE / SUBBUCKETS
NOIDX = 0x7fffffff
)
// findfunctab generates a lookup table to quickly find the containing
// function for a pc. See src/runtime/symtab.go:findfunc for details.
func (ctxt *Link) findfunctab(state *pclntab, container loader.Bitmap) {
ldr := ctxt.loader
// find min and max address
min := ldr.SymValue(ctxt.Textp[0])
lastp := ctxt.Textp[len(ctxt.Textp)-1]
max := ldr.SymValue(lastp) + ldr.SymSize(lastp)
// for each subbucket, compute the minimum of all symbol indexes
// that map to that subbucket.
n := int32((max - min + SUBBUCKETSIZE - 1) / SUBBUCKETSIZE)
nbuckets := int32((max - min + BUCKETSIZE - 1) / BUCKETSIZE)
size := 4*int64(nbuckets) + int64(n)
writeFindFuncTab := func(_ *Link, s loader.Sym) {
t := ldr.MakeSymbolUpdater(s)
indexes := make([]int32, n)
for i := int32(0); i < n; i++ {
indexes[i] = NOIDX
}
idx := int32(0)
for i, s := range ctxt.Textp {
if !emitPcln(ctxt, s, container) {
continue
}
p := ldr.SymValue(s)
var e loader.Sym
i++
if i < len(ctxt.Textp) {
e = ctxt.Textp[i]
}
for e != 0 && !emitPcln(ctxt, e, container) && i < len(ctxt.Textp) {
e = ctxt.Textp[i]
i++
}
q := max
if e != 0 {
q = ldr.SymValue(e)
}
//print("%d: [%lld %lld] %s\n", idx, p, q, s->name);
for ; p < q; p += SUBBUCKETSIZE {
i = int((p - min) / SUBBUCKETSIZE)
if indexes[i] > idx {
indexes[i] = idx
}
}
i = int((q - 1 - min) / SUBBUCKETSIZE)
if indexes[i] > idx {
indexes[i] = idx
}
idx++
}
// fill in table
for i := int32(0); i < nbuckets; i++ {
base := indexes[i*SUBBUCKETS]
if base == NOIDX {
Errorf(nil, "hole in findfunctab")
}
t.SetUint32(ctxt.Arch, int64(i)*(4+SUBBUCKETS), uint32(base))
for j := int32(0); j < SUBBUCKETS && i*SUBBUCKETS+j < n; j++ {
idx = indexes[i*SUBBUCKETS+j]
if idx == NOIDX {
Errorf(nil, "hole in findfunctab")
}
if idx-base >= 256 {
Errorf(nil, "too many functions in a findfunc bucket! %d/%d %d %d", i, nbuckets, j, idx-base)
}
t.SetUint8(ctxt.Arch, int64(i)*(4+SUBBUCKETS)+4+int64(j), uint8(idx-base))
}
}
}
state.findfunctab = ctxt.createGeneratorSymbol("runtime.findfunctab", 0, sym.SRODATA, size, writeFindFuncTab)
ldr.SetAttrReachable(state.findfunctab, true)
ldr.SetAttrLocal(state.findfunctab, true)
}
// findContainerSyms returns a bitmap, indexed by symbol number, where there's
// a 1 for every container symbol.
func (ctxt *Link) findContainerSyms() loader.Bitmap {
ldr := ctxt.loader
container := loader.MakeBitmap(ldr.NSym())
// Find container symbols and mark them as such.
for _, s := range ctxt.Textp {
outer := ldr.OuterSym(s)
if outer != 0 {
container.Set(outer)
}
}
return container
}
| [
"\"GOROOT_FINAL\""
]
| []
| [
"GOROOT_FINAL"
]
| [] | ["GOROOT_FINAL"] | go | 1 | 0 | |
integration/try/try.go | package try
import (
"fmt"
"math"
"net/http"
"os"
"time"
"github.com/traefik/traefik/log"
)
const (
// CITimeoutMultiplier is the multiplier for all timeout in the CI
CITimeoutMultiplier = 3
maxInterval = 5 * time.Second
)
type timedAction func(timeout time.Duration, operation DoCondition) error
// Sleep pauses the current goroutine for at least the duration d.
// Deprecated: Use only when using one of the other Try[...] functions is not possible.
func Sleep(d time.Duration) {
d = applyCIMultiplier(d)
time.Sleep(d)
}
// Response is like Request, but returns the response for further
// processing at the call site.
// Conditions are not allowed since it would complicate signaling if the
// response body needs to be closed or not. Callers are expected to close on
// their own if the function returns a nil error.
func Response(req *http.Request, timeout time.Duration) (*http.Response, error) {
return doTryRequest(req, timeout, nil)
}
// ResponseUntilStatusCode is like Request, but returns the response for further
// processing at the call site.
// Conditions are not allowed since it would complicate signaling if the
// response body needs to be closed or not. Callers are expected to close on
// their own if the function returns a nil error.
func ResponseUntilStatusCode(req *http.Request, timeout time.Duration, statusCode int) (*http.Response, error) {
return doTryRequest(req, timeout, nil, StatusCodeIs(statusCode))
}
// GetRequest is like Do, but runs a request against the given URL and applies
// the condition on the response.
// ResponseCondition may be nil, in which case only the request against the URL must
// succeed.
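// A minimal usage sketch (URL and status code are illustrative):
//
//   err := try.GetRequest("http://127.0.0.1:8080/health", 10*time.Second, try.StatusCodeIs(http.StatusOK))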
func GetRequest(url string, timeout time.Duration, conditions ...ResponseCondition) error {
resp, err := doTryGet(url, timeout, nil, conditions...)
if resp != nil && resp.Body != nil {
defer resp.Body.Close()
}
return err
}
// Request is like Do, but runs a request against the given URL and applies
// the condition on the response.
// ResponseCondition may be nil, in which case only the request against the URL must
// succeed.
func Request(req *http.Request, timeout time.Duration, conditions ...ResponseCondition) error {
resp, err := doTryRequest(req, timeout, nil, conditions...)
if resp != nil && resp.Body != nil {
defer resp.Body.Close()
}
return err
}
// RequestWithTransport is like Do, but runs a request against the given URL and applies
// the condition on the response.
// ResponseCondition may be nil, in which case only the request against the URL must
// succeed.
func RequestWithTransport(req *http.Request, timeout time.Duration, transport *http.Transport, conditions ...ResponseCondition) error {
resp, err := doTryRequest(req, timeout, transport, conditions...)
if resp != nil && resp.Body != nil {
defer resp.Body.Close()
}
return err
}
// Do repeatedly executes an operation until no error condition occurs or the
// given timeout is reached, whatever comes first.
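// Minimal usage sketch (pingBackend stands in for any func() error):
//
//   err := try.Do(5*time.Second, func() error { return pingBackend() })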
func Do(timeout time.Duration, operation DoCondition) error {
if timeout <= 0 {
panic("timeout must be larger than zero")
}
interval := time.Duration(math.Ceil(float64(timeout) / 15.0))
if interval > maxInterval {
interval = maxInterval
}
timeout = applyCIMultiplier(timeout)
var err error
if err = operation(); err == nil {
fmt.Println("+")
return nil
}
fmt.Print("*")
stopTimer := time.NewTimer(timeout)
defer stopTimer.Stop()
retryTick := time.NewTicker(interval)
defer retryTick.Stop()
for {
select {
case <-stopTimer.C:
fmt.Println("-")
return fmt.Errorf("try operation failed: %s", err)
case <-retryTick.C:
fmt.Print("*")
if err = operation(); err == nil {
fmt.Println("+")
return err
}
}
}
}
func doTryGet(url string, timeout time.Duration, transport *http.Transport, conditions ...ResponseCondition) (*http.Response, error) {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
return doTryRequest(req, timeout, transport, conditions...)
}
func doTryRequest(request *http.Request, timeout time.Duration, transport *http.Transport, conditions ...ResponseCondition) (*http.Response, error) {
return doRequest(Do, timeout, request, transport, conditions...)
}
func doRequest(action timedAction, timeout time.Duration, request *http.Request, transport *http.Transport, conditions ...ResponseCondition) (*http.Response, error) {
var resp *http.Response
return resp, action(timeout, func() error {
var err error
client := http.DefaultClient
if transport != nil {
client.Transport = transport
}
resp, err = client.Do(request)
if err != nil {
return err
}
for _, condition := range conditions {
if err := condition(resp); err != nil {
return err
}
}
return nil
})
}
func applyCIMultiplier(timeout time.Duration) time.Duration {
ci := os.Getenv("CI")
if len(ci) > 0 {
log.Debug("Apply CI multiplier:", CITimeoutMultiplier)
return time.Duration(float64(timeout) * CITimeoutMultiplier)
}
return timeout
}
| [
"\"CI\""
]
| []
| [
"CI"
]
| [] | ["CI"] | go | 1 | 0 | |
src/restore_kanban_backup/core.py | # coding: utf-8
# Copyright (c) 2019-2020 Latona. All rights reserved.
import os
import subprocess
from aion.microservice import main_decorator, Options
from aion.kanban import Kanban
from aion.logger import lprint, initialize_logger
SERVICE_NAME = os.environ.get("SERVICE", "restore-mongo-kanban-backup")
initialize_logger(SERVICE_NAME)
@main_decorator(SERVICE_NAME)
def main_with_kanban(opt: Options):
lprint("start main_with_kanban()")
# get cache kanban
conn = opt.get_conn()
num = opt.get_number()
kanban = conn.get_one_kanban(SERVICE_NAME, num)
    # get the backup file name from the kanban metadata
metadata = kanban.get_metadata()
file_name = metadata.get("file_name")
data_path = kanban.get_data_path()
backup_file = '/var/lib/aion/Data/restore-mongo-kanban-backup_1/'+file_name
lprint(backup_file)
######### main function #############
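    # Import the backup file (a JSON export, typically produced by mongoexport)
    # back into the AionCore.kanban collection on the "mongo" host.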
subprocess.run(['mongoimport', '-h', 'mongo', '--db', 'AionCore',
'--collection', 'kanban', '--file', backup_file])
# output after kanban
conn.output_kanban(
result=True,
connection_key="default"
)
| []
| []
| [
"SERVICE"
]
| [] | ["SERVICE"] | python | 1 | 0 | |
WebService/config.py | import os
base_dir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SECRET_KEY = os.environ.get(
'SECRET_KEY') or '21bfd9e51ff7d2385b88973944a6425b'
SQLALCHEMY_DATABASE_URI = os.environ.get(
'DATABASE_URL') or 'sqlite:///' + os.path.join(base_dir, 'youtube.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
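# Illustrative Flask wiring (not part of this file): app.config.from_object(Config)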
| []
| []
| [
"SECRET_KEY",
"DATABASE_URL"
]
| [] | ["SECRET_KEY", "DATABASE_URL"] | python | 2 | 0 | |
ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/spotinst/spotinst_aws_elastigroup.py | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: spotinst_aws_elastigroup
version_added: 2.5
short_description: Create, update or delete Spotinst AWS Elastigroups
author: Spotinst
description:
- Can create, update, or delete Spotinst AWS Elastigroups
Launch configuration is part of the elastigroup configuration,
so no additional modules are necessary for handling the launch configuration.
You will have to have a credentials file in this location - <home>/.spotinst/credentials
The credentials file must contain a row that looks like this
token = <YOUR TOKEN>
Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-
requirements:
- spotinst >= 1.0.21
- python >= 2.7
options:
credentials_path:
description:
- (String) Optional parameter that allows to set a non-default credentials path.
Default is ~/.spotinst/credentials
account_id:
description:
- (String) Optional parameter that allows to set an account-id inside the module configuration
By default this is retrieved from the credentials path
availability_vs_cost:
choices:
- availabilityOriented
- costOriented
- balanced
description:
- (String) The strategy orientation.
required: true
availability_zones:
description:
- (List of Objects) a list of hash/dictionaries of Availability Zones that are configured in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are
name (String),
subnet_id (String),
placement_group_name (String),
required: true
block_device_mappings:
description:
- (List of Objects) a list of hash/dictionaries of Block Device Mappings for elastigroup instances;
You can specify virtual devices and EBS volumes.;
'[{"key":"value", "key":"value"}]';
keys allowed are
        device_name (String),
virtual_name (String),
no_device (String),
ebs (Object, expects the following keys-
delete_on_termination(Boolean),
encrypted(Boolean),
iops (Integer),
snapshot_id(Integer),
volume_type(String),
volume_size(Integer))
chef:
description:
- (Object) The Chef integration configuration.;
Expects the following keys - chef_server (String),
organization (String),
user (String),
pem_key (String),
chef_version (String)
draining_timeout:
description:
- (Integer) Time for instance to be drained from incoming requests and deregistered from ELB before termination.
ebs_optimized:
description:
- (Boolean) Enable EBS optimization for supported instances which are not enabled by default.;
Note - additional charges will be applied.
ebs_volume_pool:
description:
- (List of Objects) a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available;
'[{"key":"value", "key":"value"}]';
keys allowed are -
volume_ids (List of Strings),
device_name (String)
ecs:
description:
- (Object) The ECS integration configuration.;
Expects the following key -
cluster_name (String)
elastic_ips:
description:
- (List of Strings) List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances
fallback_to_od:
description:
- (Boolean) In case of no spots available, Elastigroup will launch an On-demand instance instead
health_check_grace_period:
description:
- (Integer) The amount of time, in seconds, after the instance has launched to start and check its health.
default: 300
health_check_unhealthy_duration_before_replacement:
description:
      - (Integer) Minimal amount of time instance should be unhealthy for us to consider it unhealthy.
health_check_type:
choices:
- ELB
- HCS
- TARGET_GROUP
- MLB
- EC2
description:
- (String) The service to use for the health check.
iam_role_name:
description:
- (String) The instance profile iamRole name
- Only use iam_role_arn, or iam_role_name
iam_role_arn:
description:
- (String) The instance profile iamRole arn
- Only use iam_role_arn, or iam_role_name
id:
description:
- (String) The group id if it already exists and you want to update, or delete it.
This will not work unless the uniqueness_by field is set to id.
When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created.
ignore_changes:
choices:
- image_id
- target
description:
- (List of Strings) list of fields on which changes should be ignored when updating
image_id:
description:
- (String) The image Id used to launch the instance.;
In case of conflict between Instance type and image type, an error will be returned
required: true
key_pair:
description:
- (String) Specify a Key Pair to attach to the instances
required: true
kubernetes:
description:
- (Object) The Kubernetes integration configuration.
Expects the following keys -
api_server (String),
token (String)
lifetime_period:
description:
- (String) lifetime period
load_balancers:
description:
- (List of Strings) List of classic ELB names
max_size:
description:
- (Integer) The upper limit number of instances that you can scale up to
required: true
mesosphere:
description:
- (Object) The Mesosphere integration configuration.
Expects the following key -
api_server (String)
min_size:
description:
- (Integer) The lower limit number of instances that you can scale down to
required: true
monitoring:
description:
- (Boolean) Describes whether instance Enhanced Monitoring is enabled
required: true
name:
description:
- (String) Unique name for elastigroup to be created, updated or deleted
required: true
network_interfaces:
description:
- (List of Objects) a list of hash/dictionaries of network interfaces to add to the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
description (String),
device_index (Integer),
secondary_private_ip_address_count (Integer),
associate_public_ip_address (Boolean),
delete_on_termination (Boolean),
groups (List of Strings),
network_interface_id (String),
private_ip_address (String),
subnet_id (String),
associate_ipv6_address (Boolean),
private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean))
on_demand_count:
description:
- (Integer) Required if risk is not set
- Number of on demand instances to launch. All other instances will be spot instances.;
Either set this parameter or the risk parameter
on_demand_instance_type:
description:
- (String) On-demand instance type that will be provisioned
required: true
opsworks:
description:
      - (Object) The elastigroup OpsWorks integration configuration.;
Expects the following key -
layer_id (String)
persistence:
description:
      - (Object) The Stateful elastigroup configuration.;
Accepts the following keys -
should_persist_root_device (Boolean),
should_persist_block_devices (Boolean),
should_persist_private_ip (Boolean)
product:
choices:
- Linux/UNIX
- SUSE Linux
- Windows
- Linux/UNIX (Amazon VPC)
- SUSE Linux (Amazon VPC)
- Windows
description:
      - (String) Operating system type.
required: true
rancher:
description:
- (Object) The Rancher integration configuration.;
Expects the following keys -
access_key (String),
secret_key (String),
master_host (String)
right_scale:
description:
- (Object) The Rightscale integration configuration.;
Expects the following keys -
account_id (String),
refresh_token (String)
risk:
description:
- (Integer) required if on demand is not set. The percentage of Spot instances to launch (0 - 100).
roll_config:
description:
- (Object) Roll configuration.;
If you would like the group to roll after updating, please use this feature.
Accepts the following keys -
batch_size_percentage(Integer, Required),
grace_period - (Integer, Required),
health_check_type(String, Optional)
scheduled_tasks:
description:
- (List of Objects) a list of hash/dictionaries of scheduled tasks to configure in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
adjustment (Integer),
scale_target_capacity (Integer),
scale_min_capacity (Integer),
scale_max_capacity (Integer),
adjustment_percentage (Integer),
batch_size_percentage (Integer),
cron_expression (String),
frequency (String),
grace_period (Integer),
task_type (String, required),
is_enabled (Boolean)
security_group_ids:
description:
- (List of Strings) One or more security group IDs. ;
In case of update it will override the existing Security Group with the new given array
required: true
shutdown_script:
description:
- (String) The Base64-encoded shutdown script that executes prior to instance termination.
Encode before setting.
signals:
description:
- (List of Objects) a list of hash/dictionaries of signals to configure in the elastigroup;
keys allowed are -
name (String, required),
timeout (Integer)
spin_up_time:
description:
- (Integer) spin up time, in seconds, for the instance
spot_instance_types:
description:
- (List of Strings) Spot instance type that will be provisioned.
required: true
state:
choices:
- present
- absent
description:
- (String) create or delete the elastigroup
tags:
description:
      - (List of tagKey:tagValue pairs) a list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value);
target:
description:
- (Integer) The number of instances to launch
required: true
target_group_arns:
description:
- (List of Strings) List of target group arns instances should be registered to
tenancy:
choices:
- default
- dedicated
description:
- (String) dedicated vs shared tenancy
terminate_at_end_of_billing_hour:
description:
- (Boolean) terminate at the end of billing hour
unit:
choices:
- instance
- weight
description:
- (String) The capacity unit to launch instances by.
required: true
up_scaling_policies:
description:
- (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
policy_name (String, required),
namespace (String, required),
metric_name (String, required),
dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
statistic (String, required)
evaluation_periods (String, required),
period (String, required),
threshold (String, required),
cooldown (String, required),
unit (String, required),
operator (String, required),
action_type (String, required),
adjustment (String),
min_target_capacity (String),
target (String),
maximum (String),
minimum (String)
down_scaling_policies:
description:
- (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
policy_name (String, required),
namespace (String, required),
metric_name (String, required),
dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)),
statistic (String, required),
evaluation_periods (String, required),
period (String, required),
threshold (String, required),
cooldown (String, required),
unit (String, required),
operator (String, required),
action_type (String, required),
adjustment (String),
max_target_capacity (String),
target (String),
maximum (String),
minimum (String)
target_tracking_policies:
description:
- (List of Objects) a list of hash/dictionaries of target tracking policies to configure in the elastigroup;
'[{"key":"value", "key":"value"}]';
keys allowed are -
policy_name (String, required),
namespace (String, required),
source (String, required),
metric_name (String, required),
statistic (String, required),
unit (String, required),
cooldown (String, required),
target (String, required)
uniqueness_by:
choices:
- id
- name
description:
- (String) If your group names are not unique, you may use this feature to update or delete a specific group.
Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created.
user_data:
description:
- (String) Base64-encoded MIME user data. Encode before setting the value.
utilize_reserved_instances:
description:
- (Boolean) In case of any available Reserved Instances,
Elastigroup will utilize your reservations before purchasing Spot instances.
wait_for_instances:
description:
- (Boolean) Whether or not the elastigroup creation / update actions should wait for the instances to spin
wait_timeout:
description:
- (Integer) How long the module should wait for instances before failing the action.;
Only works if wait_for_instances is True.
"""
EXAMPLES = '''
# Basic configuration YAML example
- hosts: localhost
tasks:
- name: create elastigroup
spotinst_aws_elastigroup:
state: present
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 15
min_size: 0
target: 0
unit: instance
monitoring: True
name: ansible-group
on_demand_instance_type: c3.large
product: Linux/UNIX
load_balancers:
- test-lb-1
security_group_ids:
- sg-8f4b8fe9
spot_instance_types:
- c3.large
do_not_update:
- image_id
- target
register: result
- debug: var=result
# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips
- hosts: localhost
tasks:
- name: create elastigroup
spotinst_aws_elastigroup:
state: present
account_id: act-1a9dd2b
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
tags:
- Environment: someEnvValue
- OtherTagKey: otherValue
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 5
min_size: 0
target: 0
unit: instance
monitoring: True
name: ansible-group-tal
on_demand_instance_type: c3.large
product: Linux/UNIX
security_group_ids:
- sg-8f4b8fe9
block_device_mappings:
- device_name: '/dev/sda1'
ebs:
volume_size: 100
volume_type: gp2
spot_instance_types:
- c3.large
do_not_update:
- image_id
wait_for_instances: True
wait_timeout: 600
register: result
- name: Store private ips to file
shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
with_items: "{{ result.instances }}"
- debug: var=result
# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id
# In organizations with more than one account, it is required to specify an account_id
- hosts: localhost
tasks:
- name: create elastigroup
spotinst_aws_elastigroup:
state: present
account_id: act-1a9dd2b
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
tags:
- Environment: someEnvValue
- OtherTagKey: otherValue
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 5
min_size: 0
target: 0
unit: instance
monitoring: True
name: ansible-group-tal
on_demand_instance_type: c3.large
product: Linux/UNIX
security_group_ids:
- sg-8f4b8fe9
block_device_mappings:
- device_name: '/dev/xvda'
ebs:
volume_size: 60
volume_type: gp2
- device_name: '/dev/xvdb'
ebs:
volume_size: 120
volume_type: gp2
spot_instance_types:
- c3.large
do_not_update:
- image_id
wait_for_instances: True
wait_timeout: 600
register: result
- name: Store private ips to file
shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
with_items: "{{ result.instances }}"
- debug: var=result
# In this example we have set up block device mapping with ephemeral devices
- hosts: localhost
tasks:
- name: create elastigroup
spotinst_aws_elastigroup:
state: present
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 15
min_size: 0
target: 0
unit: instance
block_device_mappings:
- device_name: '/dev/xvda'
virtual_name: ephemeral0
- device_name: '/dev/xvdb/'
virtual_name: ephemeral1
monitoring: True
name: ansible-group
on_demand_instance_type: c3.large
product: Linux/UNIX
load_balancers:
- test-lb-1
security_group_ids:
- sg-8f4b8fe9
spot_instance_types:
- c3.large
do_not_update:
- image_id
- target
register: result
- debug: var=result
# In this example we create a basic group configuration with a network interface defined.
# Each network interface must have a device index
- hosts: localhost
tasks:
- name: create elastigroup
spotinst_aws_elastigroup:
state: present
risk: 100
availability_vs_cost: balanced
network_interfaces:
- associate_public_ip_address: true
device_index: 0
availability_zones:
- name: us-west-2a
subnet_id: subnet-2b68a15c
image_id: ami-f173cc91
key_pair: spotinst-oregon
max_size: 15
min_size: 0
target: 0
unit: instance
monitoring: True
name: ansible-group
on_demand_instance_type: c3.large
product: Linux/UNIX
load_balancers:
- test-lb-1
security_group_ids:
- sg-8f4b8fe9
spot_instance_types:
- c3.large
do_not_update:
- image_id
- target
register: result
- debug: var=result
# In this example we create a basic group configuration with a target tracking scaling policy defined
- hosts: localhost
tasks:
- name: create elastigroup
spotinst_aws_elastigroup:
account_id: act-92d45673
state: present
risk: 100
availability_vs_cost: balanced
availability_zones:
- name: us-west-2a
subnet_id: subnet-79da021e
image_id: ami-f173cc91
fallback_to_od: true
tags:
- Creator: ValueOfCreatorTag
- Environment: ValueOfEnvironmentTag
key_pair: spotinst-labs-oregon
max_size: 10
min_size: 0
target: 2
unit: instance
monitoring: True
name: ansible-group-1
on_demand_instance_type: c3.large
product: Linux/UNIX
security_group_ids:
- sg-46cdc13d
spot_instance_types:
- c3.large
target_tracking_policies:
- policy_name: target-tracking-1
namespace: AWS/EC2
metric_name: CPUUtilization
statistic: average
unit: percent
target: 50
cooldown: 120
do_not_update:
- image_id
register: result
- debug: var=result
'''
RETURN = '''
---
instances:
description: List of active elastigroup instances and their details.
returned: success
type: dict
sample: [
{
"spotInstanceRequestId": "sir-regs25zp",
"instanceId": "i-09640ad8678234c",
"instanceType": "m4.large",
"product": "Linux/UNIX",
"availabilityZone": "us-west-2b",
"privateIp": "180.0.2.244",
"createdAt": "2017-07-17T12:46:18.000Z",
"status": "fulfilled"
}
]
group_id:
description: Created / Updated group's ID.
returned: success
type: string
sample: "sig-12345"
'''
HAS_SPOTINST_SDK = False
__metaclass__ = type
import os
import time
from ansible.module_utils.basic import AnsibleModule
try:
import spotinst
from spotinst import SpotinstClientException
HAS_SPOTINST_SDK = True
except ImportError:
pass
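# The field tuples below map Ansible module parameters onto Spotinst SDK objects:
# plain strings are copied 1:1, while dict entries rename a field
# (ansible_field_name -> spotinst_field_name).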
eni_fields = ('description',
'device_index',
'secondary_private_ip_address_count',
'associate_public_ip_address',
'delete_on_termination',
'groups',
'network_interface_id',
'private_ip_address',
'subnet_id',
'associate_ipv6_address')
private_ip_fields = ('private_ip_address',
'primary')
capacity_fields = (dict(ansible_field_name='min_size',
spotinst_field_name='minimum'),
dict(ansible_field_name='max_size',
spotinst_field_name='maximum'),
'target',
'unit')
lspec_fields = ('user_data',
'key_pair',
'tenancy',
'shutdown_script',
'monitoring',
'ebs_optimized',
'image_id',
'health_check_type',
'health_check_grace_period',
'health_check_unhealthy_duration_before_replacement',
'security_group_ids')
iam_fields = (dict(ansible_field_name='iam_role_name',
spotinst_field_name='name'),
dict(ansible_field_name='iam_role_arn',
spotinst_field_name='arn'))
scheduled_task_fields = ('adjustment',
'adjustment_percentage',
'batch_size_percentage',
'cron_expression',
'frequency',
'grace_period',
'task_type',
'is_enabled',
'scale_target_capacity',
'scale_min_capacity',
'scale_max_capacity')
scaling_policy_fields = ('policy_name',
'namespace',
'metric_name',
'dimensions',
'statistic',
'evaluation_periods',
'period',
'threshold',
'cooldown',
'unit',
'operator')
tracking_policy_fields = ('policy_name',
'namespace',
'source',
'metric_name',
'statistic',
'unit',
'cooldown',
'target',
'threshold')
action_fields = (dict(ansible_field_name='action_type',
spotinst_field_name='type'),
'adjustment',
'min_target_capacity',
'max_target_capacity',
'target',
'minimum',
'maximum')
signal_fields = ('name',
'timeout')
multai_lb_fields = ('balancer_id',
'project_id',
'target_set_id',
'az_awareness',
'auto_weight')
persistence_fields = ('should_persist_root_device',
'should_persist_block_devices',
'should_persist_private_ip')
strategy_fields = ('risk',
'utilize_reserved_instances',
'fallback_to_od',
'on_demand_count',
'availability_vs_cost',
'draining_timeout',
'spin_up_time',
'lifetime_period')
ebs_fields = ('delete_on_termination',
'encrypted',
'iops',
'snapshot_id',
'volume_type',
'volume_size')
bdm_fields = ('device_name',
'virtual_name',
'no_device')
kubernetes_fields = ('api_server',
'token')
right_scale_fields = ('account_id',
'refresh_token')
rancher_fields = ('access_key',
'secret_key',
'master_host')
chef_fields = ('chef_server',
'organization',
'user',
'pem_key',
'chef_version')
az_fields = ('name',
'subnet_id',
'placement_group_name')
opsworks_fields = ('layer_id',)
scaling_strategy_fields = ('terminate_at_end_of_billing_hour',)
mesosphere_fields = ('api_server',)
ecs_fields = ('cluster_name',)
multai_fields = ('multai_token',)
def handle_elastigroup(client, module):
has_changed = False
should_create = False
group_id = None
message = 'None'
name = module.params.get('name')
state = module.params.get('state')
uniqueness_by = module.params.get('uniqueness_by')
external_group_id = module.params.get('id')
if uniqueness_by == 'id':
if external_group_id is None:
should_create = True
else:
should_create = False
group_id = external_group_id
else:
groups = client.get_elastigroups()
should_create, group_id = find_group_with_same_name(groups, name)
if should_create is True:
if state == 'present':
eg = expand_elastigroup(module, is_update=False)
module.debug(str(" [INFO] " + message + "\n"))
group = client.create_elastigroup(group=eg)
group_id = group['id']
message = 'Created group Successfully.'
has_changed = True
elif state == 'absent':
message = 'Cannot delete non-existent group.'
has_changed = False
else:
eg = expand_elastigroup(module, is_update=True)
if state == 'present':
group = client.update_elastigroup(group_update=eg, group_id=group_id)
message = 'Updated group successfully.'
try:
roll_config = module.params.get('roll_config')
if roll_config:
eg_roll = spotinst.aws_elastigroup.Roll(
batch_size_percentage=roll_config.get('batch_size_percentage'),
grace_period=roll_config.get('grace_period'),
health_check_type=roll_config.get('health_check_type')
)
roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id)
message = 'Updated and started rolling the group successfully.'
except SpotinstClientException as exc:
message = 'Updated group successfully, but failed to perform roll. Error:' + str(exc)
has_changed = True
elif state == 'absent':
try:
client.delete_elastigroup(group_id=group_id)
except SpotinstClientException as exc:
if "GROUP_DOESNT_EXIST" in exc.message:
pass
else:
module.fail_json(msg="Error while attempting to delete group : " + exc.message)
message = 'Deleted group successfully.'
has_changed = True
return group_id, message, has_changed
def retrieve_group_instances(client, module, group_id):
wait_timeout = module.params.get('wait_timeout')
wait_for_instances = module.params.get('wait_for_instances')
if wait_timeout is None:
wait_timeout = 300
wait_timeout = time.time() + wait_timeout
target = module.params.get('target')
state = module.params.get('state')
instances = list()
if state == 'present' and group_id is not None and wait_for_instances is True:
is_amount_fulfilled = False
while is_amount_fulfilled is False and wait_timeout > time.time():
instances = list()
amount_of_fulfilled_instances = 0
active_instances = client.get_elastigroup_active_instances(group_id=group_id)
for active_instance in active_instances:
if active_instance.get('private_ip') is not None:
amount_of_fulfilled_instances += 1
instances.append(active_instance)
if amount_of_fulfilled_instances >= target:
is_amount_fulfilled = True
time.sleep(10)
return instances
def find_group_with_same_name(groups, name):
for group in groups:
if group['name'] == name:
return False, group.get('id')
return True, None
def expand_elastigroup(module, is_update):
do_not_update = module.params['do_not_update']
name = module.params.get('name')
eg = spotinst.aws_elastigroup.Elastigroup()
description = module.params.get('description')
if name is not None:
eg.name = name
if description is not None:
eg.description = description
# Capacity
expand_capacity(eg, module, is_update, do_not_update)
# Strategy
expand_strategy(eg, module)
# Scaling
expand_scaling(eg, module)
# Third party integrations
expand_integrations(eg, module)
# Compute
expand_compute(eg, module, is_update, do_not_update)
# Multai
expand_multai(eg, module)
# Scheduling
expand_scheduled_tasks(eg, module)
return eg
def expand_compute(eg, module, is_update, do_not_update):
elastic_ips = module.params['elastic_ips']
on_demand_instance_type = module.params.get('on_demand_instance_type')
spot_instance_types = module.params['spot_instance_types']
ebs_volume_pool = module.params['ebs_volume_pool']
availability_zones_list = module.params['availability_zones']
product = module.params.get('product')
eg_compute = spotinst.aws_elastigroup.Compute()
if product is not None:
# Only put product on group creation
if is_update is not True:
eg_compute.product = product
if elastic_ips is not None:
eg_compute.elastic_ips = elastic_ips
    if on_demand_instance_type is not None or spot_instance_types is not None:
        eg_instance_types = spotinst.aws_elastigroup.InstanceTypes()
        if on_demand_instance_type is not None:
            eg_instance_types.ondemand = on_demand_instance_type
        if spot_instance_types is not None:
            eg_instance_types.spot = spot_instance_types
if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None:
eg_compute.instance_types = eg_instance_types
expand_ebs_volume_pool(eg_compute, ebs_volume_pool)
eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone')
expand_launch_spec(eg_compute, module, is_update, do_not_update)
eg.compute = eg_compute
def expand_ebs_volume_pool(eg_compute, ebs_volumes_list):
if ebs_volumes_list is not None:
eg_volumes = []
for volume in ebs_volumes_list:
eg_volume = spotinst.aws_elastigroup.EbsVolume()
if volume.get('device_name') is not None:
eg_volume.device_name = volume.get('device_name')
if volume.get('volume_ids') is not None:
eg_volume.volume_ids = volume.get('volume_ids')
if eg_volume.device_name is not None:
eg_volumes.append(eg_volume)
if len(eg_volumes) > 0:
eg_compute.ebs_volume_pool = eg_volumes
def expand_launch_spec(eg_compute, module, is_update, do_not_update):
eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification')
if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None:
eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole')
tags = module.params['tags']
load_balancers = module.params['load_balancers']
target_group_arns = module.params['target_group_arns']
block_device_mappings = module.params['block_device_mappings']
network_interfaces = module.params['network_interfaces']
if is_update is True:
if 'image_id' in do_not_update:
delattr(eg_launch_spec, 'image_id')
expand_tags(eg_launch_spec, tags)
expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns)
expand_block_device_mappings(eg_launch_spec, block_device_mappings)
expand_network_interfaces(eg_launch_spec, network_interfaces)
eg_compute.launch_specification = eg_launch_spec
def expand_integrations(eg, module):
rancher = module.params.get('rancher')
mesosphere = module.params.get('mesosphere')
ecs = module.params.get('ecs')
kubernetes = module.params.get('kubernetes')
right_scale = module.params.get('right_scale')
opsworks = module.params.get('opsworks')
chef = module.params.get('chef')
integration_exists = False
eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations()
if mesosphere is not None:
eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere')
integration_exists = True
if ecs is not None:
eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration')
integration_exists = True
if kubernetes is not None:
eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration')
integration_exists = True
if right_scale is not None:
eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration')
integration_exists = True
if opsworks is not None:
eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration')
integration_exists = True
if rancher is not None:
eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher')
integration_exists = True
if chef is not None:
eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration')
integration_exists = True
if integration_exists:
eg.third_parties_integration = eg_integrations
def expand_capacity(eg, module, is_update, do_not_update):
eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity')
if is_update is True:
delattr(eg_capacity, 'unit')
if 'target' in do_not_update:
delattr(eg_capacity, 'target')
eg.capacity = eg_capacity
def expand_strategy(eg, module):
persistence = module.params.get('persistence')
signals = module.params.get('signals')
eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy')
terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour')
if terminate_at_end_of_billing_hour is not None:
eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields,
module.params, 'ScalingStrategy')
if persistence is not None:
eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence')
if signals is not None:
eg_signals = expand_list(signals, signal_fields, 'Signal')
if len(eg_signals) > 0:
eg_strategy.signals = eg_signals
eg.strategy = eg_strategy
def expand_multai(eg, module):
multai_load_balancers = module.params.get('multai_load_balancers')
eg_multai = expand_fields(multai_fields, module.params, 'Multai')
if multai_load_balancers is not None:
eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer')
if len(eg_multai_load_balancers) > 0:
eg_multai.balancers = eg_multai_load_balancers
eg.multai = eg_multai
def expand_scheduled_tasks(eg, module):
scheduled_tasks = module.params.get('scheduled_tasks')
if scheduled_tasks is not None:
eg_scheduling = spotinst.aws_elastigroup.Scheduling()
eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask')
if len(eg_tasks) > 0:
eg_scheduling.tasks = eg_tasks
eg.scheduling = eg_scheduling
def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns):
if load_balancers is not None or target_group_arns is not None:
eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig()
eg_total_lbs = []
if load_balancers is not None:
for elb_name in load_balancers:
eg_elb = spotinst.aws_elastigroup.LoadBalancer()
if elb_name is not None:
eg_elb.name = elb_name
eg_elb.type = 'CLASSIC'
eg_total_lbs.append(eg_elb)
if target_group_arns is not None:
for target_arn in target_group_arns:
eg_elb = spotinst.aws_elastigroup.LoadBalancer()
if target_arn is not None:
eg_elb.arn = target_arn
eg_elb.type = 'TARGET_GROUP'
eg_total_lbs.append(eg_elb)
if len(eg_total_lbs) > 0:
eg_load_balancers_config.load_balancers = eg_total_lbs
eg_launchspec.load_balancers_config = eg_load_balancers_config
def expand_tags(eg_launchspec, tags):
if tags is not None:
eg_tags = []
for tag in tags:
eg_tag = spotinst.aws_elastigroup.Tag()
            if tag.keys():
                eg_tag.tag_key = list(tag.keys())[0]
            if tag.values():
                eg_tag.tag_value = list(tag.values())[0]
eg_tags.append(eg_tag)
if len(eg_tags) > 0:
eg_launchspec.tags = eg_tags
def expand_block_device_mappings(eg_launchspec, bdms):
if bdms is not None:
eg_bdms = []
for bdm in bdms:
eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping')
if bdm.get('ebs') is not None:
eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS')
eg_bdms.append(eg_bdm)
if len(eg_bdms) > 0:
eg_launchspec.block_device_mappings = eg_bdms
def expand_network_interfaces(eg_launchspec, enis):
if enis is not None:
eg_enis = []
for eni in enis:
eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface')
eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress')
if eg_pias is not None:
eg_eni.private_ip_addresses = eg_pias
eg_enis.append(eg_eni)
if len(eg_enis) > 0:
eg_launchspec.network_interfaces = eg_enis
def expand_scaling(eg, module):
up_scaling_policies = module.params['up_scaling_policies']
down_scaling_policies = module.params['down_scaling_policies']
target_tracking_policies = module.params['target_tracking_policies']
eg_scaling = spotinst.aws_elastigroup.Scaling()
if up_scaling_policies is not None:
eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies)
if len(eg_up_scaling_policies) > 0:
eg_scaling.up = eg_up_scaling_policies
if down_scaling_policies is not None:
eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies)
if len(eg_down_scaling_policies) > 0:
eg_scaling.down = eg_down_scaling_policies
if target_tracking_policies is not None:
eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies)
if len(eg_target_tracking_policies) > 0:
eg_scaling.target = eg_target_tracking_policies
if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None:
eg.scaling = eg_scaling
def expand_list(items, fields, class_name):
if items is not None:
new_objects_list = []
for item in items:
new_obj = expand_fields(fields, item, class_name)
new_objects_list.append(new_obj)
return new_objects_list
def expand_fields(fields, item, class_name):
class_ = getattr(spotinst.aws_elastigroup, class_name)
new_obj = class_()
# Handle primitive fields
if item is not None:
for field in fields:
if isinstance(field, dict):
ansible_field_name = field['ansible_field_name']
spotinst_field_name = field['spotinst_field_name']
else:
ansible_field_name = field
spotinst_field_name = field
if item.get(ansible_field_name) is not None:
setattr(new_obj, spotinst_field_name, item.get(ansible_field_name))
return new_obj
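# Illustrative example (hypothetical values): expand_fields(iam_fields, {'iam_role_name': 'my-role'}, 'IamRole')
# returns a spotinst.aws_elastigroup.IamRole whose `name` attribute is 'my-role',
# because iam_fields maps the Ansible parameter name onto the Spotinst SDK field name.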
def expand_scaling_policies(scaling_policies):
eg_scaling_policies = []
for policy in scaling_policies:
eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy')
eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction')
eg_scaling_policies.append(eg_policy)
return eg_scaling_policies
def expand_target_tracking_policies(tracking_policies):
eg_tracking_policies = []
for policy in tracking_policies:
eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy')
eg_tracking_policies.append(eg_policy)
return eg_tracking_policies
def main():
fields = dict(
account_id=dict(type='str'),
availability_vs_cost=dict(type='str', required=True),
availability_zones=dict(type='list', required=True),
block_device_mappings=dict(type='list'),
chef=dict(type='dict'),
credentials_path=dict(type='path', default="~/.spotinst/credentials"),
do_not_update=dict(default=[], type='list'),
down_scaling_policies=dict(type='list'),
draining_timeout=dict(type='int'),
ebs_optimized=dict(type='bool'),
ebs_volume_pool=dict(type='list'),
ecs=dict(type='dict'),
elastic_beanstalk=dict(type='dict'),
elastic_ips=dict(type='list'),
fallback_to_od=dict(type='bool'),
id=dict(type='str'),
health_check_grace_period=dict(type='int'),
health_check_type=dict(type='str'),
health_check_unhealthy_duration_before_replacement=dict(type='int'),
iam_role_arn=dict(type='str'),
iam_role_name=dict(type='str'),
image_id=dict(type='str', required=True),
key_pair=dict(type='str'),
kubernetes=dict(type='dict'),
lifetime_period=dict(type='int'),
load_balancers=dict(type='list'),
max_size=dict(type='int', required=True),
mesosphere=dict(type='dict'),
min_size=dict(type='int', required=True),
monitoring=dict(type='str'),
multai_load_balancers=dict(type='list'),
multai_token=dict(type='str'),
name=dict(type='str', required=True),
network_interfaces=dict(type='list'),
on_demand_count=dict(type='int'),
on_demand_instance_type=dict(type='str'),
opsworks=dict(type='dict'),
persistence=dict(type='dict'),
product=dict(type='str', required=True),
rancher=dict(type='dict'),
right_scale=dict(type='dict'),
risk=dict(type='int'),
roll_config=dict(type='dict'),
scheduled_tasks=dict(type='list'),
security_group_ids=dict(type='list', required=True),
shutdown_script=dict(type='str'),
signals=dict(type='list'),
spin_up_time=dict(type='int'),
spot_instance_types=dict(type='list', required=True),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list'),
target=dict(type='int', required=True),
target_group_arns=dict(type='list'),
tenancy=dict(type='str'),
terminate_at_end_of_billing_hour=dict(type='bool'),
token=dict(type='str'),
unit=dict(type='str'),
user_data=dict(type='str'),
utilize_reserved_instances=dict(type='bool'),
uniqueness_by=dict(default='name', choices=['name', 'id']),
up_scaling_policies=dict(type='list'),
target_tracking_policies=dict(type='list'),
wait_for_instances=dict(type='bool', default=False),
wait_timeout=dict(type='int')
)
module = AnsibleModule(argument_spec=fields)
if not HAS_SPOTINST_SDK:
module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst)")
# Retrieve creds file variables
creds_file_loaded_vars = dict()
credentials_path = module.params.get('credentials_path')
try:
with open(credentials_path, "r") as creds:
for line in creds:
eq_index = line.find('=')
var_name = line[:eq_index].strip()
string_value = line[eq_index + 1:].strip()
creds_file_loaded_vars[var_name] = string_value
except IOError:
pass
# End of creds file retrieval
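    # The parser above expects plain `key = value` lines, for example (values are illustrative):
    #   token = <spotinst-api-token>
    #   account = act-12345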
token = module.params.get('token')
if not token:
token = os.environ.get('SPOTINST_TOKEN')
if not token:
token = creds_file_loaded_vars.get("token")
account = module.params.get('account_id')
if not account:
account = os.environ.get('ACCOUNT')
if not account:
account = creds_file_loaded_vars.get("account")
client = spotinst.SpotinstClient(auth_token=token, print_output=False)
if account is not None:
client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account)
group_id, message, has_changed = handle_elastigroup(client=client, module=module)
instances = retrieve_group_instances(client=client, module=module, group_id=group_id)
module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances)
if __name__ == '__main__':
main()
| []
| []
| [
"ACCOUNT",
"SPOTINST_TOKEN"
]
| [] | ["ACCOUNT", "SPOTINST_TOKEN"] | python | 2 | 0 | |
distsql/request_builder_test.go | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package distsql
import (
"os"
"testing"
. "github.com/pingcap/check"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tidb/util/stringutil"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tipb/go-tipb"
)
var _ = Suite(&testSuite{})
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
TestingT(t)
}
type testSuite struct {
sctx sessionctx.Context
}
func (s *testSuite) SetUpSuite(c *C) {
ctx := mock.NewContext()
ctx.GetSessionVars().StmtCtx = &stmtctx.StatementContext{
MemTracker: memory.NewTracker(stringutil.StringerStr("testSuite"), -1),
DiskTracker: disk.NewTracker(stringutil.StringerStr("testSuite"), -1),
}
ctx.Store = &mock.Store{
Client: &mock.Client{
MockResponse: &mockResponse{
ctx: ctx,
batch: 1,
total: 2,
},
},
}
s.sctx = ctx
}
func (s *testSuite) TearDownSuite(c *C) {
}
func (s *testSuite) SetUpTest(c *C) {
testleak.BeforeTest()
ctx := s.sctx.(*mock.Context)
store := ctx.Store.(*mock.Store)
store.Client = &mock.Client{
MockResponse: &mockResponse{
ctx: ctx,
batch: 1,
total: 2,
},
}
}
func (s *testSuite) TearDownTest(c *C) {
testleak.AfterTest(c)()
}
type handleRange struct {
start int64
end int64
}
func (s *testSuite) getExpectedRanges(tid int64, hrs []*handleRange) []kv.KeyRange {
krs := make([]kv.KeyRange, 0, len(hrs))
for _, hr := range hrs {
low := codec.EncodeInt(nil, hr.start)
high := codec.EncodeInt(nil, hr.end)
high = []byte(kv.Key(high).PrefixNext())
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
return krs
}
func (s *testSuite) TestTableHandlesToKVRanges(c *C) {
handles := []int64{0, 2, 3, 4, 5, 10, 11, 100, 9223372036854775806, 9223372036854775807}
// Build expected key ranges.
hrs := make([]*handleRange, 0, len(handles))
hrs = append(hrs, &handleRange{start: 0, end: 0})
hrs = append(hrs, &handleRange{start: 2, end: 5})
hrs = append(hrs, &handleRange{start: 10, end: 11})
hrs = append(hrs, &handleRange{start: 100, end: 100})
hrs = append(hrs, &handleRange{start: 9223372036854775806, end: 9223372036854775807})
// Build key ranges.
expect := s.getExpectedRanges(1, hrs)
actual := TableHandlesToKVRanges(1, handles)
// Compare key ranges and expected key ranges.
c.Assert(len(actual), Equals, len(expect))
for i := range actual {
c.Assert(actual[i].StartKey, DeepEquals, expect[i].StartKey)
c.Assert(actual[i].EndKey, DeepEquals, expect[i].EndKey)
}
}
func (s *testSuite) TestTableRangesToKVRanges(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
actual := TableRangesToKVRanges(13, ranges, nil)
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
}
for i := 0; i < len(actual); i++ {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
func (s *testSuite) TestIndexRangesToKVRanges(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
}
actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 12, 15, ranges, nil)
c.Assert(err, IsNil)
for i := range actual {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
func (s *testSuite) TestRequestBuilder1(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
actual, err := (&RequestBuilder{}).SetTableRanges(12, ranges, nil).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
},
Cacheable: true,
KeepOrder: false,
Desc: false,
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder2(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
actual, err := (&RequestBuilder{}).SetIndexRanges(new(stmtctx.StatementContext), 12, 15, ranges).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
},
Cacheable: true,
KeepOrder: false,
Desc: false,
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder3(c *C) {
handles := []int64{0, 2, 3, 4, 5, 10, 11, 100}
actual, err := (&RequestBuilder{}).SetTableHandles(15, handles).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
},
},
Cacheable: true,
KeepOrder: false,
Desc: false,
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder4(c *C) {
keyRanges := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
},
}
actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetStreaming(true).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: keyRanges,
Cacheable: true,
KeepOrder: false,
Desc: false,
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
Streaming: true,
NotFillCache: false,
SyncLog: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder5(c *C) {
keyRanges := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
},
}
actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
SetAnalyzeRequest(&tipb.AnalyzeReq{}).
SetKeepOrder(true).
SetConcurrency(15).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 104,
StartTs: 0x0,
Data: []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0},
KeyRanges: keyRanges,
KeepOrder: true,
Desc: false,
Concurrency: 15,
IsolationLevel: kv.RC,
Priority: 1,
NotFillCache: true,
SyncLog: false,
Streaming: false,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder6(c *C) {
keyRanges := []kv.KeyRange{
{
StartKey: kv.Key{0x00, 0x01},
EndKey: kv.Key{0x02, 0x03},
},
}
concurrency := 10
actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
SetChecksumRequest(&tipb.ChecksumRequest{}).
SetConcurrency(concurrency).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 105,
StartTs: 0x0,
Data: []uint8{0x10, 0x0, 0x18, 0x0},
KeyRanges: keyRanges,
KeepOrder: false,
Desc: false,
Concurrency: concurrency,
IsolationLevel: 0,
Priority: 0,
NotFillCache: true,
SyncLog: false,
Streaming: false,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder7(c *C) {
vars := variable.NewSessionVars()
vars.SetReplicaRead(kv.ReplicaReadFollower)
concurrency := 10
actual, err := (&RequestBuilder{}).
SetFromSessionVars(vars).
SetConcurrency(concurrency).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 0,
StartTs: 0x0,
KeepOrder: false,
Desc: false,
Concurrency: concurrency,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadFollower,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder8(c *C) {
sv := variable.NewSessionVars()
sv.SnapshotInfoschema = infoschema.MockInfoSchemaWithSchemaVer(nil, 10000)
actual, err := (&RequestBuilder{}).
SetFromSessionVars(sv).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 0,
StartTs: 0x0,
Data: []uint8(nil),
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
MemTracker: (*memory.Tracker)(nil),
ReplicaRead: 0x1,
SchemaVar: 10000,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestTableRangesToKVRangesWithFbs(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(4)},
},
}
hist := statistics.NewHistogram(1, 30, 30, 0, types.NewFieldType(mysql.TypeLonglong), chunk.InitialCapacity, 0)
for i := 0; i < 10; i++ {
hist.Bounds.AppendInt64(0, int64(i))
hist.Bounds.AppendInt64(0, int64(i+2))
hist.Buckets = append(hist.Buckets, statistics.Bucket{Repeat: 10, Count: int64(i + 30)})
}
fb := statistics.NewQueryFeedback(0, hist, 0, false)
lower, upper := types.NewIntDatum(2), types.NewIntDatum(3)
fb.Feedback = []statistics.Feedback{
{Lower: &lower, Upper: &upper, Count: 1, Repeat: 1},
}
actual := TableRangesToKVRanges(0, ranges, fb)
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5},
},
}
for i := 0; i < len(actual); i++ {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
func (s *testSuite) TestIndexRangesToKVRangesWithFbs(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(4)},
},
}
hist := statistics.NewHistogram(1, 30, 30, 0, types.NewFieldType(mysql.TypeLonglong), chunk.InitialCapacity, 0)
for i := 0; i < 10; i++ {
hist.Bounds.AppendInt64(0, int64(i))
hist.Bounds.AppendInt64(0, int64(i+2))
hist.Buckets = append(hist.Buckets, statistics.Bucket{Repeat: 10, Count: int64(i + 30)})
}
fb := statistics.NewQueryFeedback(0, hist, 0, false)
lower, upper := types.NewIntDatum(2), types.NewIntDatum(3)
fb.Feedback = []statistics.Feedback{
{Lower: &lower, Upper: &upper, Count: 1, Repeat: 1},
}
actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 0, 0, ranges, fb)
c.Assert(err, IsNil)
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5},
},
}
for i := 0; i < len(actual); i++ {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
| [
"\"log_level\""
]
| []
| [
"log_level"
]
| [] | ["log_level"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "validate_json.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
example/hypervolume/hv.py | # coding: utf-8
"""
Hypervolume indicator.
"""
import json
import os
from pygmo import hypervolume
def main():
solution_to_score = json.loads(input())
solutions_scored = json.loads(input())
ys = [s['objective'] for s in solutions_scored]
ys.append(solution_to_score['objective'])
hv = hypervolume(ys)
ref_point = json.loads(os.getenv('HV_REF_POINT', '[1, 1]'))
score = hv.compute(ref_point)
print(score)
if __name__ == '__main__':
main()
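# Example invocation (a sketch; the objective values are made up and the default
# reference point [1, 1] is assumed):
#   printf '%s\n%s\n' \
#     '{"objective": [0.5, 0.5]}' \
#     '[{"objective": [0.9, 0.2]}, {"objective": [0.2, 0.9]}]' \
#     | python hv.py
# prints the hypervolume dominated by all three points relative to the reference point.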
| []
| []
| [
"HV_REF_POINT"
]
| [] | ["HV_REF_POINT"] | python | 1 | 0 | |
build/params_shared_vals.go | // +build !testground
package build
import (
"math/big"
"os"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
)
// /////
// Storage
const UnixfsChunkSize uint64 = 1 << 20
const UnixfsLinksPerLevel = 1024
// /////
// Consensus / Network
const AllowableClockDriftSecs = uint64(1)
const NewestNetworkVersion = network.Version3
const ActorUpgradeNetworkVersion = network.Version4
// Epochs
const ForkLengthThreshold = Finality
// Blocks (e)
var BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch)
// Epochs
const Finality = miner0.ChainFinality
const MessageConfidence = uint64(5)
// constants for Weight calculation
// The ratio of weight contributed by short-term vs long-term factors in a given round
const WRatioNum = int64(1)
const WRatioDen = uint64(2)
// /////
// Proofs
// Epochs
const SealRandomnessLookback = Finality
// Epochs
const SealRandomnessLookbackLimit = SealRandomnessLookback + 2000 // TODO: Get from specs-actors
// Maximum lookback that randomness can be sourced from for a seal proof submission
const MaxSealLookback = SealRandomnessLookbackLimit + 2000 // TODO: Get from specs-actors
// /////
// Mining
// Epochs
const TicketRandomnessLookback = abi.ChainEpoch(1)
const WinningPoStSectorSetLookback = abi.ChainEpoch(10)
// /////
// Address
const AddressMainnetEnvVar = "_mainnet_"
// /////
// Devnet settings
var Devnet = true
const FilBase = uint64(2_000_000_000)
const FilAllocStorageMining = uint64(1_100_000_000)
const FilecoinPrecision = uint64(1_000_000_000_000_000_000)
var InitialRewardBalance *big.Int
// TODO: Move other important consts here
func init() {
InitialRewardBalance = big.NewInt(int64(FilAllocStorageMining))
InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(int64(FilecoinPrecision)))
if os.Getenv("LOTUS_ADDRESS_TYPE") == AddressMainnetEnvVar {
SetAddressNetwork(address.Mainnet)
}
}
// Sync
const BadBlockCacheSize = 1 << 15
// assuming 4000 messages per round, this lets us not lose any messages across a
// 10 block reorg.
const BlsSignatureCacheSize = 40000
// Size of signature verification cache
// 32k keeps the cache around 10MB in size, max
const VerifSigCacheSize = 32000
// ///////
// Limits
// TODO: If this is gonna stay, it should move to specs-actors
const BlockMessageLimit = 10000
const BlockGasLimit = 10_000_000_000
const BlockGasTarget = BlockGasLimit / 2
const BaseFeeMaxChangeDenom = 8 // 12.5%
const InitialBaseFee = 100e6
const MinimumBaseFee = 100
const PackingEfficiencyNum = 4
const PackingEfficiencyDenom = 5
// Actor consts
// TODO: Pull from actors when its made not private
var MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay)
| [
"\"LOTUS_ADDRESS_TYPE\""
]
| []
| [
"LOTUS_ADDRESS_TYPE"
]
| [] | ["LOTUS_ADDRESS_TYPE"] | go | 1 | 0 | |
setup.py | import os
from setuptools import setup, find_packages
if os.environ.get("CI_COMMIT_TAG"):
version = os.environ["CI_COMMIT_TAG"]
else:
version = "0.1.dev{version}".format(version=os.environ.get("CI_JOB_ID", 0))
with open(os.path.join("requirements", "base.txt")) as f:
requirements = f.read().splitlines()
setup(
name="BIDS JSON Schema",
description="Schema for validating BIDS JSON sidecar",
version=version,
author="Gold Standard Phantoms",
author_email="[email protected]",
license="Commercial",
url="https://goldstandardphantoms.com",
packages=find_packages(where="src"),
package_dir={"": "src"},
install_requires=requirements,
)
| []
| []
| [
"CI_COMMIT_TAG",
"CI_JOB_ID"
]
| [] | ["CI_COMMIT_TAG", "CI_JOB_ID"] | python | 2 | 0 | |
Ska/engarchive/fetch_eng.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
from .fetch import * # noqa
from . import fetch
from . import __version__ # noqa
# Module-level units, defaults to CXC units (e.g. Kelvins etc)
UNITS = fetch.Units('eng')
def get_units():
return UNITS['system']
get_units.__doc__ = fetch.get_units.__doc__
def set_units(unit_system):
UNITS.set_units(unit_system)
set_units.__doc__ = fetch.set_units.__doc__
class MSID(fetch.MSID):
__doc__ = fetch.MSID.__doc__
units = UNITS
fetch = sys.modules[__name__]
class Msid(fetch.Msid):
__doc__ = fetch.Msid.__doc__
units = UNITS
fetch = sys.modules[__name__]
class MSIDset(fetch.MSIDset):
__doc__ = fetch.MSIDset.__doc__
MSID = MSID
class Msidset(fetch.Msidset):
__doc__ = fetch.Msidset.__doc__
MSID = MSID
| []
| []
| []
| [] | [] | python | null | null | null |
pymc3/sampling_jax.py | # pylint: skip-file
import os
import re
import warnings
xla_flags = os.getenv("XLA_FLAGS", "").lstrip("--")
xla_flags = re.sub(r"xla_force_host_platform_device_count=.+\s", "", xla_flags).split()
os.environ["XLA_FLAGS"] = " ".join(["--xla_force_host_platform_device_count={}".format(100)])
import arviz as az
import jax
import numpy as np
import pandas as pd
import theano
import theano.sandbox.jax_linker
import theano.sandbox.jaxify
import pymc3 as pm
from pymc3 import modelcontext
warnings.warn("This module is experimental.")
# Disable C compilation by default
# theano.config.cxx = ""
# This will make the JAX Linker the default
# theano.config.mode = "JAX"
def sample_tfp_nuts(
draws=1000,
tune=1000,
chains=4,
target_accept=0.8,
random_seed=10,
model=None,
num_tuning_epoch=2,
num_compute_step_size=500,
):
from tensorflow_probability.substrates import jax as tfp
import jax
model = modelcontext(model)
seed = jax.random.PRNGKey(random_seed)
fgraph = theano.gof.FunctionGraph(model.free_RVs, [model.logpt])
fns = theano.sandbox.jaxify.jax_funcify(fgraph)
logp_fn_jax = fns[0]
rv_names = [rv.name for rv in model.free_RVs]
init_state = [model.test_point[rv_name] for rv_name in rv_names]
init_state_batched = jax.tree_map(lambda x: np.repeat(x[None, ...], chains, axis=0), init_state)
@jax.pmap
def _sample(init_state, seed):
def gen_kernel(step_size):
hmc = tfp.mcmc.NoUTurnSampler(target_log_prob_fn=logp_fn_jax, step_size=step_size)
return tfp.mcmc.DualAveragingStepSizeAdaptation(
hmc, tune // num_tuning_epoch, target_accept_prob=target_accept
)
def trace_fn(_, pkr):
return pkr.new_step_size
def get_tuned_stepsize(samples, step_size):
return step_size[-1] * jax.numpy.std(samples[-num_compute_step_size:])
step_size = jax.tree_map(jax.numpy.ones_like, init_state)
for i in range(num_tuning_epoch - 1):
tuning_hmc = gen_kernel(step_size)
init_samples, tuning_result, kernel_results = tfp.mcmc.sample_chain(
num_results=tune // num_tuning_epoch,
current_state=init_state,
kernel=tuning_hmc,
trace_fn=trace_fn,
return_final_kernel_results=True,
seed=seed,
)
step_size = jax.tree_multimap(get_tuned_stepsize, list(init_samples), tuning_result)
init_state = [x[-1] for x in init_samples]
# Run inference
sample_kernel = gen_kernel(step_size)
mcmc_samples, leapfrog_num = tfp.mcmc.sample_chain(
num_results=draws,
num_burnin_steps=tune // num_tuning_epoch,
current_state=init_state,
kernel=sample_kernel,
trace_fn=lambda _, pkr: pkr.inner_results.leapfrogs_taken,
seed=seed,
)
return mcmc_samples, leapfrog_num
print("Compiling...")
tic2 = pd.Timestamp.now()
map_seed = jax.random.split(seed, chains)
mcmc_samples, leapfrog_num = _sample(init_state_batched, map_seed)
# map_seed = jax.random.split(seed, chains)
# mcmc_samples = _sample(init_state_batched, map_seed)
# tic4 = pd.Timestamp.now()
# print("Sampling time = ", tic4 - tic3)
posterior = {k: v for k, v in zip(rv_names, mcmc_samples)}
az_trace = az.from_dict(posterior=posterior)
tic3 = pd.Timestamp.now()
print("Compilation + sampling time = ", tic3 - tic2)
return az_trace # , leapfrog_num, tic3 - tic2
def sample_numpyro_nuts(
draws=1000,
tune=1000,
chains=4,
target_accept=0.8,
random_seed=10,
model=None,
progress_bar=True,
):
from numpyro.infer import MCMC, NUTS
from pymc3 import modelcontext
model = modelcontext(model)
seed = jax.random.PRNGKey(random_seed)
fgraph = theano.gof.FunctionGraph(model.free_RVs, [model.logpt])
fns = theano.sandbox.jaxify.jax_funcify(fgraph)
logp_fn_jax = fns[0]
rv_names = [rv.name for rv in model.free_RVs]
init_state = [model.test_point[rv_name] for rv_name in rv_names]
init_state_batched = jax.tree_map(lambda x: np.repeat(x[None, ...], chains, axis=0), init_state)
@jax.jit
def _sample(current_state, seed):
step_size = jax.tree_map(jax.numpy.ones_like, init_state)
nuts_kernel = NUTS(
potential_fn=lambda x: -logp_fn_jax(*x),
# model=model,
target_accept_prob=target_accept,
adapt_step_size=True,
adapt_mass_matrix=True,
dense_mass=False,
)
pmap_numpyro = MCMC(
nuts_kernel,
num_warmup=tune,
num_samples=draws,
num_chains=chains,
postprocess_fn=None,
chain_method="parallel",
progress_bar=progress_bar,
)
pmap_numpyro.run(seed, init_params=current_state, extra_fields=("num_steps",))
samples = pmap_numpyro.get_samples(group_by_chain=True)
leapfrogs_taken = pmap_numpyro.get_extra_fields(group_by_chain=True)["num_steps"]
return samples, leapfrogs_taken
print("Compiling...")
tic2 = pd.Timestamp.now()
map_seed = jax.random.split(seed, chains)
mcmc_samples, leapfrogs_taken = _sample(init_state_batched, map_seed)
# map_seed = jax.random.split(seed, chains)
# mcmc_samples = _sample(init_state_batched, map_seed)
# tic4 = pd.Timestamp.now()
# print("Sampling time = ", tic4 - tic3)
posterior = {k: v for k, v in zip(rv_names, mcmc_samples)}
az_trace = az.from_dict(posterior=posterior)
tic3 = pd.Timestamp.now()
print("Compilation + sampling time = ", tic3 - tic2)
return az_trace # , leapfrogs_taken, tic3 - tic2
| []
| []
| [
"XLA_FLAGS"
]
| [] | ["XLA_FLAGS"] | python | 1 | 0 | |
app.py | # coding: utf-8
from __future__ import unicode_literals
import datetime
import json
import os
import redis
from flask import Flask
from flask import render_template
from parser import get_last_entry
app = Flask(__name__)
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
redis = redis.from_url(redis_url)
CACHE_TIME = datetime.timedelta(hours=1)
@app.route('/')
def index():
last_entries = []
restaurants = json.loads(open('restaurants.json').read())
for restaurant_name in restaurants:
place_data = restaurants[restaurant_name]
place_id = place_data['id']
cache_key = 'last-entry-%s' % (place_id,)
last_entry = redis.get(cache_key)
if last_entry:
last_entry = last_entry.decode('utf-8')
else:
last_entry = get_last_entry(place_id)
redis.setex(name=cache_key, time=CACHE_TIME, value=last_entry)
last_entries.append(
{'name': restaurant_name, 'url': place_data['url'],
'last_entry': last_entry}
)
return render_template('index.html', last_entries=last_entries)
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
    debug = os.environ.get('DEBUG', '').lower() in ('1', 'true', 'yes')
app.run(debug=debug, port=port, host='0.0.0.0')
| []
| []
| [
"PORT",
"REDISTOGO_URL",
"DEBUG"
]
| [] | ["PORT", "REDISTOGO_URL", "DEBUG"] | python | 3 | 0 | |
utils/utils.go | package utils
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"index/suffixarray"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
// Go is a basic promise implementation: it wraps calls a function in a goroutine,
// and returns a channel which will later return the function's return value.
func Go(f func() error) chan error {
ch := make(chan error)
go func() {
ch <- f()
}()
return ch
}
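// A minimal usage sketch (illustrative; doSomething stands in for any func() error):
//
//	errCh := Go(func() error { return doSomething() })
//	if err := <-errCh; err != nil {
//		log.Fatal(err)
//	}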
// Request a given URL and return an io.Reader
func Download(url string, stderr io.Writer) (*http.Response, error) {
var resp *http.Response
var err error
if resp, err = http.Get(url); err != nil {
return nil, err
}
if resp.StatusCode >= 400 {
return nil, errors.New("Got HTTP status code >= 400: " + resp.Status)
}
return resp, nil
}
// Debug function, if the debug flag is set, then display. Do nothing otherwise
// If Docker is in daemon mode, also send the debug info on the socket
func Debugf(format string, a ...interface{}) {
if os.Getenv("DEBUG") != "" {
// Retrieve the stack infos
_, file, line, ok := runtime.Caller(1)
if !ok {
file = "<unknown>"
line = -1
} else {
file = file[strings.LastIndex(file, "/")+1:]
}
fmt.Fprintf(os.Stderr, fmt.Sprintf("[debug] %s:%d %s\n", file, line, format), a...)
}
}
// Reader with progress bar
type progressReader struct {
reader io.ReadCloser // Stream to read from
output io.Writer // Where to send progress bar to
readTotal int // Expected stream length (bytes)
readProgress int // How much has been read so far (bytes)
lastUpdate int // How many bytes read at least update
template string // Template to print. Default "%v/%v (%v)"
sf *StreamFormatter
}
func (r *progressReader) Read(p []byte) (n int, err error) {
read, err := io.ReadCloser(r.reader).Read(p)
r.readProgress += read
updateEvery := 4096
if r.readTotal > 0 {
// Only update progress for every 1% read
if increment := int(0.01 * float64(r.readTotal)); increment > updateEvery {
updateEvery = increment
}
}
if r.readProgress-r.lastUpdate > updateEvery || err != nil {
if r.readTotal > 0 {
			fmt.Fprintf(r.output, r.template, HumanSize(int64(r.readProgress)), HumanSize(int64(r.readTotal)), fmt.Sprintf("%2.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
} else {
fmt.Fprintf(r.output, r.template, r.readProgress, "?", "n/a")
}
r.lastUpdate = r.readProgress
}
// Send newline when complete
if err != nil {
r.output.Write(r.sf.FormatStatus(""))
}
return read, err
}
func (r *progressReader) Close() error {
return io.ReadCloser(r.reader).Close()
}
func ProgressReader(r io.ReadCloser, size int, output io.Writer, template []byte, sf *StreamFormatter) *progressReader {
tpl := string(template)
if tpl == "" {
tpl = string(sf.FormatProgress("", "%v/%v (%v)"))
}
return &progressReader{r, NewWriteFlusher(output), size, 0, 0, tpl, sf}
}
// HumanDuration returns a human-readable approximation of a duration
// (eg. "About a minute", "4 hours ago", etc.)
func HumanDuration(d time.Duration) string {
if seconds := int(d.Seconds()); seconds < 1 {
return "Less than a second"
} else if seconds < 60 {
return fmt.Sprintf("%d seconds", seconds)
} else if minutes := int(d.Minutes()); minutes == 1 {
return "About a minute"
} else if minutes < 60 {
return fmt.Sprintf("%d minutes", minutes)
} else if hours := int(d.Hours()); hours == 1 {
return "About an hour"
} else if hours < 48 {
return fmt.Sprintf("%d hours", hours)
} else if hours < 24*7*2 {
return fmt.Sprintf("%d days", hours/24)
} else if hours < 24*30*3 {
return fmt.Sprintf("%d weeks", hours/24/7)
} else if hours < 24*365*2 {
return fmt.Sprintf("%d months", hours/24/30)
}
return fmt.Sprintf("%d years", d.Hours()/24/365)
}
// HumanSize returns a human-readable approximation of a size
// using SI standard (eg. "44kB", "17MB")
func HumanSize(size int64) string {
i := 0
var sizef float64
sizef = float64(size)
units := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
for sizef >= 1000.0 {
sizef = sizef / 1000.0
i++
}
return fmt.Sprintf("%5.4g %s", sizef, units[i])
}
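// For instance (approximate): HumanSize(1234567) renders as "1.235 MB", while values
// below 1000 stay in plain bytes, e.g. HumanSize(999) gives roughly "999 B".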
func Trunc(s string, maxlen int) string {
if len(s) <= maxlen {
return s
}
return s[:maxlen]
}
// Figure out the absolute path of our own binary
func SelfPath() string {
path, err := exec.LookPath(os.Args[0])
if err != nil {
panic(err)
}
path, err = filepath.Abs(path)
if err != nil {
panic(err)
}
return path
}
type NopWriter struct {
}
func (w *NopWriter) Write(buf []byte) (int, error) {
return len(buf), nil
}
type nopWriteCloser struct {
io.Writer
}
func (w *nopWriteCloser) Close() error { return nil }
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
type bufReader struct {
buf *bytes.Buffer
reader io.Reader
err error
l sync.Mutex
wait sync.Cond
}
func NewBufReader(r io.Reader) *bufReader {
reader := &bufReader{
buf: &bytes.Buffer{},
reader: r,
}
reader.wait.L = &reader.l
go reader.drain()
return reader
}
func (r *bufReader) drain() {
buf := make([]byte, 1024)
for {
n, err := r.reader.Read(buf)
r.l.Lock()
if err != nil {
r.err = err
} else {
r.buf.Write(buf[0:n])
}
r.wait.Signal()
r.l.Unlock()
if err != nil {
break
}
}
}
func (r *bufReader) Read(p []byte) (n int, err error) {
r.l.Lock()
defer r.l.Unlock()
for {
n, err = r.buf.Read(p)
if n > 0 {
return n, err
}
if r.err != nil {
return 0, r.err
}
r.wait.Wait()
}
}
func (r *bufReader) Close() error {
closer, ok := r.reader.(io.ReadCloser)
if !ok {
return nil
}
return closer.Close()
}
type WriteBroadcaster struct {
mu sync.Mutex
writers map[io.WriteCloser]struct{}
}
func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser) {
w.mu.Lock()
w.writers[writer] = struct{}{}
w.mu.Unlock()
}
// FIXME: Is that function used?
// FIXME: This relies on the concrete writer type used having equality operator
func (w *WriteBroadcaster) RemoveWriter(writer io.WriteCloser) {
w.mu.Lock()
delete(w.writers, writer)
w.mu.Unlock()
}
func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
w.mu.Lock()
defer w.mu.Unlock()
for writer := range w.writers {
if n, err := writer.Write(p); err != nil || n != len(p) {
// On error, evict the writer
delete(w.writers, writer)
}
}
return len(p), nil
}
func (w *WriteBroadcaster) CloseWriters() error {
w.mu.Lock()
defer w.mu.Unlock()
for writer := range w.writers {
writer.Close()
}
w.writers = make(map[io.WriteCloser]struct{})
return nil
}
func NewWriteBroadcaster() *WriteBroadcaster {
return &WriteBroadcaster{writers: make(map[io.WriteCloser]struct{})}
}
func GetTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
Debugf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
} else {
return len(fds)
}
return -1
}
// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
type TruncIndex struct {
index *suffixarray.Index
ids map[string]bool
bytes []byte
}
func NewTruncIndex() *TruncIndex {
return &TruncIndex{
index: suffixarray.New([]byte{' '}),
ids: make(map[string]bool),
bytes: []byte{' '},
}
}
func (idx *TruncIndex) Add(id string) error {
if strings.Contains(id, " ") {
return fmt.Errorf("Illegal character: ' '")
}
if _, exists := idx.ids[id]; exists {
return fmt.Errorf("Id already exists: %s", id)
}
idx.ids[id] = true
idx.bytes = append(idx.bytes, []byte(id+" ")...)
idx.index = suffixarray.New(idx.bytes)
return nil
}
func (idx *TruncIndex) Delete(id string) error {
if _, exists := idx.ids[id]; !exists {
return fmt.Errorf("No such id: %s", id)
}
before, after, err := idx.lookup(id)
if err != nil {
return err
}
delete(idx.ids, id)
idx.bytes = append(idx.bytes[:before], idx.bytes[after:]...)
idx.index = suffixarray.New(idx.bytes)
return nil
}
func (idx *TruncIndex) lookup(s string) (int, int, error) {
offsets := idx.index.Lookup([]byte(" "+s), -1)
//log.Printf("lookup(%s): %v (index bytes: '%s')\n", s, offsets, idx.index.Bytes())
if offsets == nil || len(offsets) == 0 || len(offsets) > 1 {
return -1, -1, fmt.Errorf("No such id: %s", s)
}
offsetBefore := offsets[0] + 1
offsetAfter := offsetBefore + strings.Index(string(idx.bytes[offsetBefore:]), " ")
return offsetBefore, offsetAfter, nil
}
func (idx *TruncIndex) Get(s string) (string, error) {
before, after, err := idx.lookup(s)
//log.Printf("Get(%s) bytes=|%s| before=|%d| after=|%d|\n", s, idx.bytes, before, after)
if err != nil {
return "", err
}
return string(idx.bytes[before:after]), err
}
// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length Id.
func TruncateID(id string) string {
shortLen := 12
if len(id) < shortLen {
shortLen = len(id)
}
return id[:shortLen]
}
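// exampleTruncIndexUsage is an illustrative sketch (hypothetical, unused
// helper): it shows the intended TruncIndex workflow of adding a full ID,
// resolving it again by a unique prefix, and shortening it for display.
// The ID and prefix below are made-up example values.
func exampleTruncIndexUsage() {
	idx := NewTruncIndex()
	if err := idx.Add("4e1cb7510d5b02419ad7d74b3c7a2e01b2a7e8b7"); err != nil {
		Debugf("Add failed: %s", err)
		return
	}
	full, err := idx.Get("4e1c")
	if err != nil {
		Debugf("Get failed: %s", err)
		return
	}
	Debugf("prefix 4e1c resolves to %s (short form %s)", full, TruncateID(full))
}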
// Code c/c from io.Copy() modified to handle escape sequence
func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
buf := make([]byte, 32*1024)
for {
nr, er := src.Read(buf)
if nr > 0 {
// ---- Docker addition
// char 16 is C-p
if nr == 1 && buf[0] == 16 {
nr, er = src.Read(buf)
// char 17 is C-q
if nr == 1 && buf[0] == 17 {
if err := src.Close(); err != nil {
return 0, err
}
return 0, io.EOF
}
}
// ---- End of docker
nw, ew := dst.Write(buf[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
}
if er == io.EOF {
break
}
if er != nil {
err = er
break
}
}
return written, err
}
func HashData(src io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, src); err != nil {
return "", err
}
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}
type KernelVersionInfo struct {
Kernel int
Major int
Minor int
Flavor string
}
func (k *KernelVersionInfo) String() string {
flavor := ""
if len(k.Flavor) > 0 {
flavor = fmt.Sprintf("-%s", k.Flavor)
}
return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, flavor)
}
// Compare two KernelVersionInfo struct.
// Returns -1 if a < b, 0 if a == b, 1 if a > b
func CompareKernelVersion(a, b *KernelVersionInfo) int {
if a.Kernel < b.Kernel {
return -1
} else if a.Kernel > b.Kernel {
return 1
}
if a.Major < b.Major {
return -1
} else if a.Major > b.Major {
return 1
}
if a.Minor < b.Minor {
return -1
} else if a.Minor > b.Minor {
return 1
}
return 0
}
func FindCgroupMountpoint(cgroupType string) (string, error) {
output, err := ioutil.ReadFile("/proc/mounts")
if err != nil {
return "", err
}
// /proc/mounts has 6 fields per line, one mount per line, e.g.
// cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
for _, line := range strings.Split(string(output), "\n") {
parts := strings.Split(line, " ")
if len(parts) == 6 && parts[2] == "cgroup" {
for _, opt := range strings.Split(parts[3], ",") {
if opt == cgroupType {
return parts[1], nil
}
}
}
}
return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
}
func GetKernelVersion() (*KernelVersionInfo, error) {
var (
flavor string
kernel, major, minor int
err error
)
uts, err := uname()
if err != nil {
return nil, err
}
release := make([]byte, len(uts.Release))
i := 0
for _, c := range uts.Release {
release[i] = byte(c)
i++
}
// Remove the \x00 from the release for Atoi to parse correctly
release = release[:bytes.IndexByte(release, 0)]
tmp := strings.SplitN(string(release), "-", 2)
tmp2 := strings.SplitN(tmp[0], ".", 3)
if len(tmp2) > 0 {
kernel, err = strconv.Atoi(tmp2[0])
if err != nil {
return nil, err
}
}
if len(tmp2) > 1 {
major, err = strconv.Atoi(tmp2[1])
if err != nil {
return nil, err
}
}
if len(tmp2) > 2 {
minor, err = strconv.Atoi(tmp2[2])
if err != nil {
return nil, err
}
}
if len(tmp) == 2 {
flavor = tmp[1]
} else {
flavor = ""
}
return &KernelVersionInfo{
Kernel: kernel,
Major: major,
Minor: minor,
Flavor: flavor,
}, nil
}
// FIXME: this is deprecated by CopyWithTar in archive.go
func CopyDirectory(source, dest string) error {
if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil {
return fmt.Errorf("Error copy: %s (%s)", err, output)
}
return nil
}
type NopFlusher struct{}
func (f *NopFlusher) Flush() {}
type WriteFlusher struct {
w io.Writer
flusher http.Flusher
}
func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
n, err = wf.w.Write(b)
wf.flusher.Flush()
return n, err
}
func NewWriteFlusher(w io.Writer) *WriteFlusher {
var flusher http.Flusher
if f, ok := w.(http.Flusher); ok {
flusher = f
} else {
flusher = &NopFlusher{}
}
return &WriteFlusher{w: w, flusher: flusher}
}
type JSONMessage struct {
Status string `json:"status,omitempty"`
Progress string `json:"progress,omitempty"`
Error string `json:"error,omitempty"`
}
type StreamFormatter struct {
json bool
used bool
}
func NewStreamFormatter(json bool) *StreamFormatter {
return &StreamFormatter{json, false}
}
func (sf *StreamFormatter) FormatStatus(format string, a ...interface{}) []byte {
sf.used = true
str := fmt.Sprintf(format, a...)
if sf.json {
b, err := json.Marshal(&JSONMessage{Status: str})
if err != nil {
return sf.FormatError(err)
}
return b
}
return []byte(str + "\r\n")
}
func (sf *StreamFormatter) FormatError(err error) []byte {
sf.used = true
if sf.json {
if b, err := json.Marshal(&JSONMessage{Error: err.Error()}); err == nil {
return b
}
return []byte("{\"error\":\"format error\"}")
}
return []byte("Error: " + err.Error() + "\r\n")
}
func (sf *StreamFormatter) FormatProgress(action, str string) []byte {
sf.used = true
if sf.json {
b, err := json.Marshal(&JSONMessage{Status: action, Progress: str})
if err != nil {
return nil
}
return b
}
return []byte(action + " " + str + "\r")
}
func (sf *StreamFormatter) Used() bool {
return sf.used
}
func IsURL(str string) bool {
return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://")
}
func IsGIT(str string) bool {
return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/")
}
func CheckLocalDns() bool {
resolv, err := ioutil.ReadFile("/etc/resolv.conf")
if err != nil {
Debugf("Error opening resolv.conf: %s", err)
return false
}
for _, ip := range []string{
"127.0.0.1",
"127.0.1.1",
} {
if strings.Contains(string(resolv), ip) {
return true
}
}
return false
}
func ParseHost(host string, port int, addr string) string {
if strings.HasPrefix(addr, "unix://") {
return addr
}
if strings.HasPrefix(addr, "tcp://") {
addr = strings.TrimPrefix(addr, "tcp://")
}
if strings.Contains(addr, ":") {
hostParts := strings.Split(addr, ":")
if len(hostParts) != 2 {
log.Fatal("Invalid bind address format.")
os.Exit(-1)
}
if hostParts[0] != "" {
host = hostParts[0]
}
if p, err := strconv.Atoi(hostParts[1]); err == nil {
port = p
}
} else {
host = addr
}
return fmt.Sprintf("tcp://%s:%d", host, port)
}
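// exampleParseHost is an illustrative sketch (hypothetical, unused helper):
// it shows how ParseHost fills in missing pieces of the daemon address. The
// host, port and address values below are made-up examples.
func exampleParseHost() {
	// unix sockets are returned untouched, tcp addresses are normalized
	Debugf("%s", ParseHost("127.0.0.1", 4243, "unix:///var/run/docker.sock"))
	Debugf("%s", ParseHost("127.0.0.1", 4243, "tcp://0.0.0.0:4243"))
}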
| [
"\"DEBUG\""
]
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | go | 1 | 0 | |
temporal/core.py | """
This module provides the functionality to create the temporal
SQL database and to establish a connection to the database.
Usage:
.. code-block:: python
>>> import grass.temporal as tgis
>>> # Create the temporal database
>>> tgis.init()
>>> # Establish a database connection
>>> dbif, connected = tgis.init_dbif(None)
>>> dbif.connect()
>>> # Execute a SQL statement
>>> dbif.execute_transaction("SELECT datetime(0, 'unixepoch', 'localtime');")
>>> # Mogrify an SQL statement
>>> dbif.mogrify_sql_statement(["SELECT name from raster_base where name = ?",
... ("precipitation",)])
"SELECT name from raster_base where name = 'precipitation'"
>>> dbif.close()
(C) 2011-2014 by the GRASS Development Team
This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
:author: Soeren Gebbert
"""
#import traceback
import os
import sys
import grass.script as gscript
if sys.version_info.major == 3:
long = int
from .c_libraries_interface import *
from grass.pygrass import messages
from grass.script.utils import decode, encode
# Import all supported database backends
# Ignore import errors since they are checked later
try:
import sqlite3
except ImportError:
pass
# Postgresql is optional, existence is checked when needed
try:
import psycopg2
import psycopg2.extras
except:
pass
import atexit
from datetime import datetime
###############################################################################
def profile_function(func):
"""Profiling function provided by the temporal framework"""
do_profiling = os.getenv("GRASS_TGIS_PROFILE")
if do_profiling == "True" or do_profiling == "1":
import cProfile, pstats
try:
import StringIO as io
except ImportError:
import io
pr = cProfile.Profile()
pr.enable()
func()
pr.disable()
s = io.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
else:
func()
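# Illustrative usage sketch (commented out, hypothetical): ``work`` is an
# assumed example callable; profiling only happens when the GRASS_TGIS_PROFILE
# environment variable is set to "True" or "1":
#
#     def work():
#         ...  # some expensive temporal operation
#
#     profile_function(work)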
# Global variable that defines the backend
# of the temporal GIS
# It can either be "sqlite" or "pg"
tgis_backend = None
def get_tgis_backend():
"""Return the temporal GIS backend as string
:returns: either "sqlite" or "pg"
"""
global tgis_backend
return tgis_backend
# Global variable that defines the database string
# of the temporal GIS
tgis_database = None
def get_tgis_database():
"""Return the temporal database string specified with t.connect
"""
global tgis_database
return tgis_database
# The version of the temporal framework
# this value must be an integer larger than 0
# Increase this value in case of backward incompatible changes in the TGIS API
tgis_version = 2
# The version of the temporal database since framework and database version
# can differ this value must be an integer larger than 0
# Increase this value in case of backward incompatible changes
# temporal database SQL layout
tgis_db_version = 2
# We need to know the parameter style of the database backend
tgis_dbmi_paramstyle = None
def get_tgis_dbmi_paramstyle():
"""Return the temporal database backend parameter style
:returns: "qmark" or ""
"""
global tgis_dbmi_paramstyle
return tgis_dbmi_paramstyle
# We need to access the current mapset quite often in the framework, so we make
# a global variable that will be initiated when init() is called
current_mapset = None
current_location = None
current_gisdbase = None
###############################################################################
def get_current_mapset():
"""Return the current mapset
This is the fastest way to receive the current mapset.
The current mapset is set by init() and stored in a global variable.
This function provides access to this global variable.
"""
global current_mapset
return current_mapset
###############################################################################
def get_current_location():
"""Return the current location
This is the fastest way to receive the current location.
The current location is set by init() and stored in a global variable.
This function provides access to this global variable.
"""
global current_location
return current_location
###############################################################################
def get_current_gisdbase():
"""Return the current gis database (gisdbase)
This is the fastest way to receive the current gisdbase.
The current gisdbase is set by init() and stored in a global variable.
This function provides access to this global variable.
"""
global current_gisdbase
return current_gisdbase
###############################################################################
# If this global variable is set True, then maps can only be registered in
# space time datasets with the same mapset. In addition, only maps in the
# current mapset can be inserted, updated or deleted from the temporal database.
# Overwrite this global variable by: g.gisenv set="TGIS_DISABLE_MAPSET_CHECK=True"
# ATTENTION: Be aware that you may face a corrupted temporal database in case this global
# variable is set to False. This feature is highly
# experimental and violates the GRASS permission guidance.
enable_mapset_check = True
# If this global variable is set True, the timestamps of maps will be written
# as textfiles for each map that will be inserted or updated in the temporal
# database using the C-library timestamp interface.
# Overwrite this global variable by: g.gisenv set="TGIS_DISABLE_TIMESTAMP_WRITE=True"
# ATTENTION: Be aware that you may face a corrupted temporal database in case this global
# variable is set to False. This feature is highly
# experimental and violates the GRASS permission guidance.
enable_timestamp_write = True
def get_enable_mapset_check():
"""Return True if the mapsets should be checked while insert, update,
delete requests and space time dataset registration.
If this global variable is set True, then maps can only be registered
in space time datasets with the same mapset. In addition, only maps in
the current mapset can be inserted, updated or deleted from the temporal
database.
Overwrite this global variable by: g.gisenv set="TGIS_DISABLE_MAPSET_CHECK=True"
.. warning::
Be aware that you may face a corrupted temporal database in case this
global variable is set to False. This feature is highly
experimental and violates the GRASS permission guidance.
"""
global enable_mapset_check
return enable_mapset_check
def get_enable_timestamp_write():
"""Return True if the map timestamps should be written to the spatial
database metadata as well.
If this global variable is set True, the timestamps of maps will be
written as textfiles for each map that will be inserted or updated in
the temporal database using the C-library timestamp interface.
Overwrite this global variable by: g.gisenv set="TGIS_DISABLE_TIMESTAMP_WRITE=True"
.. warning::
Be aware that C-libraries cannot access timestamp information if
it is not written as spatial database metadata, hence modules
that make use of timestamps using the C-library interface will not
work with maps that were created without writing the timestamps.
"""
global enable_timestamp_write
return enable_timestamp_write
###############################################################################
# The global variable that stores the PyGRASS Messenger object that
# provides a fast and exit safe interface to the C-library message functions
message_interface = None
def _init_tgis_message_interface(raise_on_error=False):
"""Initiate the global message interface
:param raise_on_error: If True raise a FatalError exception in case of
a fatal error, call sys.exit(1) otherwise
"""
global message_interface
if message_interface is None:
message_interface = messages.get_msgr(raise_on_error=raise_on_error)
def get_tgis_message_interface():
"""Return the temporal GIS message interface which is of type
grass.pygrass.message.Messenger()
Use this message interface to print messages to stdout using the
GRASS C-library messaging system.
"""
global message_interface
return message_interface
###############################################################################
# The global variable that stores the C-library interface object that
# provides a fast and exit safe interface to the C-library libgis,
# libraster, libraster3d and libvector functions
c_library_interface = None
def _init_tgis_c_library_interface():
"""Set the global C-library interface variable that
provides a fast and exit safe interface to the C-library libgis,
libraster, libraster3d and libvector functions
"""
global c_library_interface
if c_library_interface is None:
c_library_interface = CLibrariesInterface()
def get_tgis_c_library_interface():
"""Return the C-library interface that
provides a fast and exit safe interface to the C-library libgis,
libraster, libraster3d and libvector functions
"""
global c_library_interface
return c_library_interface
###############################################################################
# Set this variable True to raise a FatalError exception
# in case a fatal error occurs using the messenger interface
raise_on_error = False
def set_raise_on_error(raise_exp=True):
"""Define behavior on fatal error, invoked using the tgis messenger
interface (msgr.fatal())
The messenger interface will be restarted using the new error policy
:param raise_exp: True to raise a FatalError exception instead of calling
sys.exit(1) when using the tgis messenger interface
.. code-block:: python
>>> import grass.temporal as tgis
>>> tgis.init()
>>> ignore = tgis.set_raise_on_error(False)
>>> msgr = tgis.get_tgis_message_interface()
>>> tgis.get_raise_on_error()
False
>>> msgr.fatal("Ohh no no no!")
Traceback (most recent call last):
File "__init__.py", line 239, in fatal
sys.exit(1)
SystemExit: 1
>>> tgis.set_raise_on_error(True)
False
>>> msgr.fatal("Ohh no no no!")
Traceback (most recent call last):
File "__init__.py", line 241, in fatal
raise FatalError(message)
FatalError: Ohh no no no!
:returns: current status
"""
global raise_on_error
tmp_raise = raise_on_error
raise_on_error = raise_exp
global message_interface
if message_interface:
message_interface.set_raise_on_error(raise_on_error)
else:
_init_tgis_message_interface(raise_on_error)
return tmp_raise
def get_raise_on_error():
"""Return True if a FatalError exception is raised instead of calling
sys.exit(1) in case a fatal error was invoked with msgr.fatal()
"""
global raise_on_error
return raise_on_error
###############################################################################
def get_tgis_version():
"""Get the version number of the temporal framework
:returns: The version number of the temporal framework as an integer
"""
global tgis_version
return tgis_version
###############################################################################
def get_tgis_db_version():
"""Get the version number of the temporal database
:returns: The version number of the temporal database as an integer
"""
global tgis_db_version
return tgis_db_version
###############################################################################
def get_tgis_metadata(dbif=None):
"""Return the tgis metadata table as a list of rows (dicts) or None if not
present
:param dbif: The database interface to be used
:returns: The selected rows with key/value columns or None
"""
dbif, connected = init_dbif(dbif)
# Select metadata if the table is present
try:
statement = "SELECT * FROM tgis_metadata;\n"
dbif.execute(statement)
rows = dbif.fetchall()
except:
rows = None
if connected:
dbif.close()
return rows
###############################################################################
# The temporal database string set with t.connect
# with substituted GRASS variables gisdbase, location and mapset
tgis_database_string = None
def get_tgis_database_string():
"""Return the preprocessed temporal database string
This string is the temporal database string set with t.connect
that was processed to substitute location, gisdbase and mapset
variables.
"""
global tgis_database_string
return tgis_database_string
###############################################################################
def get_sql_template_path():
base = os.getenv("GISBASE")
base_etc = os.path.join(base, "etc")
return os.path.join(base_etc, "sql")
###############################################################################
def stop_subprocesses():
"""Stop the messenger and C-interface subprocesses
that are started by tgis.init()
"""
global message_interface
global c_library_interface
if message_interface:
message_interface.stop()
if c_library_interface:
c_library_interface.stop()
# We register this function to be called at exit
atexit.register(stop_subprocesses)
def get_available_temporal_mapsets():
"""Return a dictionary of mapset names with a temporal database driver and
database that are accessible from the current mapset.
:returns: A dictionary with mapset names as keys and the tuple (driver,
database) as values
"""
global c_library_interface
global message_interface
mapsets = c_library_interface.available_mapsets()
tgis_mapsets = {}
for mapset in mapsets:
mapset = mapset
driver = c_library_interface.get_driver_name(mapset)
database = c_library_interface.get_database_name(mapset)
message_interface.debug(1, "get_available_temporal_mapsets: "\
"\n mapset %s\n driver %s\n database %s"%(mapset,
driver, database))
if driver and database:
# Check if the temporal sqlite database exists
# We need to set non-existing databases in case the mapset is the current mapset
# to create it
if (driver == "sqlite" and os.path.exists(database)) or mapset == get_current_mapset() :
tgis_mapsets[mapset] = (driver, database)
# We need to warn if the connection is defined but the database does not
# exists
if driver == "sqlite" and not os.path.exists(database):
message_interface.warning("Temporal database connection defined as:\n" + \
database + "\nBut database file does not exist.")
return tgis_mapsets
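# Illustrative sketch (commented out, hypothetical values): the dictionary
# returned above typically looks like the following; the mapset name and the
# database path are assumed example values:
#
#     {"PERMANENT": ("sqlite", "/grassdata/location/PERMANENT/tgis/sqlite.db")}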
###############################################################################
def init(raise_fatal_error=False):
"""This function sets the correct database backend from GRASS environmental
variables and creates the grass temporal database structure for raster,
vector and raster3d maps as well as for the space-time datasets strds,
str3ds and stvds in case it does not exist.
Several global variables are initiated and the messenger and C-library
interface subprocesses are spawned.
Re-run this function in case the following GRASS variables change while
the process runs:
- MAPSET
- LOCATION_NAME
- GISDBASE
- TGIS_DISABLE_MAPSET_CHECK
- TGIS_DISABLE_TIMESTAMP_WRITE
Re-run this function if the following t.connect variables change while
the process runs:
- temporal GIS driver (set by t.connect driver=)
- temporal GIS database (set by t.connect database=)
The following environmental variables are checked:
- GRASS_TGIS_PROFILE (True, False, 1, 0)
- GRASS_TGIS_RAISE_ON_ERROR (True, False, 1, 0)
.. warning::
This function must be called before any spatio-temporal processing
can be started
:param raise_fatal_error: Set this True to assure that the init()
function does not kill a persistent process
like the GUI. If set True a
grass.pygrass.messages.FatalError
exception will be raised in case a fatal
error occurs in the init process, otherwise
sys.exit(1) will be called.
"""
# We need to set the correct database backend and several global variables
# from the GRASS mapset specific environment variables of g.gisenv and t.connect
global tgis_backend
global tgis_database
global tgis_database_string
global tgis_dbmi_paramstyle
global raise_on_error
global enable_mapset_check
global enable_timestamp_write
global current_mapset
global current_location
global current_gisdbase
raise_on_error = raise_fatal_error
# We must run t.connect at first to create the temporal database and to
# get the environmental variables
gscript.run_command("t.connect", flags="c")
grassenv = gscript.gisenv()
# Set the global variable for faster access
current_mapset = grassenv["MAPSET"]
current_location = grassenv["LOCATION_NAME"]
current_gisdbase = grassenv["GISDBASE"]
# Check environment variable GRASS_TGIS_RAISE_ON_ERROR
if os.getenv("GRASS_TGIS_RAISE_ON_ERROR") == "True" or \
os.getenv("GRASS_TGIS_RAISE_ON_ERROR") == "1":
raise_on_error = True
# Check if the script library raises on error,
# if so we do the same
if gscript.get_raise_on_error() is True:
raise_on_error = True
# Start the GRASS message interface server
_init_tgis_message_interface(raise_on_error)
# Start the C-library interface server
_init_tgis_c_library_interface()
msgr = get_tgis_message_interface()
msgr.debug(1, "Initiate the temporal database")
#"\n traceback:%s"%(str(" \n".join(traceback.format_stack()))))
msgr.debug(1, ("Raise on error id: %s"%str(raise_on_error)))
ciface = get_tgis_c_library_interface()
driver_string = ciface.get_driver_name()
database_string = ciface.get_database_name()
# Set the mapset check and the timestamp write
if "TGIS_DISABLE_MAPSET_CHECK" in grassenv:
if gscript.encode(grassenv["TGIS_DISABLE_MAPSET_CHECK"]) == "True" or \
gscript.encode(grassenv["TGIS_DISABLE_MAPSET_CHECK"]) == "1":
enable_mapset_check = False
msgr.warning("TGIS_DISABLE_MAPSET_CHECK is True")
if "TGIS_DISABLE_TIMESTAMP_WRITE" in grassenv:
if gscript.encode(grassenv["TGIS_DISABLE_TIMESTAMP_WRITE"]) == "True" or \
gscript.encode(grassenv["TGIS_DISABLE_TIMESTAMP_WRITE"]) == "1":
enable_timestamp_write = False
msgr.warning("TGIS_DISABLE_TIMESTAMP_WRITE is True")
if driver_string is not None and driver_string != "":
driver_string = decode(driver_string)
if driver_string == "sqlite":
tgis_backend = driver_string
try:
import sqlite3
except ImportError:
msgr.error("Unable to locate the sqlite SQL Python interface"
" module sqlite3.")
raise
dbmi = sqlite3
elif driver_string == "pg":
tgis_backend = driver_string
try:
import psycopg2
except ImportError:
msgr.error("Unable to locate the Postgresql SQL Python "
"interface module psycopg2.")
raise
dbmi = psycopg2
else:
msgr.fatal(_("Unable to initialize the temporal DBMI interface. "
"Please use t.connect to specify the driver and the"
" database string"))
else:
# Set the default sqlite3 connection in case nothing was defined
gscript.run_command("t.connect", flags="d")
driver_string = ciface.get_driver_name()
database_string = ciface.get_database_name()
tgis_backend = driver_string
try:
import sqlite3
except ImportError:
msgr.error("Unable to locate the sqlite SQL Python interface"
" module sqlite3.")
raise
dbmi = sqlite3
tgis_database_string = database_string
# Set the parameter style
tgis_dbmi_paramstyle = dbmi.paramstyle
# We do not know if the database already exists
db_exists = False
dbif = SQLDatabaseInterfaceConnection()
# Check if the database already exists
if tgis_backend == "sqlite":
# Check path of the sqlite database
if os.path.exists(tgis_database_string):
dbif.connect()
# Check for raster_base table
dbif.execute("SELECT name FROM sqlite_master WHERE type='table' "
"AND name='raster_base';")
name = dbif.fetchone()
if name and name[0] == "raster_base":
db_exists = True
dbif.close()
elif tgis_backend == "pg":
# Connect to database
dbif.connect()
# Check for raster_base table
dbif.execute("SELECT EXISTS(SELECT * FROM information_schema.tables "
"WHERE table_name=%s)", ('raster_base',))
if dbif.fetchone()[0]:
db_exists = True
backup_howto = "The format of your current temporal database is not " \
"supported any more.\nSolution: You need to export it by " \
"restoring the GRASS GIS version used for creating this DB"\
". From there, create a backup of your temporal database "\
"to avoid the loss of your temporal data.\nNotes: Use " \
"t.rast.export and t.vect.export to make a backup of your" \
" existing space time datasets. To save the timestamps of" \
" your existing maps and space time datasets, use " \
"t.rast.list, t.vect.list and t.rast3d.list. "\
"You can register the existing time stamped maps easily if"\
" you export columns=id,start_time,end_time into text "\
"files and use t.register to register them again in newly" \
" created space time datasets (t.create). After the backup,"\
" remove the existing temporal database; a new one will be"\
" created automatically.\n"
if db_exists is True:
# Check the version of the temporal database
dbif.close()
dbif.connect()
metadata = get_tgis_metadata(dbif)
dbif.close()
if metadata is None:
msgr.fatal(_("Unable to receive temporal database metadata.\n"
"Current temporal database info:%(info)s") % (
{"info": get_database_info_string()}))
for entry in metadata:
if "tgis_version" in entry and entry[1] != str(get_tgis_version()):
msgr.fatal(_("Unsupported temporal database: version mismatch."
"\n %(backup)s Supported temporal API version is:"
" %(api)i.\nPlease update your GRASS GIS "
"installation.\nCurrent temporal database info:"
"%(info)s") % ({"backup": backup_howto,
"api": get_tgis_version(),
"info": get_database_info_string()}))
if "tgis_db_version" in entry and entry[1] != str(get_tgis_db_version()):
msgr.fatal(_("Unsupported temporal database: version mismatch."
"\n %(backup)s Supported temporal database version"
" is: %(tdb)i\nCurrent temporal database info:"
"%(info)s") % ({"backup": backup_howto,
"tdb": get_tgis_db_version(),
"info": get_database_info_string()}))
return
create_temporal_database(dbif)
###############################################################################
def get_database_info_string():
dbif = SQLDatabaseInterfaceConnection()
info = "\nDBMI interface:..... " + str(dbif.get_dbmi().__name__)
info += "\nTemporal database:.. " + str(get_tgis_database_string())
return info
###############################################################################
def create_temporal_database(dbif):
"""This function will create the temporal database
It will create all tables and triggers that are needed to run
the temporal GIS
:param dbif: The database interface to be used
"""
global tgis_backend
global tgis_version
global tgis_db_version
global tgis_database_string
template_path = get_sql_template_path()
msgr = get_tgis_message_interface()
# Read all SQL scripts and templates
map_tables_template_sql = open(os.path.join(
template_path, "map_tables_template.sql"), 'r').read()
raster_metadata_sql = open(os.path.join(
get_sql_template_path(), "raster_metadata_table.sql"), 'r').read()
raster3d_metadata_sql = open(os.path.join(template_path,
"raster3d_metadata_table.sql"),
'r').read()
vector_metadata_sql = open(os.path.join(template_path,
"vector_metadata_table.sql"),
'r').read()
raster_views_sql = open(os.path.join(template_path, "raster_views.sql"),
'r').read()
raster3d_views_sql = open(os.path.join(template_path,
"raster3d_views.sql"), 'r').read()
vector_views_sql = open(os.path.join(template_path, "vector_views.sql"),
'r').read()
stds_tables_template_sql = open(os.path.join(template_path,
"stds_tables_template.sql"),
'r').read()
strds_metadata_sql = open(os.path.join(template_path,
"strds_metadata_table.sql"),
'r').read()
str3ds_metadata_sql = open(os.path.join(template_path,
"str3ds_metadata_table.sql"),
'r').read()
stvds_metadata_sql = open(os.path.join(template_path,
"stvds_metadata_table.sql"),
'r').read()
strds_views_sql = open(os.path.join(template_path, "strds_views.sql"),
'r').read()
str3ds_views_sql = open(os.path.join(template_path, "str3ds_views.sql"),
'r').read()
stvds_views_sql = open(os.path.join(template_path, "stvds_views.sql"),
'r').read()
# Create the raster, raster3d and vector tables SQL statements
raster_tables_sql = map_tables_template_sql.replace("GRASS_MAP", "raster")
vector_tables_sql = map_tables_template_sql.replace("GRASS_MAP", "vector")
raster3d_tables_sql = map_tables_template_sql.replace(
"GRASS_MAP", "raster3d")
# Create the space-time raster, raster3d and vector dataset tables
# SQL statements
strds_tables_sql = stds_tables_template_sql.replace("STDS", "strds")
stvds_tables_sql = stds_tables_template_sql.replace("STDS", "stvds")
str3ds_tables_sql = stds_tables_template_sql.replace("STDS", "str3ds")
msgr.message(_("Creating temporal database: %s" % (str(tgis_database_string))))
if tgis_backend == "sqlite":
# We need to create the sqlite3 database path if it does not exist
tgis_dir = os.path.dirname(tgis_database_string)
if not os.path.exists(tgis_dir):
try:
os.makedirs(tgis_dir)
except Exception as e:
msgr.fatal(_("Unable to create SQLite temporal database\n"
"Exception: %s\nPlease use t.connect to set a "
"read- and writable temporal database path" % (e)))
# Set up the trigger that takes care of
# the correct deletion of entries across the different tables
delete_trigger_sql = open(os.path.join(template_path,
"sqlite3_delete_trigger.sql"),
'r').read()
indexes_sql = open(os.path.join(template_path, "sqlite3_indexes.sql"),
'r').read()
else:
# Set up the trigger that takes care of
# the correct deletion of entries across the different tables
delete_trigger_sql = open(os.path.join(template_path,
"postgresql_delete_trigger.sql"),
'r').read()
indexes_sql = open(os.path.join(template_path,
"postgresql_indexes.sql"), 'r').read()
# Connect now to the database
if dbif.connected is not True:
dbif.connect()
# Execute the SQL statements for sqlite
# Create the global tables for the native grass datatypes
dbif.execute_transaction(raster_tables_sql)
dbif.execute_transaction(raster_metadata_sql)
dbif.execute_transaction(raster_views_sql)
dbif.execute_transaction(vector_tables_sql)
dbif.execute_transaction(vector_metadata_sql)
dbif.execute_transaction(vector_views_sql)
dbif.execute_transaction(raster3d_tables_sql)
dbif.execute_transaction(raster3d_metadata_sql)
dbif.execute_transaction(raster3d_views_sql)
# Create the tables for the new space-time datatypes
dbif.execute_transaction(strds_tables_sql)
dbif.execute_transaction(strds_metadata_sql)
dbif.execute_transaction(strds_views_sql)
dbif.execute_transaction(stvds_tables_sql)
dbif.execute_transaction(stvds_metadata_sql)
dbif.execute_transaction(stvds_views_sql)
dbif.execute_transaction(str3ds_tables_sql)
dbif.execute_transaction(str3ds_metadata_sql)
dbif.execute_transaction(str3ds_views_sql)
# The delete trigger
dbif.execute_transaction(delete_trigger_sql)
# The indexes
dbif.execute_transaction(indexes_sql)
# Create the tgis metadata table to store the database
# initial configuration
# The metadata table content
metadata = {}
metadata["tgis_version"] = tgis_version
metadata["tgis_db_version"] = tgis_db_version
metadata["creation_time"] = datetime.today()
_create_tgis_metadata_table(metadata, dbif)
dbif.close()
###############################################################################
def _create_tgis_metadata_table(content, dbif=None):
"""Create the temporal gis metadata table which stores all metadata
information about the temporal database.
:param content: The dictionary that stores the key:value metadata
that should be stored in the metadata table
:param dbif: The database interface to be used
"""
dbif, connected = init_dbif(dbif)
statement = "CREATE TABLE tgis_metadata (key VARCHAR NOT NULL, value VARCHAR);\n"
dbif.execute_transaction(statement)
for key in content.keys():
statement = "INSERT INTO tgis_metadata (key, value) VALUES " + \
"(\'%s\' , \'%s\');\n" % (str(key), str(content[key]))
dbif.execute_transaction(statement)
if connected:
dbif.close()
###############################################################################
class SQLDatabaseInterfaceConnection(object):
def __init__(self):
self.tgis_mapsets = get_available_temporal_mapsets()
self.current_mapset = get_current_mapset()
self.connections = {}
self.connected = False
self.unique_connections = {}
for mapset in self.tgis_mapsets.keys():
driver, dbstring = self.tgis_mapsets[mapset]
if dbstring not in self.unique_connections.keys():
self.unique_connections[dbstring] = DBConnection(backend=driver,
dbstring=dbstring)
self.connections[mapset] = self.unique_connections[dbstring]
self.msgr = get_tgis_message_interface()
def get_dbmi(self, mapset=None):
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
return self.connections[mapset].dbmi
def rollback(self, mapset=None):
"""
Roll back the last transaction. This must be called
in case a new query should be performed after a db error.
This is only relevant for postgresql database.
"""
if mapset is None:
mapset = self.current_mapset
def connect(self):
"""Connect to the DBMI to execute SQL statements
Supported backends are sqlite3 and postgresql
"""
for mapset in self.tgis_mapsets.keys():
driver, dbstring = self.tgis_mapsets[mapset]
conn = self.connections[mapset]
if conn.is_connected() is False:
conn.connect(dbstring)
self.connected = True
def is_connected(self):
return self.connected
def close(self):
"""Close the DBMI connection
There may be several temporal databases in a location, hence
close all temporal databases that have been opened.
"""
for key in self.unique_connections.keys():
self.unique_connections[key].close()
self.connected = False
def mogrify_sql_statement(self, content, mapset=None):
"""Return the SQL statement and arguments as executable SQL string
:param content: The content as tuple with two entries, the first
entry is the SQL statement with DBMI specific
place holder (?), the second entry is the argument
list that should substitute the place holder.
:param mapset: The mapset of the abstract dataset or temporal
database location, if None the current mapset
will be used
"""
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
if mapset not in self.tgis_mapsets.keys():
self.msgr.fatal(_("Unable to mogrify sql statement. " +
self._create_mapset_error_message(mapset)))
return self.connections[mapset].mogrify_sql_statement(content)
def check_table(self, table_name, mapset=None):
"""Check if a table exists in the temporal database
:param table_name: The name of the table to be checked for existence
:param mapset: The mapset of the abstract dataset or temporal
database location, if None the current mapset
will be used
:returns: True if the table exists, False otherwise
TODO:
There may be several temporal databases in a location, hence
the mapset is used to query the correct temporal database.
"""
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
if mapset not in self.tgis_mapsets.keys():
self.msgr.fatal(_("Unable to check table. " +
self._create_mapset_error_message(mapset)))
return self.connections[mapset].check_table(table_name)
def execute(self, statement, args=None, mapset=None):
"""
:param mapset: The mapset of the abstract dataset or temporal
database location, if None the current mapset
will be used
"""
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
if mapset not in self.tgis_mapsets.keys():
self.msgr.fatal(_("Unable to execute sql statement. " +
self._create_mapset_error_message(mapset)))
return self.connections[mapset].execute(statement, args)
def fetchone(self, mapset=None):
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
if mapset not in self.tgis_mapsets.keys():
self.msgr.fatal(_("Unable to fetch one. " +
self._create_mapset_error_message(mapset)))
return self.connections[mapset].fetchone()
def fetchall(self, mapset=None):
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
if mapset not in self.tgis_mapsets.keys():
self.msgr.fatal(_("Unable to fetch all. " +
self._create_mapset_error_message(mapset)))
return self.connections[mapset].fetchall()
def execute_transaction(self, statement, mapset=None):
"""Execute a transactional SQL statement
The BEGIN and END TRANSACTION statements will be added automatically
to the sql statement
:param statement: The executable SQL statement or SQL script
"""
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
if mapset not in self.tgis_mapsets.keys():
self.msgr.fatal(_("Unable to execute transaction. " +
self._create_mapset_error_message(mapset)))
return self.connections[mapset].execute_transaction(statement)
def _create_mapset_error_message(self, mapset):
return("You have no permission to "
"access mapset <%(mapset)s>, or "
"mapset <%(mapset)s> has no temporal database. "
"Accessible mapsets are: <%(mapsets)s>" % \
{"mapset": decode(mapset),
"mapsets":','.join(self.tgis_mapsets.keys())})
###############################################################################
class DBConnection(object):
"""This class represents the database interface connection
and provides access to the chosen backend modules.
The following DBMS are supported:
- sqlite via the sqlite3 standard library
- postgresql via psycopg2
"""
def __init__(self, backend=None, dbstring=None):
"""Constructor of a database connection
:param backend: The database backend sqlite or pg
:param dbstring: The database connection string
"""
self.connected = False
if backend is None:
global tgis_backend
if decode(tgis_backend) == "sqlite":
self.dbmi = sqlite3
else:
self.dbmi = psycopg2
else:
if decode(backend) == "sqlite":
self.dbmi = sqlite3
else:
self.dbmi = psycopg2
if dbstring is None:
global tgis_database_string
self.dbstring = tgis_database_string
self.dbstring = dbstring if dbstring is not None else self.dbstring  # keep the default when no dbstring was given
self.msgr = get_tgis_message_interface()
self.msgr.debug(1, "DBConnection constructor:"\
"\n backend: %s"\
"\n dbstring: %s"%(backend, self.dbstring))
#"\n traceback:%s"%(backend, self.dbstring,
#str(" \n".join(traceback.format_stack()))))
def __del__(self):
if self.connected is True:
self.close()
def is_connected(self):
return self.connected
def rollback(self):
"""
Roll back the last transaction. This must be called
in case a new query should be performed after a db error.
This is only relevant for postgresql database.
"""
if self.dbmi.__name__ == "psycopg2":
if self.connected:
self.connection.rollback()
def connect(self, dbstring=None):
"""Connect to the DBMI to execute SQL statements
Supported backends are sqlite3 and postgresql
:param dbstring: The database connection string
"""
# Connection in the current mapset
if dbstring is None:
dbstring = self.dbstring
dbstring = decode(dbstring)
try:
if self.dbmi.__name__ == "sqlite3":
self.connection = self.dbmi.connect(dbstring,
detect_types=self.dbmi.PARSE_DECLTYPES | self.dbmi.PARSE_COLNAMES)
self.connection.row_factory = self.dbmi.Row
self.connection.isolation_level = None
self.connection.text_factory = str
self.cursor = self.connection.cursor()
self.cursor.execute("PRAGMA synchronous = OFF")
self.cursor.execute("PRAGMA journal_mode = MEMORY")
elif self.dbmi.__name__ == "psycopg2":
self.connection = self.dbmi.connect(dbstring)
#self.connection.set_isolation_level(dbmi.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.cursor = self.connection.cursor(
cursor_factory=self.dbmi.extras.DictCursor)
self.connected = True
except Exception as e:
self.msgr.fatal(_("Unable to connect to %(db)s database: "
"%(string)s\nException: \"%(ex)s\"\nPlease use"
" t.connect to set a read- and writable "
"temporal database backend") % (
{"db": self.dbmi.__name__,
"string": tgis_database_string, "ex": e, }))
def close(self):
"""Close the DBMI connection
TODO:
There may be several temporal databases in a location, hence
close all temporal databases that have been opened. Use a dictionary
to manage different connections.
"""
self.connection.commit()
self.cursor.close()
self.connected = False
def mogrify_sql_statement(self, content):
"""Return the SQL statement and arguments as executable SQL string
TODO:
Use the mapset argument to identify the correct database driver
:param content: The content as tuple with two entries, the first
entry is the SQL statement with DBMI specific
place holder (?), the second entry is the argument
list that should substitute the place holder.
:param mapset: The mapset of the abstract dataset or temporal
database location, if None the current mapset
will be used
Usage:
.. code-block:: python
>>> init()
>>> dbif = SQLDatabaseInterfaceConnection()
>>> dbif.mogrify_sql_statement(["SELECT ctime FROM raster_base WHERE id = ?",
... ["soil@PERMANENT",]])
"SELECT ctime FROM raster_base WHERE id = 'soil@PERMANENT'"
"""
sql = content[0]
args = content[1]
if self.dbmi.__name__ == "psycopg2":
if len(args) == 0:
return sql
else:
if self.connected:
try:
return self.cursor.mogrify(sql, args)
except Exception as exc:
print(sql, args)
raise exc
else:
self.connect()
statement = self.cursor.mogrify(sql, args)
self.close()
return statement
elif self.dbmi.__name__ == "sqlite3":
if len(args) == 0:
return sql
else:
# Unfortunately, as sqlite does not support
# the transformation of sql strings with qmark or
# named arguments, we must get our hands dirty
# and do it ourselves. :(
# Doors are open for SQL injection because of the
# limited python sqlite3 implementation!!!
pos = 0
count = 0
maxcount = 100
statement = sql
while count < maxcount:
pos = statement.find("?", pos + 1)
if pos == -1:
break
if args[count] is None:
statement = "%sNULL%s" % (statement[0:pos],
statement[pos + 1:])
elif isinstance(args[count], (int, long)):
statement = "%s%d%s" % (statement[0:pos], args[count],
statement[pos + 1:])
elif isinstance(args[count], float):
statement = "%s%f%s" % (statement[0:pos], args[count],
statement[pos + 1:])
elif isinstance(args[count], datetime):
statement = "%s\'%s\'%s" % (statement[0:pos], str(args[count]),
statement[pos + 1:])
else:
# Default is a string, this works for datetime
# objects too
statement = "%s\'%s\'%s" % (statement[0:pos],
str(args[count]),
statement[pos + 1:])
count += 1
return statement
def check_table(self, table_name):
"""Check if a table exists in the temporal database
:param table_name: The name of the table to be checked for existence
:param mapset: The mapset of the abstract dataset or temporal
database location, if None the current mapset
will be used
:returns: True if the table exists, False otherwise
TODO:
There may be several temporal databases in a location, hence
the mapset is used to query the correct temporal database.
"""
table_exists = False
connected = False
if not self.connected:
self.connect()
connected = True
# Check if the database already exists
if self.dbmi.__name__ == "sqlite3":
self.cursor.execute("SELECT name FROM sqlite_master WHERE "
"type='table' AND name='%s';" % table_name)
name = self.cursor.fetchone()
if name and name[0] == table_name:
table_exists = True
else:
# Check for raster_base table
self.cursor.execute("SELECT EXISTS(SELECT * FROM information_schema.tables "
"WHERE table_name=%s)", ('%s' % table_name,))
if self.cursor.fetchone()[0]:
table_exists = True
if connected:
self.close()
return table_exists
def execute(self, statement, args=None):
"""Execute a SQL statement
:param statement: The executable SQL statement or SQL script
"""
connected = False
if not self.connected:
self.connect()
connected = True
try:
if args:
self.cursor.execute(statement, args)
else:
self.cursor.execute(statement)
except:
if connected:
self.close()
self.msgr.error(_("Unable to execute :\n %(sql)s" %
{"sql": statement}))
raise
if connected:
self.close()
def fetchone(self):
if self.connected:
return self.cursor.fetchone()
return None
def fetchall(self):
if self.connected:
return self.cursor.fetchall()
return None
def execute_transaction(self, statement, mapset=None):
"""Execute a transactional SQL statement
The BEGIN and END TRANSACTION statements will be added automatically
to the sql statement
:param statement: The executable SQL statement or SQL script
"""
connected = False
if not self.connected:
self.connect()
connected = True
sql_script = ""
sql_script += "BEGIN TRANSACTION;\n"
sql_script += statement
sql_script += "END TRANSACTION;"
try:
if self.dbmi.__name__ == "sqlite3":
self.cursor.executescript(sql_script)
else:
self.cursor.execute(sql_script)
self.connection.commit()
except:
if connected:
self.close()
self.msgr.error(_("Unable to execute transaction:\n %(sql)s" %
{"sql": statement}))
raise
if connected:
self.close()
###############################################################################
def init_dbif(dbif):
"""This method checks if the database interface connection exists,
if not, a new one will be created, connected, and True will be returned.
If the database interface exists but is not connected, the connection will
be established.
:returns: the tuple (dbif, True|False)
Usage code sample:
.. code-block:: python
dbif, connect = tgis.init_dbif(None)
sql = dbif.mogrify_sql_statement(["SELECT * FROM raster_base WHERE id = ?",
["soil@PERMANENT"]])
dbif.execute_transaction(sql)
if connect:
dbif.close()
"""
if dbif is None:
dbif = SQLDatabaseInterfaceConnection()
dbif.connect()
return dbif, True
elif dbif.is_connected() is False:
dbif.connect()
return dbif, True
return dbif, False
###############################################################################
if __name__ == "__main__":
import doctest
doctest.testmod()
| []
| []
| [
"GRASS_TGIS_PROFILE",
"GISBASE",
"GRASS_TGIS_RAISE_ON_ERROR"
]
| [] | ["GRASS_TGIS_PROFILE", "GISBASE", "GRASS_TGIS_RAISE_ON_ERROR"] | python | 3 | 0 | |
kaffepi/config.py | import os
def read_envfile(filename='.env'):
# Poor man's dotenv
if os.path.isfile(filename):
with open(filename, 'r') as infp:
kvs = [
[a.strip() for a in line.split('=', 1)]
for line in infp
if '=' in line and not line.startswith('#')
]
os.environ.update(dict(kvs))
def get_config():
return dict(
OUTPUT_DIR=(
os.environ.get('OUTPUT_DIR') or
os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'shots'))
),
RASPISTILL=os.environ.get('RASPISTILL', '/usr/bin/raspistill'),
S3_BUCKET=os.environ.get('S3_BUCKET'),
)
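if __name__ == "__main__":
    # Illustrative sketch (hypothetical, added for demonstration only):
    # load the optional .env file, then print the resolved configuration.
    read_envfile()
    print(get_config())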
| []
| []
| [
"S3_BUCKET",
"RASPISTILL",
"OUTPUT_DIR"
]
| [] | ["S3_BUCKET", "RASPISTILL", "OUTPUT_DIR"] | python | 3 | 0 | |
Collections-a-installer/community-general-2.4.0/plugins/lookup/passwordstore.py | # (c) 2017, Patrick Deelman <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: passwordstore
author:
- Patrick Deelman (!UNKNOWN) <[email protected]>
short_description: manage passwords with passwordstore.org's pass utility
description:
- Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility.
It also retrieves YAML style keys stored as multiple lines in the password file.
options:
_terms:
description: query key.
required: True
passwordstore:
description: location of the password store.
default: '~/.password-store'
directory:
description: The directory of the password store.
env:
- name: PASSWORD_STORE_DIR
create:
description: Create the password if it does not already exist.
type: bool
default: 'no'
overwrite:
description: Overwrite the password if it does already exist.
type: bool
default: 'no'
umask:
description:
- Sets the umask for the created .gpg files. The first octet must not be greater than 3 (otherwise the password file is not user readable).
- Note pass' default value is C('077').
env:
- name: PASSWORD_STORE_UMASK
version_added: 1.3.0
returnall:
description: Return all the content of the password, not only the first line.
type: bool
default: 'no'
subkey:
description: Return a specific subkey of the password. When set to C(password), always returns the first line.
default: password
userpass:
description: Specify a password to save, instead of a generated one.
length:
description: The length of the generated password.
type: integer
default: 16
backup:
description: Used with C(overwrite=yes). Backup the previous password in a subkey.
type: bool
default: 'no'
nosymbols:
description: Use only alphanumeric characters (no symbols).
type: bool
default: 'no'
'''
EXAMPLES = """
# Debug is used for examples, BAD IDEA to show passwords on screen
- name: Basic lookup. Fails if example/test doesn't exist
ansible.builtin.debug:
msg: "{{ lookup('community.general.passwordstore', 'example/test')}}"
- name: Create pass with random 16 character password. If password exists just give the password
ansible.builtin.debug:
var: mypassword
vars:
mypassword: "{{ lookup('community.general.passwordstore', 'example/test create=true')}}"
- name: Different size password
ansible.builtin.debug:
msg: "{{ lookup('community.general.passwordstore', 'example/test create=true length=42')}}"
- name: Create password and overwrite the password if it exists. As a bonus, this module includes the old password inside the pass file
ansible.builtin.debug:
msg: "{{ lookup('community.general.passwordstore', 'example/test create=true overwrite=true')}}"
- name: Create an alphanumeric password
ansible.builtin.debug:
msg: "{{ lookup('community.general.passwordstore', 'example/test create=true nosymbols=true') }}"
- name: Return the value for user in the KV pair user, username
ansible.builtin.debug:
msg: "{{ lookup('community.general.passwordstore', 'example/test subkey=user')}}"
- name: Return the entire password file content
ansible.builtin.set_fact:
passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test returnall=true')}}"
"""
RETURN = """
_raw:
description:
- a password
type: list
elements: str
"""
import os
import subprocess
import time
import yaml
from distutils import util
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils.encrypt import random_password
from ansible.plugins.lookup import LookupBase
from ansible import constants as C
# backhacked check_output with input for python 2.7
# http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output
def check_output2(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if 'stderr' in kwargs:
raise ValueError('stderr argument not allowed, it will be overridden.')
if 'input' in kwargs:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
b_inputdata = to_bytes(kwargs['input'], errors='surrogate_or_strict')
del kwargs['input']
kwargs['stdin'] = subprocess.PIPE
else:
b_inputdata = None
process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
try:
b_out, b_err = process.communicate(b_inputdata)
except Exception:
process.kill()
process.wait()
raise
retcode = process.poll()
if retcode != 0 or \
b'encryption failed: Unusable public key' in b_out or \
b'encryption failed: Unusable public key' in b_err:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(
retcode,
cmd,
to_native(b_out + b_err, errors='surrogate_or_strict')
)
return b_out
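# Illustrative sketch (commented out, hypothetical values): the command and
# input below are assumed example values; check_output2() is used in the same
# way throughout this module:
#
#     out = check_output2(["pass", "insert", "-f", "-m", "example/test"],
#                         input="secret\n", env=os.environ.copy())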
class LookupModule(LookupBase):
def parse_params(self, term):
# I went with the "traditional" param followed by space separated KV pairs.
# Waiting for final implementation of lookup parameter parsing.
# See: https://github.com/ansible/ansible/issues/12255
params = term.split()
if len(params) > 0:
# the first param is the pass-name
self.passname = params[0]
# next parse the optional parameters in keyvalue pairs
try:
for param in params[1:]:
name, value = param.split('=', 1)
if name not in self.paramvals:
raise AnsibleAssertionError('%s not in paramvals' % name)
self.paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
# check and convert values
try:
for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']:
if not isinstance(self.paramvals[key], bool):
self.paramvals[key] = util.strtobool(self.paramvals[key])
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
if not isinstance(self.paramvals['length'], int):
if self.paramvals['length'].isdigit():
self.paramvals['length'] = int(self.paramvals['length'])
else:
raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length']))
# Collect pass environment variables from the plugin's parameters.
self.env = os.environ.copy()
# Set PASSWORD_STORE_DIR if directory is set
if self.paramvals['directory']:
if os.path.isdir(self.paramvals['directory']):
self.env['PASSWORD_STORE_DIR'] = self.paramvals['directory']
else:
raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory']))
# Set PASSWORD_STORE_UMASK if umask is set
if 'umask' in self.paramvals:
if len(self.paramvals['umask']) != 3:
raise AnsibleError('Passwordstore umask must have a length of 3.')
elif int(self.paramvals['umask'][0]) > 3:
raise AnsibleError('Passwordstore umask not allowed (password not user readable).')
else:
self.env['PASSWORD_STORE_UMASK'] = self.paramvals['umask']
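    # Run "pass show <passname>"; the first output line is the password, remaining lines
    # are parsed into self.passdict (as YAML if possible, else as "key: value" pairs).
    # Returns False only when the entry is missing and create=true was requested.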
def check_pass(self):
try:
self.passoutput = to_text(
check_output2(["pass", "show", self.passname], env=self.env),
errors='surrogate_or_strict'
).splitlines()
self.password = self.passoutput[0]
self.passdict = {}
try:
values = yaml.safe_load('\n'.join(self.passoutput[1:]))
for key, item in values.items():
self.passdict[key] = item
except (yaml.YAMLError, AttributeError):
for line in self.passoutput[1:]:
if ':' in line:
name, value = line.split(':', 1)
self.passdict[name.strip()] = value.strip()
except (subprocess.CalledProcessError) as e:
if e.returncode != 0 and 'not in the password store' in e.output:
                # pass returns 1 and its output contains 'is not in the password store.'
                # Decide whether this is the expected "missing entry" case (create=true) or an error.
if not self.paramvals['create']:
raise AnsibleError('passname: {0} not found, use create=True'.format(self.passname))
else:
return False
else:
raise AnsibleError(e)
return True
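    # Choose the new password: the user-supplied userpass if given, otherwise a random
    # password of the requested length (alphanumeric only when nosymbols=true).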
def get_newpass(self):
if self.paramvals['nosymbols']:
chars = C.DEFAULT_PASSWORD_CHARS[:62]
else:
chars = C.DEFAULT_PASSWORD_CHARS
if self.paramvals['userpass']:
newpass = self.paramvals['userpass']
else:
newpass = random_password(length=self.paramvals['length'], chars=chars)
return newpass
def update_password(self):
# generate new password, insert old lines from current result and return new password
newpass = self.get_newpass()
datetime = time.strftime("%d/%m/%Y %H:%M:%S")
msg = newpass + '\n'
if self.passoutput[1:]:
msg += '\n'.join(self.passoutput[1:]) + '\n'
if self.paramvals['backup']:
msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime)
try:
check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
except (subprocess.CalledProcessError) as e:
raise AnsibleError(e)
return newpass
def generate_password(self):
        # generate a new pass file and record "lookup_pass: First generated by ansible on {date}"
        # the password comes from get_newpass() and is inserted with 'pass insert -f -m'
newpass = self.get_newpass()
datetime = time.strftime("%d/%m/%Y %H:%M:%S")
msg = newpass + '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime)
try:
check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
except (subprocess.CalledProcessError) as e:
raise AnsibleError(e)
return newpass
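    # Resolve the lookup result: the whole file (returnall), the password line
    # (subkey=password), or the value of the requested subkey if present.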
def get_passresult(self):
if self.paramvals['returnall']:
return os.linesep.join(self.passoutput)
if self.paramvals['subkey'] == 'password':
return self.password
else:
if self.paramvals['subkey'] in self.passdict:
return self.passdict[self.paramvals['subkey']]
else:
return None
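    # Entry point: set defaults, parse each term, then read, update (overwrite=true),
    # or create (create=true) the entry as needed.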
def run(self, terms, variables, **kwargs):
result = []
self.paramvals = {
'subkey': 'password',
'directory': variables.get('passwordstore'),
'create': False,
'returnall': False,
'overwrite': False,
'nosymbols': False,
'userpass': '',
'length': 16,
'backup': False,
}
for term in terms:
self.parse_params(term) # parse the input into paramvals
if self.check_pass(): # password exists
if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password':
result.append(self.update_password())
else:
result.append(self.get_passresult())
else: # password does not exist
if self.paramvals['create']:
result.append(self.generate_password())
return result
| []
| []
| []
| [] | [] | python | 0 | 0 | |
registry/gossip/gossip_test.go | package gossip
import (
"os"
"sync"
"testing"
"time"
"github.com/google/uuid"
"github.com/hashicorp/memberlist"
"github.com/micro/go-micro/registry"
)
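// newMemberlistConfig returns a LAN memberlist config tuned for tests: gossip
// verification and compression disabled, a short push/pull interval, logging to
// stderr, and a random node name.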
func newMemberlistConfig() *memberlist.Config {
mc := memberlist.DefaultLANConfig()
mc.DisableTcpPings = false
mc.GossipVerifyIncoming = false
mc.GossipVerifyOutgoing = false
mc.EnableCompression = false
mc.PushPullInterval = 3 * time.Second
mc.LogOutput = os.Stderr
mc.ProtocolVersion = 4
mc.Name = uuid.New().String()
return mc
}
func newRegistry(opts ...registry.Option) registry.Registry {
options := []registry.Option{
ConnectRetry(true),
ConnectTimeout(60 * time.Second),
}
options = append(options, opts...)
r := NewRegistry(options...)
return r
}
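// TestGossipRegistryBroadcast registers one service on each of two gossiping registries
// and verifies that each node sees the service registered on the other.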
func TestGossipRegistryBroadcast(t *testing.T) {
mc1 := newMemberlistConfig()
r1 := newRegistry(Config(mc1), Address("127.0.0.1:54321"))
mc2 := newMemberlistConfig()
r2 := newRegistry(Config(mc2), Address("127.0.0.1:54322"), registry.Addrs("127.0.0.1:54321"))
defer r1.(*gossipRegistry).Stop()
defer r2.(*gossipRegistry).Stop()
	svc1 := &registry.Service{Name: "service.1", Version: "0.0.0.1"}
	svc2 := &registry.Service{Name: "service.2", Version: "0.0.0.2"}
if err := r1.Register(svc1, registry.RegisterTTL(10*time.Second)); err != nil {
t.Fatal(err)
}
if err := r2.Register(svc2, registry.RegisterTTL(10*time.Second)); err != nil {
t.Fatal(err)
}
var found bool
svcs, err := r1.ListServices()
if err != nil {
t.Fatal(err)
}
for _, svc := range svcs {
if svc.Name == "service.2" {
found = true
}
}
if !found {
t.Fatalf("[gossip registry] service.2 not found in r1, broadcast not work")
}
found = false
svcs, err = r2.ListServices()
if err != nil {
t.Fatal(err)
}
for _, svc := range svcs {
if svc.Name == "service.1" {
found = true
}
}
if !found {
t.Fatalf("[gossip registry] broadcast failed: service.1 not found in r2")
}
if err := r1.Deregister(svc1); err != nil {
t.Fatal(err)
}
if err := r2.Deregister(svc2); err != nil {
t.Fatal(err)
}
}
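// TestGossipRegistryRetry re-registers services with a short TTL in the background,
// stops r1 so service.1 expires on r2, then restarts r1 to verify connect retry.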
func TestGossipRegistryRetry(t *testing.T) {
mc1 := newMemberlistConfig()
r1 := newRegistry(Config(mc1), Address("127.0.0.1:54321"))
mc2 := newMemberlistConfig()
r2 := newRegistry(Config(mc2), Address("127.0.0.1:54322"), registry.Addrs("127.0.0.1:54321"))
defer r1.(*gossipRegistry).Stop()
defer r2.(*gossipRegistry).Stop()
	svc1 := &registry.Service{Name: "service.1", Version: "0.0.0.1"}
	svc2 := &registry.Service{Name: "service.2", Version: "0.0.0.2"}
var mu sync.Mutex
ch := make(chan struct{})
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
go func() {
for {
select {
case <-ticker.C:
mu.Lock()
if r1 != nil {
r1.Register(svc1, registry.RegisterTTL(2*time.Second))
}
if r2 != nil {
r2.Register(svc2, registry.RegisterTTL(2*time.Second))
}
if ch != nil {
close(ch)
ch = nil
}
mu.Unlock()
}
}
}()
<-ch
var found bool
svcs, err := r2.ListServices()
if err != nil {
t.Fatal(err)
}
for _, svc := range svcs {
if svc.Name == "service.1" {
found = true
}
}
if !found {
t.Fatalf("[gossip registry] broadcast failed: service.1 not found in r2")
}
if err = r1.(*gossipRegistry).Stop(); err != nil {
t.Fatalf("[gossip registry] failed to stop registry: %v", err)
}
mu.Lock()
r1 = nil
mu.Unlock()
<-time.After(3 * time.Second)
found = false
svcs, err = r2.ListServices()
if err != nil {
t.Fatal(err)
}
for _, svc := range svcs {
if svc.Name == "service.1" {
found = true
}
}
if found {
t.Fatalf("[gossip registry] service.1 found in r2")
}
if tr := os.Getenv("TRAVIS"); len(tr) > 0 {
t.Logf("[gossip registry] skip test on travis")
t.Skip()
return
}
r1 = newRegistry(Config(mc1), Address("127.0.0.1:54321"))
<-time.After(2 * time.Second)
found = false
svcs, err = r2.ListServices()
if err != nil {
t.Fatal(err)
}
for _, svc := range svcs {
if svc.Name == "service.1" {
found = true
}
}
if !found {
t.Fatalf("[gossip registry] connect retry failed: service.1 not found in r2")
}
if err := r1.Deregister(svc1); err != nil {
t.Fatal(err)
}
if err := r2.Deregister(svc2); err != nil {
t.Fatal(err)
}
r1.(*gossipRegistry).Stop()
r2.(*gossipRegistry).Stop()
}
| [
"\"TRAVIS\""
]
| []
| [
"TRAVIS"
]
| [] | ["TRAVIS"] | go | 1 | 0 | |
executor/executor_test.go | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"context"
"flag"
"fmt"
"io/ioutil"
"math"
"net"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/golang/protobuf/proto"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/parser"
"github.com/pingcap/parser/auth"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/planner"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/server"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/store/copr"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
tikvutil "github.com/pingcap/tidb/store/tikv/util"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/gcutil"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/rowcodec"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tidb/util/testutil"
"github.com/pingcap/tidb/util/timeutil"
"github.com/pingcap/tipb/go-tipb"
"google.golang.org/grpc"
)
const (
checkSelectRequestHookString = "CheckSelectRequestHook"
)
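// TestT hooks gocheck into "go test": it tunes the global config, cleans the temp
// storage directory, and runs all registered suites with leak checking enabled.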
func TestT(t *testing.T) {
CustomVerboseFlag = true
*CustomParallelSuiteFlag = true
logLevel := os.Getenv("log_level")
err := logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
if err != nil {
t.Fatal(err)
}
autoid.SetStep(5000)
config.UpdateGlobal(func(conf *config.Config) {
conf.Log.SlowThreshold = 30000 // 30s
conf.TiKVClient.AsyncCommit.SafeWindow = 0
conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0
conf.Experimental.AllowsExpressionIndex = true
})
tmpDir := config.GetGlobalConfig().TempStoragePath
_ = os.RemoveAll(tmpDir) // clean the uncleared temp file during the last run.
_ = os.MkdirAll(tmpDir, 0755)
testleak.BeforeTest()
TestingT(t)
testleak.AfterTestT(t)()
}
var _ = Suite(&testSuite{&baseTestSuite{}})
var _ = Suite(&testSuiteP1{&baseTestSuite{}})
var _ = Suite(&testSuiteP2{&baseTestSuite{}})
var _ = Suite(&testSuite1{})
var _ = SerialSuites(&testSerialSuite2{})
var _ = Suite(&testSuite2{&baseTestSuite{}})
var _ = Suite(&testSuite3{&baseTestSuite{}})
var _ = Suite(&testSuite4{&baseTestSuite{}})
var _ = Suite(&testSuite5{&baseTestSuite{}})
var _ = Suite(&testSuiteJoin1{&baseTestSuite{}})
var _ = Suite(&testSuiteJoin2{&baseTestSuite{}})
var _ = Suite(&testSuiteJoin3{&baseTestSuite{}})
var _ = SerialSuites(&testSuiteJoinSerial{&baseTestSuite{}})
var _ = Suite(&testSuiteAgg{baseTestSuite: &baseTestSuite{}})
var _ = Suite(&testSuite6{&baseTestSuite{}})
var _ = Suite(&testSuite7{&baseTestSuite{}})
var _ = Suite(&testSuite8{&baseTestSuite{}})
var _ = SerialSuites(&testShowStatsSuite{&baseTestSuite{}})
var _ = Suite(&testBypassSuite{})
var _ = Suite(&testUpdateSuite{})
var _ = Suite(&testPointGetSuite{})
var _ = Suite(&testBatchPointGetSuite{})
var _ = SerialSuites(&testRecoverTable{})
var _ = SerialSuites(&testMemTableReaderSuite{&testClusterTableBase{}})
var _ = SerialSuites(&testFlushSuite{})
var _ = SerialSuites(&testAutoRandomSuite{&baseTestSuite{}})
var _ = SerialSuites(&testClusterTableSuite{})
var _ = SerialSuites(&testPrepareSerialSuite{&baseTestSuite{}})
var _ = SerialSuites(&testSplitTable{&baseTestSuite{}})
var _ = Suite(&testSuiteWithData{baseTestSuite: &baseTestSuite{}})
var _ = SerialSuites(&testSerialSuite1{&baseTestSuite{}})
var _ = SerialSuites(&testSlowQuery{&baseTestSuite{}})
var _ = Suite(&partitionTableSuite{&baseTestSuite{}})
var _ = SerialSuites(&tiflashTestSuite{})
var _ = SerialSuites(&globalIndexSuite{&baseTestSuite{}})
var _ = SerialSuites(&testSerialSuite{&baseTestSuite{}})
var _ = SerialSuites(&testStaleTxnSerialSuite{&baseTestSuite{}})
var _ = SerialSuites(&testCoprCache{})
var _ = SerialSuites(&testPrepareSuite{})
type testSuite struct{ *baseTestSuite }
type testSuiteP1 struct{ *baseTestSuite }
type testSuiteP2 struct{ *baseTestSuite }
type testSplitTable struct{ *baseTestSuite }
type testSuiteWithData struct {
*baseTestSuite
testData testutil.TestData
}
type testSlowQuery struct{ *baseTestSuite }
type partitionTableSuite struct{ *baseTestSuite }
type globalIndexSuite struct{ *baseTestSuite }
type testSerialSuite struct{ *baseTestSuite }
type testStaleTxnSerialSuite struct{ *baseTestSuite }
type testCoprCache struct {
store kv.Storage
dom *domain.Domain
cls cluster.Cluster
}
type testPrepareSuite struct{ testData testutil.TestData }
type baseTestSuite struct {
cluster cluster.Cluster
store kv.Storage
domain *domain.Domain
*parser.Parser
ctx *mock.Context // nolint:structcheck
}
var mockTikv = flag.Bool("mockTikv", true, "use mock tikv store in executor test")
func (s *baseTestSuite) SetUpSuite(c *C) {
s.Parser = parser.New()
flag.Lookup("mockTikv")
useMockTikv := *mockTikv
if useMockTikv {
store, err := mockstore.NewMockStore(
mockstore.WithClusterInspector(func(c cluster.Cluster) {
mockstore.BootstrapWithSingleStore(c)
s.cluster = c
}),
)
c.Assert(err, IsNil)
s.store = store
session.SetSchemaLease(0)
session.DisableStats4Test()
}
d, err := session.BootstrapSession(s.store)
c.Assert(err, IsNil)
d.SetStatsUpdating(true)
s.domain = d
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionLog
})
}
func (s *testSuiteWithData) SetUpSuite(c *C) {
s.baseTestSuite.SetUpSuite(c)
var err error
s.testData, err = testutil.LoadTestSuiteData("testdata", "executor_suite")
c.Assert(err, IsNil)
}
func (s *testSuiteWithData) TearDownSuite(c *C) {
s.baseTestSuite.TearDownSuite(c)
c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil)
}
func (s *testPrepareSuite) SetUpSuite(c *C) {
var err error
s.testData, err = testutil.LoadTestSuiteData("testdata", "prepare_suite")
c.Assert(err, IsNil)
}
func (s *testPrepareSuite) TearDownSuite(c *C) {
c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil)
}
func (s *baseTestSuite) TearDownSuite(c *C) {
s.domain.Close()
s.store.Close()
}
func (s *globalIndexSuite) SetUpSuite(c *C) {
s.baseTestSuite.SetUpSuite(c)
config.UpdateGlobal(func(conf *config.Config) {
conf.EnableGlobalIndex = true
})
}
func (s *testSuiteP1) TestPessimisticSelectForUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int primary key, a int)")
tk.MustExec("insert into t values(1, 1)")
tk.MustExec("begin PESSIMISTIC")
tk.MustQuery("select a from t where id=1 for update").Check(testkit.Rows("1"))
tk.MustExec("update t set a=a+1 where id=1")
tk.MustExec("commit")
tk.MustQuery("select a from t where id=1").Check(testkit.Rows("2"))
}
func (s *testSuite) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
func (s *testSuiteP1) TestBind(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists testbind")
tk.MustExec("create table testbind(i int, s varchar(20))")
tk.MustExec("create index index_t on testbind(i,s)")
tk.MustExec("create global binding for select * from testbind using select * from testbind use index for join(index_t)")
c.Assert(len(tk.MustQuery("show global bindings").Rows()), Equals, 1)
tk.MustExec("create session binding for select * from testbind using select * from testbind use index for join(index_t)")
c.Assert(len(tk.MustQuery("show session bindings").Rows()), Equals, 1)
tk.MustExec("drop session binding for select * from testbind")
}
func (s *testSuiteP1) TestChange(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("alter table t change a b int")
tk.MustExec("alter table t change b c bigint")
c.Assert(tk.ExecToErr("alter table t change c d varchar(100)"), NotNil)
}
func (s *testSuiteP1) TestChangePumpAndDrainer(c *C) {
tk := testkit.NewTestKit(c, s.store)
	// changing a pump or drainer's state requires connecting to etcd,
	// so this fails with "URL scheme must be http, https, unix, or unixs: /tmp/tidb"
err := tk.ExecToErr("change pump to node_state ='paused' for node_id 'pump1'")
c.Assert(err, ErrorMatches, "URL scheme must be http, https, unix, or unixs.*")
err = tk.ExecToErr("change drainer to node_state ='paused' for node_id 'drainer1'")
c.Assert(err, ErrorMatches, "URL scheme must be http, https, unix, or unixs.*")
}
func (s *testSuiteP1) TestLoadStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
c.Assert(tk.ExecToErr("load stats"), NotNil)
c.Assert(tk.ExecToErr("load stats ./xxx.json"), NotNil)
}
func (s *testSuiteP1) TestShow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database test_show;")
tk.MustExec("use test_show")
tk.MustQuery("show engines")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key)")
c.Assert(len(tk.MustQuery("show index in t").Rows()), Equals, 1)
c.Assert(len(tk.MustQuery("show index from t").Rows()), Equals, 1)
tk.MustQuery("show charset").Check(testkit.Rows(
"utf8 UTF-8 Unicode utf8_bin 3",
"utf8mb4 UTF-8 Unicode utf8mb4_bin 4",
"ascii US ASCII ascii_bin 1",
"latin1 Latin1 latin1_bin 1",
"binary binary binary 1"))
c.Assert(len(tk.MustQuery("show master status").Rows()), Equals, 1)
tk.MustQuery("show create database test_show").Check(testkit.Rows("test_show CREATE DATABASE `test_show` /*!40100 DEFAULT CHARACTER SET utf8mb4 */"))
tk.MustQuery("show privileges").Check(testkit.Rows("Alter Tables To alter the table",
"Alter routine Functions,Procedures To alter or drop stored functions/procedures",
"Create Databases,Tables,Indexes To create new databases and tables",
"Create routine Databases To use CREATE FUNCTION/PROCEDURE",
"Create temporary tables Databases To use CREATE TEMPORARY TABLE",
"Create view Tables To create new views",
"Create user Server Admin To create new users",
"Delete Tables To delete existing rows",
"Drop Databases,Tables To drop databases, tables, and views",
"Event Server Admin To create, alter, drop and execute events",
"Execute Functions,Procedures To execute stored routines",
"File File access on server To read and write files on the server",
"Grant option Databases,Tables,Functions,Procedures To give to other users those privileges you possess",
"Index Tables To create or drop indexes",
"Insert Tables To insert data into tables",
"Lock tables Databases To use LOCK TABLES (together with SELECT privilege)",
"Process Server Admin To view the plain text of currently executing queries",
"Proxy Server Admin To make proxy user possible",
"References Databases,Tables To have references on tables",
"Reload Server Admin To reload or refresh tables, logs and privileges",
"Replication client Server Admin To ask where the slave or master servers are",
"Replication slave Server Admin To read binary log events from the master",
"Select Tables To retrieve rows from table",
"Show databases Server Admin To see all databases with SHOW DATABASES",
"Show view Tables To see views with SHOW CREATE VIEW",
"Shutdown Server Admin To shut down the server",
"Super Server Admin To use KILL thread, SET GLOBAL, CHANGE MASTER, etc.",
"Trigger Tables To use triggers",
"Create tablespace Server Admin To create/alter/drop tablespaces",
"Update Tables To update existing rows",
"Usage Server Admin No privileges - allow connect only",
"BACKUP_ADMIN Server Admin ",
"SYSTEM_VARIABLES_ADMIN Server Admin ",
"ROLE_ADMIN Server Admin ",
"CONNECTION_ADMIN Server Admin ",
"RESTRICTED_TABLES_ADMIN Server Admin ",
"RESTRICTED_STATUS_ADMIN Server Admin ",
))
c.Assert(len(tk.MustQuery("show table status").Rows()), Equals, 1)
}
func (s *testSuite3) TestAdmin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1))")
tk.MustExec("insert admin_test (c1) values (1),(2),(NULL)")
ctx := context.Background()
// cancel DDL jobs test
r, err := tk.Exec("admin cancel ddl jobs 1")
c.Assert(err, IsNil, Commentf("err %v", err))
req := r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row := req.GetRow(0)
c.Assert(row.Len(), Equals, 2)
c.Assert(row.GetString(0), Equals, "1")
c.Assert(row.GetString(1), Matches, "*DDL Job:1 not found")
// show ddl test;
r, err = tk.Exec("admin show ddl")
c.Assert(err, IsNil)
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 6)
txn, err := s.store.Begin()
c.Assert(err, IsNil)
ddlInfo, err := admin.GetDDLInfo(txn)
c.Assert(err, IsNil)
c.Assert(row.GetInt64(0), Equals, ddlInfo.SchemaVer)
// TODO: Pass this test.
// rowOwnerInfos := strings.Split(row.Data[1].GetString(), ",")
// ownerInfos := strings.Split(ddlInfo.Owner.String(), ",")
// c.Assert(rowOwnerInfos[0], Equals, ownerInfos[0])
serverInfo, err := infosync.GetServerInfoByID(ctx, row.GetString(1))
c.Assert(err, IsNil)
c.Assert(row.GetString(2), Equals, serverInfo.IP+":"+
strconv.FormatUint(uint64(serverInfo.Port), 10))
c.Assert(row.GetString(3), Equals, "")
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
c.Assert(req.NumRows() == 0, IsTrue)
err = txn.Rollback()
c.Assert(err, IsNil)
// show DDL jobs test
r, err = tk.Exec("admin show ddl jobs")
c.Assert(err, IsNil)
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 11)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
historyJobs, err := admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
c.Assert(len(historyJobs), Greater, 1)
c.Assert(len(row.GetString(1)), Greater, 0)
c.Assert(err, IsNil)
c.Assert(row.GetInt64(0), Equals, historyJobs[0].ID)
c.Assert(err, IsNil)
r, err = tk.Exec("admin show ddl jobs 20")
c.Assert(err, IsNil)
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 11)
c.Assert(row.GetInt64(0), Equals, historyJobs[0].ID)
c.Assert(err, IsNil)
// show DDL job queries test
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test2")
tk.MustExec("create table admin_test2 (c1 int, c2 int, c3 int default 1, index (c1))")
result := tk.MustQuery(`admin show ddl job queries 1, 1, 1`)
result.Check(testkit.Rows())
result = tk.MustQuery(`admin show ddl job queries 1, 2, 3, 4`)
result.Check(testkit.Rows())
historyJobs, err = admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
result = tk.MustQuery(fmt.Sprintf("admin show ddl job queries %d", historyJobs[0].ID))
result.Check(testkit.Rows(historyJobs[0].Query))
c.Assert(err, IsNil)
// check table test
tk.MustExec("create table admin_test1 (c1 int, c2 int default 1, index (c1))")
tk.MustExec("insert admin_test1 (c1) values (21),(22)")
r, err = tk.Exec("admin check table admin_test, admin_test1")
c.Assert(err, IsNil)
c.Assert(r, IsNil)
// error table name
err = tk.ExecToErr("admin check table admin_test_error")
c.Assert(err, NotNil)
// different index values
sctx := tk.Se.(sessionctx.Context)
dom := domain.GetDomain(sctx)
is := dom.InfoSchema()
c.Assert(is, NotNil)
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("admin_test"))
c.Assert(err, IsNil)
c.Assert(tb.Indices(), HasLen, 1)
_, err = tb.Indices()[0].Create(mock.NewContext(), txn, types.MakeDatums(int64(10)), kv.IntHandle(1), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
errAdmin := tk.ExecToErr("admin check table admin_test")
c.Assert(errAdmin, NotNil)
if config.CheckTableBeforeDrop {
err = tk.ExecToErr("drop table admin_test")
c.Assert(err.Error(), Equals, errAdmin.Error())
		// Drop the inconsistent index.
tk.MustExec("alter table admin_test drop index c1")
tk.MustExec("admin check table admin_test")
}
// checksum table test
tk.MustExec("create table checksum_with_index (id int, count int, PRIMARY KEY(id), KEY(count))")
tk.MustExec("create table checksum_without_index (id int, count int, PRIMARY KEY(id))")
r, err = tk.Exec("admin checksum table checksum_with_index, checksum_without_index")
c.Assert(err, IsNil)
res := tk.ResultSetToResult(r, Commentf("admin checksum table"))
	// Mocktikv returns 1 for every table/index scan, and the checksums of a table are xor-ed together.
// For "checksum_with_index", we have two checksums, so the result will be 1^1 = 0.
// For "checksum_without_index", we only have one checksum, so the result will be 1.
res.Sort().Check(testkit.Rows("test checksum_with_index 0 2 2", "test checksum_without_index 1 1 1"))
tk.MustExec("drop table if exists t1;")
tk.MustExec("CREATE TABLE t1 (c2 BOOL, PRIMARY KEY (c2));")
tk.MustExec("INSERT INTO t1 SET c2 = '0';")
tk.MustExec("ALTER TABLE t1 ADD COLUMN c3 DATETIME NULL DEFAULT '2668-02-03 17:19:31';")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx2 (c3);")
tk.MustExec("ALTER TABLE t1 ADD COLUMN c4 bit(10) default 127;")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx3 (c4);")
tk.MustExec("admin check table t1;")
	// Test that admin show ddl jobs returns the table name after the table has been dropped.
tk.MustExec("drop table if exists t1;")
re := tk.MustQuery("admin show ddl jobs 1")
rows := re.Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][2], Equals, "t1")
	// Test reverse scan of history DDL jobs when the DDL history job queue spans multiple regions.
txn, err = s.store.Begin()
c.Assert(err, IsNil)
historyJobs, err = admin.GetHistoryDDLJobs(txn, 20)
c.Assert(err, IsNil)
// Split region for history ddl job queues.
m := meta.NewMeta(txn)
startKey := meta.DDLJobHistoryKey(m, 0)
endKey := meta.DDLJobHistoryKey(m, historyJobs[0].ID)
s.cluster.SplitKeys(startKey, endKey, int(historyJobs[0].ID/5))
historyJobs2, err := admin.GetHistoryDDLJobs(txn, 20)
c.Assert(err, IsNil)
c.Assert(historyJobs, DeepEquals, historyJobs2)
}
func (s *testSuiteP2) TestAdminShowDDLJobs(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_admin_show_ddl_jobs")
tk.MustExec("use test_admin_show_ddl_jobs")
tk.MustExec("create table t (a int);")
re := tk.MustQuery("admin show ddl jobs 1")
row := re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
jobID, err := strconv.Atoi(row[0].(string))
c.Assert(err, IsNil)
err = kv.RunInNewTxn(context.Background(), s.store, true, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
job, err := t.GetHistoryDDLJob(int64(jobID))
c.Assert(err, IsNil)
c.Assert(job, NotNil)
		// Test for compatibility. Old TiDB versions don't have the SchemaName field, and BinlogInfo may be nil.
// See PR: 11561.
job.BinlogInfo = nil
job.SchemaName = ""
err = t.AddHistoryDDLJob(job, true)
c.Assert(err, IsNil)
return nil
})
c.Assert(err, IsNil)
re = tk.MustQuery("admin show ddl jobs 1")
row = re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
re = tk.MustQuery("admin show ddl jobs 1 where job_type='create table'")
row = re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
c.Assert(row[9], Equals, "<nil>")
// Test the START_TIME and END_TIME field.
re = tk.MustQuery("admin show ddl jobs where job_type = 'create table' and start_time > str_to_date('20190101','%Y%m%d%H%i%s')")
row = re.Rows()[0]
c.Assert(row[2], Equals, "t")
c.Assert(row[9], Equals, "<nil>")
}
func (s *testSuiteP2) TestAdminChecksumOfPartitionedTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("USE test;")
tk.MustExec("DROP TABLE IF EXISTS admin_checksum_partition_test;")
tk.MustExec("CREATE TABLE admin_checksum_partition_test (a INT) PARTITION BY HASH(a) PARTITIONS 4;")
tk.MustExec("INSERT INTO admin_checksum_partition_test VALUES (1), (2);")
r := tk.MustQuery("ADMIN CHECKSUM TABLE admin_checksum_partition_test;")
r.Check(testkit.Rows("test admin_checksum_partition_test 1 5 5"))
}
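// fillData creates the given table and seeds it with two rows used by several SELECT tests.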
func (s *baseTestSuite) fillData(tk *testkit.TestKit, table string) {
tk.MustExec("use test")
tk.MustExec(fmt.Sprintf("create table %s(id int not null default 1, name varchar(255), PRIMARY KEY(id));", table))
// insert data
tk.MustExec(fmt.Sprintf("insert INTO %s VALUES (1, \"hello\");", table))
tk.CheckExecResult(1, 0)
tk.MustExec(fmt.Sprintf("insert into %s values (2, \"hello\");", table))
tk.CheckExecResult(1, 0)
}
type testCase struct {
data1 []byte
data2 []byte
expected []string
restData []byte
expectedMsg string
}
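// checkCases runs each testCase through the LoadDataInfo pipeline, commits the batch,
// and verifies both the resulting rows and the last reported message.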
func checkCases(tests []testCase, ld *executor.LoadDataInfo,
c *C, tk *testkit.TestKit, ctx sessionctx.Context, selectSQL, deleteSQL string) {
origin := ld.IgnoreLines
for _, tt := range tests {
ld.IgnoreLines = origin
c.Assert(ctx.NewTxn(context.Background()), IsNil)
ctx.GetSessionVars().StmtCtx.DupKeyAsWarning = true
ctx.GetSessionVars().StmtCtx.BadNullAsWarning = true
ctx.GetSessionVars().StmtCtx.InLoadDataStmt = true
ctx.GetSessionVars().StmtCtx.InDeleteStmt = false
data, reachLimit, err1 := ld.InsertData(context.Background(), tt.data1, tt.data2)
c.Assert(err1, IsNil)
c.Assert(reachLimit, IsFalse)
err1 = ld.CheckAndInsertOneBatch(context.Background(), ld.GetRows(), ld.GetCurBatchCnt())
c.Assert(err1, IsNil)
ld.SetMaxRowsInBatch(20000)
if tt.restData == nil {
c.Assert(data, HasLen, 0,
Commentf("data1:%v, data2:%v, data:%v", string(tt.data1), string(tt.data2), string(data)))
} else {
c.Assert(data, DeepEquals, tt.restData,
Commentf("data1:%v, data2:%v, data:%v", string(tt.data1), string(tt.data2), string(data)))
}
ld.SetMessage()
tk.CheckLastMessage(tt.expectedMsg)
ctx.StmtCommit()
txn, err := ctx.Txn(true)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
r := tk.MustQuery(selectSQL)
r.Check(testutil.RowsWithSep("|", tt.expected...))
tk.MustExec(deleteSQL)
}
}
func (s *testSuiteP1) TestSelectWithoutFrom(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("select 1 + 2*3;")
r.Check(testkit.Rows("7"))
r = tk.MustQuery(`select _utf8"string";`)
r.Check(testkit.Rows("string"))
r = tk.MustQuery("select 1 order by 1;")
r.Check(testkit.Rows("1"))
}
// TestSelectBackslashN Issue 3685.
func (s *testSuiteP1) TestSelectBackslashN(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select \N;`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "NULL")
c.Assert(rs.Close(), IsNil)
sql = `select "\N";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
c.Assert(rs.Close(), IsNil)
tk.MustExec("use test;")
tk.MustExec("create table test (`\\N` int);")
tk.MustExec("insert into test values (1);")
tk.CheckExecResult(1, 0)
sql = "select * from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
c.Assert(rs.Close(), IsNil)
sql = `select \N from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(err, IsNil)
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
c.Assert(rs.Close(), IsNil)
sql = `select (\N) from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
c.Assert(rs.Close(), IsNil)
sql = "select `\\N` from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
c.Assert(rs.Close(), IsNil)
sql = "select (`\\N`) from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
c.Assert(rs.Close(), IsNil)
sql = `select '\N' from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
c.Assert(rs.Close(), IsNil)
sql = `select ('\N') from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
c.Assert(rs.Close(), IsNil)
}
// TestSelectNull Issue #4053.
func (s *testSuiteP1) TestSelectNull(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select nUll;`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
c.Assert(rs.Close(), IsNil)
sql = `select (null);`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
c.Assert(rs.Close(), IsNil)
sql = `select null+NULL;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(err, IsNil)
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `null+NULL`)
c.Assert(rs.Close(), IsNil)
}
// TestSelectStringLiteral Issue #3686.
func (s *testSuiteP1) TestSelectStringLiteral(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select 'abc';`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("abc"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `abc`)
c.Assert(rs.Close(), IsNil)
sql = `select (('abc'));`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("abc"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `abc`)
c.Assert(rs.Close(), IsNil)
sql = `select 'abc'+'def';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("0"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `'abc'+'def'`)
c.Assert(rs.Close(), IsNil)
// Below checks whether leading invalid chars are trimmed.
sql = "select '\n';"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("\n"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "")
c.Assert(rs.Close(), IsNil)
sql = "select '\t col';" // Lowercased letter is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "col")
c.Assert(rs.Close(), IsNil)
sql = "select '\t Col';" // Uppercased letter is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "Col")
c.Assert(rs.Close(), IsNil)
sql = "select '\n\t 中文 col';" // Chinese char is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "中文 col")
c.Assert(rs.Close(), IsNil)
sql = "select ' \r\n .col';" // Punctuation is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, ".col")
c.Assert(rs.Close(), IsNil)
sql = "select ' 😆col';" // Emoji is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "😆col")
c.Assert(rs.Close(), IsNil)
// Below checks whether trailing invalid chars are preserved.
sql = `select 'abc ';`
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "abc ")
c.Assert(rs.Close(), IsNil)
sql = `select ' abc 123 ';`
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "abc 123 ")
c.Assert(rs.Close(), IsNil)
// Issue #4239.
sql = `select 'a' ' ' 'string';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("a string"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "a")
c.Assert(rs.Close(), IsNil)
sql = `select 'a' " " "string";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("a string"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "a")
c.Assert(rs.Close(), IsNil)
sql = `select 'string' 'string';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("stringstring"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "string")
c.Assert(rs.Close(), IsNil)
sql = `select "ss" "a";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
c.Assert(rs.Close(), IsNil)
sql = `select "ss" "a" "b";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssab"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
c.Assert(rs.Close(), IsNil)
sql = `select "ss" "a" ' ' "b";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa b"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
c.Assert(rs.Close(), IsNil)
sql = `select "ss" "a" ' ' "b" ' ' "d";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa b d"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
c.Assert(rs.Close(), IsNil)
}
func (s *testSuiteP1) TestSelectLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
s.fillData(tk, "select_limit")
tk.MustExec("insert INTO select_limit VALUES (3, \"hello\");")
tk.CheckExecResult(1, 0)
tk.MustExec("insert INTO select_limit VALUES (4, \"hello\");")
tk.CheckExecResult(1, 0)
r := tk.MustQuery("select * from select_limit limit 1;")
r.Check(testkit.Rows("1 hello"))
r = tk.MustQuery("select id from (select * from select_limit limit 1) k where id != 1;")
r.Check(testkit.Rows())
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 0;")
r.Check(testkit.Rows("1 hello", "2 hello", "3 hello", "4 hello"))
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 1;")
r.Check(testkit.Rows("2 hello", "3 hello", "4 hello"))
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 3;")
r.Check(testkit.Rows("4 hello"))
err := tk.ExecToErr("select * from select_limit limit 18446744073709551616 offset 3;")
c.Assert(err, NotNil)
}
func (s *testSuiteP1) TestSelectOrderBy(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
s.fillData(tk, "select_order_test")
// Test star field
r := tk.MustQuery("select * from select_order_test where id = 1 order by id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
r = tk.MustQuery("select id from select_order_test order by id desc limit 1 ")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select id from select_order_test order by id + 1 desc limit 1 ")
r.Check(testkit.Rows("2"))
// Test limit
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit
r = tk.MustQuery("select id as c1, name from select_order_test order by 2, id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit overflow
r = tk.MustQuery("select * from select_order_test order by name, id limit 100 offset 0;")
r.Check(testkit.Rows("1 hello", "2 hello"))
// Test offset overflow
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 100;")
r.Check(testkit.Rows())
// Test limit exceeds int range.
r = tk.MustQuery("select id from select_order_test order by name, id limit 18446744073709551615;")
r.Check(testkit.Rows("1", "2"))
// Test multiple field
r = tk.MustQuery("select id, name from select_order_test where id = 1 group by id, name limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit + order by
for i := 3; i <= 10; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i))
}
tk.MustExec("insert INTO select_order_test VALUES (10086, \"hi\");")
for i := 11; i <= 20; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"hh\");", i))
}
for i := 21; i <= 30; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i))
}
tk.MustExec("insert INTO select_order_test VALUES (1501, \"aa\");")
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 3;")
r.Check(testkit.Rows("11 hh"))
tk.MustExec("drop table select_order_test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
tk.MustExec("insert t values (1, 1)")
tk.MustExec("insert t values (1, 2)")
tk.MustExec("insert t values (1, 3)")
r = tk.MustQuery("select 1-d as d from t order by d;")
r.Check(testkit.Rows("-2", "-1", "0"))
r = tk.MustQuery("select 1-d as d from t order by d + 1;")
r.Check(testkit.Rows("0", "-1", "-2"))
r = tk.MustQuery("select t.d from t order by d;")
r.Check(testkit.Rows("1", "2", "3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int, c int)")
tk.MustExec("insert t values (1, 2, 3)")
r = tk.MustQuery("select b from (select a,b from t order by a,c) t")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select b from (select a,b from t order by a,c limit 1) t")
r.Check(testkit.Rows("2"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("insert into t values(1, 1), (2, 2)")
tk.MustQuery("select * from t where 1 order by b").Check(testkit.Rows("1 1", "2 2"))
tk.MustQuery("select * from t where a between 1 and 2 order by a desc").Check(testkit.Rows("2 2", "1 1"))
	// Test double read where the topN is pushed down to the first read plan.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int, index idx(b))")
tk.MustExec("insert into t values(1, 3, 1)")
tk.MustExec("insert into t values(2, 2, 2)")
tk.MustExec("insert into t values(3, 1, 3)")
tk.MustQuery("select * from t use index(idx) order by a desc limit 1").Check(testkit.Rows("3 1 3"))
// Test double read which needs to keep order.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, key b (b))")
tk.Se.GetSessionVars().IndexLookupSize = 3
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t values(%d, %d)", i, 10-i))
}
tk.MustQuery("select a from t use index(b) order by b").Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0"))
}
func (s *testSuiteP1) TestOrderBy(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 int, c2 int, c3 varchar(20))")
tk.MustExec("insert into t values (1, 2, 'abc'), (2, 1, 'bcd')")
// Fix issue https://github.com/pingcap/tidb/issues/337
tk.MustQuery("select c1 as a, c1 as b from t order by c1").Check(testkit.Rows("1 1", "2 2"))
tk.MustQuery("select c1 as a, t.c1 as a from t order by a desc").Check(testkit.Rows("2 2", "1 1"))
tk.MustQuery("select c1 as c2 from t order by c2").Check(testkit.Rows("1", "2"))
tk.MustQuery("select sum(c1) from t order by sum(c1)").Check(testkit.Rows("3"))
tk.MustQuery("select c1 as c2 from t order by c2 + 1").Check(testkit.Rows("2", "1"))
// Order by position.
tk.MustQuery("select * from t order by 1").Check(testkit.Rows("1 2 abc", "2 1 bcd"))
tk.MustQuery("select * from t order by 2").Check(testkit.Rows("2 1 bcd", "1 2 abc"))
// Order by binary.
tk.MustQuery("select c1, c3 from t order by binary c1 desc").Check(testkit.Rows("2 bcd", "1 abc"))
tk.MustQuery("select c1, c2 from t order by binary c3").Check(testkit.Rows("1 2", "2 1"))
}
func (s *testSuiteP1) TestSelectErrorRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
err := tk.ExecToErr("select row(1, 1) from test")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test group by row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test order by row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test having row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select (select 1, 1) from test;")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test group by (select 1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test order by (select 1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test having (select 1, 1);")
c.Assert(err, NotNil)
}
// TestIssue2612 is related with https://github.com/pingcap/tidb/issues/2612
func (s *testSuiteP1) TestIssue2612(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t (
create_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00',
finish_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00');`)
tk.MustExec(`insert into t values ('2016-02-13 15:32:24', '2016-02-11 17:23:22');`)
rs, err := tk.Exec(`select timediff(finish_at, create_at) from t;`)
c.Assert(err, IsNil)
req := rs.NewChunk()
err = rs.Next(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(req.GetRow(0).GetDuration(0, 0).String(), Equals, "-46:09:02")
c.Assert(rs.Close(), IsNil)
}
// TestIssue345 is related with https://github.com/pingcap/tidb/issues/345
func (s *testSuiteP1) TestIssue345(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t1, t2`)
tk.MustExec(`create table t1 (c1 int);`)
tk.MustExec(`create table t2 (c2 int);`)
tk.MustExec(`insert into t1 values (1);`)
tk.MustExec(`insert into t2 values (2);`)
tk.MustExec(`update t1, t2 set t1.c1 = 2, t2.c2 = 1;`)
tk.MustExec(`update t1, t2 set c1 = 2, c2 = 1;`)
tk.MustExec(`update t1 as a, t2 as b set a.c1 = 2, b.c2 = 1;`)
// Check t1 content
r := tk.MustQuery("SELECT * FROM t1;")
r.Check(testkit.Rows("2"))
// Check t2 content
r = tk.MustQuery("SELECT * FROM t2;")
r.Check(testkit.Rows("1"))
tk.MustExec(`update t1 as a, t2 as t1 set a.c1 = 1, t1.c2 = 2;`)
// Check t1 content
r = tk.MustQuery("SELECT * FROM t1;")
r.Check(testkit.Rows("1"))
// Check t2 content
r = tk.MustQuery("SELECT * FROM t2;")
r.Check(testkit.Rows("2"))
_, err := tk.Exec(`update t1 as a, t2 set t1.c1 = 10;`)
c.Assert(err, NotNil)
}
func (s *testSuiteP1) TestIssue5055(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t1, t2`)
tk.MustExec(`create table t1 (a int);`)
tk.MustExec(`create table t2 (a int);`)
tk.MustExec(`insert into t1 values(1);`)
tk.MustExec(`insert into t2 values(1);`)
result := tk.MustQuery("select tbl1.* from (select t1.a, 1 from t1) tbl1 left join t2 tbl2 on tbl1.a = tbl2.a order by tbl1.a desc limit 1;")
result.Check(testkit.Rows("1 1"))
}
func (s *testSuiteWithData) TestSetOperation(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1, t2, t3`)
tk.MustExec(`create table t1(a int)`)
tk.MustExec(`create table t2 like t1`)
tk.MustExec(`create table t3 like t1`)
tk.MustExec(`insert into t1 values (1),(1),(2),(3),(null)`)
tk.MustExec(`insert into t2 values (1),(2),(null),(null)`)
tk.MustExec(`insert into t3 values (2),(3)`)
var input []string
var output []struct {
SQL string
Plan []string
Res []string
}
s.testData.GetTestCases(c, &input, &output)
for i, tt := range input {
s.testData.OnRecord(func() {
output[i].SQL = tt
output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + tt).Rows())
output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
})
tk.MustQuery("explain " + tt).Check(testkit.Rows(output[i].Plan...))
tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
}
}
func (s *testSuiteWithData) TestSetOperationOnDiffColType(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1, t2, t3`)
tk.MustExec(`create table t1(a int, b int)`)
tk.MustExec(`create table t2(a int, b varchar(20))`)
tk.MustExec(`create table t3(a int, b decimal(30,10))`)
tk.MustExec(`insert into t1 values (1,1),(1,1),(2,2),(3,3),(null,null)`)
tk.MustExec(`insert into t2 values (1,'1'),(2,'2'),(null,null),(null,'3')`)
tk.MustExec(`insert into t3 values (2,2.1),(3,3)`)
var input []string
var output []struct {
SQL string
Plan []string
Res []string
}
s.testData.GetTestCases(c, &input, &output)
for i, tt := range input {
s.testData.OnRecord(func() {
output[i].SQL = tt
output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + tt).Rows())
output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
})
tk.MustQuery("explain " + tt).Check(testkit.Rows(output[i].Plan...))
tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
}
}
// issue-23038: wrong key range of index scan for year column
func (s *testSuiteWithData) TestIndexScanWithYearCol(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (c1 year(4), c2 int, key(c1));")
tk.MustExec("insert into t values(2001, 1);")
var input []string
var output []struct {
SQL string
Plan []string
Res []string
}
s.testData.GetTestCases(c, &input, &output)
for i, tt := range input {
s.testData.OnRecord(func() {
output[i].SQL = tt
output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + tt).Rows())
output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
})
tk.MustQuery("explain format = 'brief' " + tt).Check(testkit.Rows(output[i].Plan...))
tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
}
}
func (s *testSuiteP2) TestUnion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
testSQL := `drop table if exists union_test; create table union_test(id int);`
tk.MustExec(testSQL)
testSQL = `drop table if exists union_test;`
tk.MustExec(testSQL)
testSQL = `create table union_test(id int);`
tk.MustExec(testSQL)
testSQL = `insert union_test values (1),(2)`
tk.MustExec(testSQL)
testSQL = `select * from (select id from union_test union select id from union_test) t order by id;`
r := tk.MustQuery(testSQL)
r.Check(testkit.Rows("1", "2"))
r = tk.MustQuery("select 1 union all select 1")
r.Check(testkit.Rows("1", "1"))
r = tk.MustQuery("select 1 union all select 1 union select 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select 1 as a union (select 2) order by a limit 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select 1 as a union (select 2) order by a limit 1, 1")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select id from union_test union all (select 1) order by id desc")
r.Check(testkit.Rows("2", "1", "1"))
r = tk.MustQuery("select id as a from union_test union (select 1) order by a desc")
r.Check(testkit.Rows("2", "1"))
r = tk.MustQuery(`select null as a union (select "abc") order by a`)
r.Check(testkit.Rows("<nil>", "abc"))
r = tk.MustQuery(`select "abc" as a union (select 1) order by a`)
r.Check(testkit.Rows("1", "abc"))
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (c int, d int)")
tk.MustExec("insert t1 values (NULL, 1)")
tk.MustExec("insert t1 values (1, 1)")
tk.MustExec("insert t1 values (1, 2)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2 (c int, d int)")
tk.MustExec("insert t2 values (1, 3)")
tk.MustExec("insert t2 values (1, 1)")
tk.MustExec("drop table if exists t3")
tk.MustExec("create table t3 (c int, d int)")
tk.MustExec("insert t3 values (3, 2)")
tk.MustExec("insert t3 values (4, 3)")
r = tk.MustQuery(`select sum(c1), c2 from (select c c1, d c2 from t1 union all select d c1, c c2 from t2 union all select c c1, d c2 from t3) x group by c2 order by c2`)
r.Check(testkit.Rows("5 1", "4 2", "4 3"))
tk.MustExec("drop table if exists t1, t2, t3")
tk.MustExec("create table t1 (a int primary key)")
tk.MustExec("create table t2 (a int primary key)")
tk.MustExec("create table t3 (a int primary key)")
tk.MustExec("insert t1 values (7), (8)")
tk.MustExec("insert t2 values (1), (9)")
tk.MustExec("insert t3 values (2), (3)")
r = tk.MustQuery("select * from t1 union all select * from t2 union all (select * from t3) order by a limit 2")
r.Check(testkit.Rows("1", "2"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (a int)")
tk.MustExec("create table t2 (a int)")
tk.MustExec("insert t1 values (2), (1)")
tk.MustExec("insert t2 values (3), (4)")
r = tk.MustQuery("select * from t1 union all (select * from t2) order by a limit 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select (select * from t1 where a != t.a union all (select * from t2 where a != t.a) order by a limit 1) from t1 t")
r.Check(testkit.Rows("1", "2"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int unsigned primary key auto_increment, c1 int, c2 int, index c1_c2 (c1, c2))")
tk.MustExec("insert into t (c1, c2) values (1, 1)")
tk.MustExec("insert into t (c1, c2) values (1, 2)")
tk.MustExec("insert into t (c1, c2) values (2, 3)")
r = tk.MustQuery("select * from (select * from t where t.c1 = 1 union select * from t where t.id = 1) s order by s.id")
r.Check(testkit.Rows("1 1 1", "2 1 2"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (f1 DATE)")
tk.MustExec("INSERT INTO t VALUES ('1978-11-26')")
r = tk.MustQuery("SELECT f1+0 FROM t UNION SELECT f1+0 FROM t")
r.Check(testkit.Rows("19781126"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a int, b int)")
tk.MustExec("INSERT INTO t VALUES ('1', '1')")
r = tk.MustQuery("select b from (SELECT * FROM t UNION ALL SELECT a, b FROM t order by a) t")
r.Check(testkit.Rows("1", "1"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a DECIMAL(4,2))")
tk.MustExec("INSERT INTO t VALUE(12.34)")
r = tk.MustQuery("SELECT 1 AS c UNION select a FROM t")
r.Sort().Check(testkit.Rows("1.00", "12.34"))
// #issue3771
r = tk.MustQuery("SELECT 'a' UNION SELECT CONCAT('a', -4)")
r.Sort().Check(testkit.Rows("a", "a-4"))
// test race
tk.MustQuery("SELECT @x:=0 UNION ALL SELECT @x:=0 UNION ALL SELECT @x")
// test field tp
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("CREATE TABLE t1 (a date)")
tk.MustExec("CREATE TABLE t2 (a date)")
tk.MustExec("SELECT a from t1 UNION select a FROM t2")
tk.MustQuery("show create table t1").Check(testkit.Rows("t1 CREATE TABLE `t1` (\n" + " `a` date DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
	// Moved from the session test.
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (c double);")
tk.MustExec("create table t2 (c double);")
tk.MustExec("insert into t1 value (73);")
tk.MustExec("insert into t2 value (930);")
	// If an unspecified column flen is set to 0, it causes a bug in union.
	// This test is used to prevent the bug from reappearing.
tk.MustQuery("select c from t1 union (select c from t2) order by c").Check(testkit.Rows("73", "930"))
// issue 5703
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a date)")
tk.MustExec("insert into t value ('2017-01-01'), ('2017-01-02')")
r = tk.MustQuery("(select a from t where a < 0) union (select a from t where a > 0) order by a")
r.Check(testkit.Rows("2017-01-01", "2017-01-02"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t value(0),(0)")
tk.MustQuery("select 1 from (select a from t union all select a from t) tmp").Check(testkit.Rows("1", "1", "1", "1"))
tk.MustQuery("select 10 as a from dual union select a from t order by a desc limit 1 ").Check(testkit.Rows("10"))
tk.MustQuery("select -10 as a from dual union select a from t order by a limit 1 ").Check(testkit.Rows("-10"))
tk.MustQuery("select count(1) from (select a from t union all select a from t) tmp").Check(testkit.Rows("4"))
err := tk.ExecToErr("select 1 from (select a from t limit 1 union all select a from t limit 1) tmp")
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrWrongUsage))
err = tk.ExecToErr("select 1 from (select a from t order by a union all select a from t limit 1) tmp")
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrWrongUsage))
_, err = tk.Exec("(select a from t order by a) union all select a from t limit 1 union all select a from t limit 1")
c.Assert(terror.ErrorEqual(err, plannercore.ErrWrongUsage), IsTrue, Commentf("err %v", err))
_, err = tk.Exec("(select a from t limit 1) union all select a from t limit 1")
c.Assert(err, IsNil)
_, err = tk.Exec("(select a from t order by a) union all select a from t order by a")
c.Assert(err, IsNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t value(1),(2),(3)")
tk.MustQuery("(select a from t order by a limit 2) union all (select a from t order by a desc limit 2) order by a desc limit 1,2").Check(testkit.Rows("2", "2"))
tk.MustQuery("select a from t union all select a from t order by a desc limit 5").Check(testkit.Rows("3", "3", "2", "2", "1"))
tk.MustQuery("(select a from t order by a desc limit 2) union all select a from t group by a order by a").Check(testkit.Rows("1", "2", "2", "3", "3"))
tk.MustQuery("(select a from t order by a desc limit 2) union all select 33 as a order by a desc limit 2").Check(testkit.Rows("33", "3"))
tk.MustQuery("select 1 union select 1 union all select 1").Check(testkit.Rows("1", "1"))
tk.MustQuery("select 1 union all select 1 union select 1").Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec(`create table t1(a bigint, b bigint);`)
tk.MustExec(`create table t2(a bigint, b bigint);`)
tk.MustExec(`insert into t1 values(1, 1);`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t2 values(1, 1);`)
tk.MustExec(`set @@tidb_init_chunk_size=2;`)
tk.MustExec(`set @@sql_mode="";`)
tk.MustQuery(`select count(*) from (select t1.a, t1.b from t1 left join t2 on t1.a=t2.a union all select t1.a, t1.a from t1 left join t2 on t1.a=t2.a) tmp;`).Check(testkit.Rows("128"))
tk.MustQuery(`select tmp.a, count(*) from (select t1.a, t1.b from t1 left join t2 on t1.a=t2.a union all select t1.a, t1.a from t1 left join t2 on t1.a=t2.a) tmp;`).Check(testkit.Rows("1 128"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int)")
tk.MustExec("insert into t value(1 ,2)")
tk.MustQuery("select a, b from (select a, 0 as d, b from t union all select a, 0 as d, b from t) test;").Check(testkit.Rows("1 2", "1 2"))
// #issue 8141
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("insert into t1 value(1,2),(1,1),(2,2),(2,2),(3,2),(3,2)")
tk.MustExec("set @@tidb_init_chunk_size=2;")
tk.MustQuery("select count(*) from (select a as c, a as d from t1 union all select a, b from t1) t;").Check(testkit.Rows("12"))
// #issue 8189 and #issue 8199
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("CREATE TABLE t1 (a int not null, b char (10) not null)")
tk.MustExec("insert into t1 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustExec("CREATE TABLE t2 (a int not null, b char (10) not null)")
tk.MustExec("insert into t2 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustQuery("select a from t1 union select a from t1 order by (select a+1);").Check(testkit.Rows("1", "2", "3"))
// #issue 8201
for i := 0; i < 4; i++ {
tk.MustQuery("SELECT(SELECT 0 AS a FROM dual UNION SELECT 1 AS a FROM dual ORDER BY a ASC LIMIT 1) AS dev").Check(testkit.Rows("0"))
}
// #issue 8231
tk.MustExec("drop table if exists t1")
tk.MustExec("CREATE TABLE t1 (uid int(1))")
tk.MustExec("INSERT INTO t1 SELECT 150")
tk.MustQuery("SELECT 'a' UNION SELECT uid FROM t1 order by 1 desc;").Check(testkit.Rows("a", "150"))
// #issue 8196
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("CREATE TABLE t1 (a int not null, b char (10) not null)")
tk.MustExec("insert into t1 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustExec("CREATE TABLE t2 (a int not null, b char (10) not null)")
tk.MustExec("insert into t2 values(3,'c'),(4,'d'),(5,'f'),(6,'e')")
tk.MustExec("analyze table t1")
tk.MustExec("analyze table t2")
_, err = tk.Exec("(select a,b from t1 limit 2) union all (select a,b from t2 order by a limit 1) order by t1.b")
c.Assert(err.Error(), Equals, "[planner:1250]Table 't1' from one of the SELECTs cannot be used in global ORDER clause")
// #issue 9900
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b decimal(6, 3))")
tk.MustExec("insert into t values(1, 1.000)")
tk.MustQuery("select count(distinct a), sum(distinct a), avg(distinct a) from (select a from t union all select b from t) tmp;").Check(testkit.Rows("1 1.000 1.0000000"))
// #issue 23832
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bit(20), b float, c double, d int)")
tk.MustExec("insert into t values(10, 10, 10, 10), (1, -1, 2, -2), (2, -2, 1, 1), (2, 1.1, 2.1, 10.1)")
tk.MustQuery("select a from t union select 10 order by a").Check(testkit.Rows("1", "2", "10"))
}
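// TestUnionLimit covers the worker count limit in the union executor when reading from a hash-partitioned table.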
func (s *testSuite2) TestUnionLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists union_limit")
tk.MustExec("create table union_limit (id int) partition by hash(id) partitions 30")
for i := 0; i < 60; i++ {
tk.MustExec(fmt.Sprintf("insert into union_limit values (%d)", i))
}
// Cover the code for worker count limit in the union executor.
tk.MustQuery("select * from union_limit limit 10")
}
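// TestNeighbouringProj tests plans with neighbouring projections, built here from unions and from group-by with order-by and limit.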
func (s *testSuiteP1) TestNeighbouringProj(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("create table t2(a int, b int)")
tk.MustExec("insert into t1 value(1, 1), (2, 2)")
tk.MustExec("insert into t2 value(1, 1), (2, 2)")
tk.MustQuery("select sum(c) from (select t1.a as a, t1.a as c, length(t1.b) from t1 union select a, b, b from t2) t;").Check(testkit.Rows("5"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint, b bigint, c bigint);")
tk.MustExec("insert into t values(1, 1, 1), (2, 2, 2), (3, 3, 3);")
rs := tk.MustQuery("select cast(count(a) as signed), a as another, a from t group by a order by cast(count(a) as signed), a limit 10;")
rs.Check(testkit.Rows("1 1 1", "1 2 2", "1 3 3"))
}
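// TestIn tests IN filters over an indexed primary key, including non-numeric string constants.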
func (s *testSuiteP1) TestIn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t (c1 int primary key, c2 int, key c (c2));`)
for i := 0; i <= 200; i++ {
tk.MustExec(fmt.Sprintf("insert t values(%d, %d)", i, i))
}
queryStr := `select c2 from t where c1 in ('7', '10', '112', '111', '98', '106', '100', '9', '18', '17') order by c2`
r := tk.MustQuery(queryStr)
r.Check(testkit.Rows("7", "9", "10", "17", "18", "98", "100", "106", "111", "112"))
queryStr = `select c2 from t where c1 in ('7a')`
tk.MustQuery(queryStr).Check(testkit.Rows("7"))
}
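// TestTablePKisHandleScan tests range scans on a table whose integer primary key is the row handle.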
func (s *testSuiteP1) TestTablePKisHandleScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int PRIMARY KEY AUTO_INCREMENT)")
tk.MustExec("insert t values (),()")
tk.MustExec("insert t values (-100),(0)")
tests := []struct {
sql string
result [][]interface{}
}{
{
"select * from t",
testkit.Rows("-100", "1", "2", "3"),
},
{
"select * from t where a = 1",
testkit.Rows("1"),
},
{
"select * from t where a != 1",
testkit.Rows("-100", "2", "3"),
},
{
"select * from t where a >= '1.1'",
testkit.Rows("2", "3"),
},
{
"select * from t where a < '1.1'",
testkit.Rows("-100", "1"),
},
{
"select * from t where a > '-100.1' and a < 2",
testkit.Rows("-100", "1"),
},
{
"select * from t where a is null",
testkit.Rows(),
}, {
"select * from t where a is true",
testkit.Rows("-100", "1", "2", "3"),
}, {
"select * from t where a is false",
testkit.Rows(),
},
{
"select * from t where a in (1, 2)",
testkit.Rows("1", "2"),
},
{
"select * from t where a between 1 and 2",
testkit.Rows("1", "2"),
},
}
for _, tt := range tests {
result := tk.MustQuery(tt.sql)
result.Check(tt.result)
}
}
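// TestIndexScan tests index scans, including unique indexes, double reads, composite indexes and string ranges.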
func (s *testSuite8) TestIndexScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique)")
tk.MustExec("insert t values (-1), (2), (3), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select a from t where a < 0 or (a >= 2.1 and a < 5.1) or ( a > 5.9 and a <= 7.9) or a > '8.1'")
result.Check(testkit.Rows("-1", "3", "5", "6", "7", "9"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique)")
tk.MustExec("insert t values (0)")
result = tk.MustQuery("select NULL from t ")
result.Check(testkit.Rows("<nil>"))
// test for double read
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique, b int)")
tk.MustExec("insert t values (5, 0)")
tk.MustExec("insert t values (4, 0)")
tk.MustExec("insert t values (3, 0)")
tk.MustExec("insert t values (2, 0)")
tk.MustExec("insert t values (1, 0)")
tk.MustExec("insert t values (0, 0)")
result = tk.MustQuery("select * from t order by a limit 3")
result.Check(testkit.Rows("0 0", "1 0", "2 0"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique, b int)")
tk.MustExec("insert t values (0, 1)")
tk.MustExec("insert t values (1, 2)")
tk.MustExec("insert t values (2, 1)")
tk.MustExec("insert t values (3, 2)")
tk.MustExec("insert t values (4, 1)")
tk.MustExec("insert t values (5, 2)")
result = tk.MustQuery("select * from t where a < 5 and b = 1 limit 2")
result.Check(testkit.Rows("0 1", "2 1"))
tk.MustExec("drop table if exists tab1")
tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, col0 INTEGER, col1 FLOAT, col3 INTEGER, col4 FLOAT)")
tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (col0)")
tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (col1)")
tk.MustExec("CREATE INDEX idx_tab1_3 on tab1 (col3)")
tk.MustExec("CREATE INDEX idx_tab1_4 on tab1 (col4)")
tk.MustExec("INSERT INTO tab1 VALUES(1,37,20.85,30,10.69)")
result = tk.MustQuery("SELECT pk FROM tab1 WHERE ((col3 <= 6 OR col3 < 29 AND (col0 < 41)) OR col3 > 42) AND col1 >= 96.1 AND col3 = 30 AND col3 > 17 AND (col0 BETWEEN 36 AND 42)")
result.Check(testkit.Rows())
tk.MustExec("drop table if exists tab1")
tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, a INTEGER, b INTEGER)")
tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (a)")
tk.MustExec("INSERT INTO tab1 VALUES(1,1,1)")
tk.MustExec("INSERT INTO tab1 VALUES(2,2,1)")
tk.MustExec("INSERT INTO tab1 VALUES(3,1,2)")
tk.MustExec("INSERT INTO tab1 VALUES(4,2,2)")
result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 3 AND a = 1")
result.Check(testkit.Rows("1 1 1", "3 1 2"))
result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 4 AND a = 1 AND b = 2")
result.Check(testkit.Rows("3 1 2"))
tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (b, a)")
result = tk.MustQuery("SELECT pk FROM tab1 WHERE b > 1")
result.Check(testkit.Rows("3", "4"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a varchar(3), index(a))")
tk.MustExec("insert t values('aaa'), ('aab')")
result = tk.MustQuery("select * from t where a >= 'aaaa' and a < 'aabb'")
result.Check(testkit.Rows("aab"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a int primary key, b int, c int, index(c))")
tk.MustExec("insert t values(1, 1, 1), (2, 2, 2), (4, 4, 4), (3, 3, 3), (5, 5, 5)")
// Test for double read and top n.
result = tk.MustQuery("select a from t where c >= 2 order by b desc limit 1")
result.Check(testkit.Rows("5"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a varchar(50) primary key, b int, c int, index idx(b))")
tk.MustExec("insert into t values('aa', 1, 1)")
tk.MustQuery("select * from t use index(idx) where a > 'a'").Check(testkit.Rows("aa 1 1"))
// Fix issue 9636.
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE `t` (a int, KEY (a))")
result = tk.MustQuery(`SELECT * FROM (SELECT * FROM (SELECT a as d FROM t WHERE a IN ('100')) AS x WHERE x.d < "123" ) tmp_count`)
result.Check(testkit.Rows())
}
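// TestIndexReverseOrder tests reading index ranges in descending order.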
func (s *testSuiteP1) TestIndexReverseOrder(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int, index idx (b))")
tk.MustExec("insert t (b) values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select b from t order by b desc")
result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0"))
result = tk.MustQuery("select b from t where b <3 or (b >=6 and b < 8) order by b desc")
result.Check(testkit.Rows("7", "6", "2", "1", "0"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int, index idx (b, a))")
tk.MustExec("insert t values (0, 2), (1, 2), (2, 2), (0, 1), (1, 1), (2, 1), (0, 0), (1, 0), (2, 0)")
result = tk.MustQuery("select b, a from t order by b, a desc")
result.Check(testkit.Rows("0 2", "0 1", "0 0", "1 2", "1 1", "1 0", "2 2", "2 1", "2 0"))
}
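// TestTableReverseOrder tests reading a table in descending primary key order.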
func (s *testSuiteP1) TestTableReverseOrder(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int)")
tk.MustExec("insert t (b) values (1), (2), (3), (4), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select b from t order by a desc")
result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1"))
result = tk.MustQuery("select a from t where a <3 or (a >=6 and a < 8) order by a desc")
result.Check(testkit.Rows("7", "6", "2", "1"))
}
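// TestDefaultNull tests default values and NULL handling across insert, update and delete.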
func (s *testSuiteP1) TestDefaultNull(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int default 1, c int)")
tk.MustExec("insert t values ()")
tk.MustQuery("select * from t").Check(testkit.Rows("1 1 <nil>"))
tk.MustExec("update t set b = NULL where a = 1")
tk.MustQuery("select * from t").Check(testkit.Rows("1 <nil> <nil>"))
tk.MustExec("update t set c = 1")
tk.MustQuery("select * from t ").Check(testkit.Rows("1 <nil> 1"))
tk.MustExec("delete from t where a = 1")
tk.MustExec("insert t (a) values (1)")
tk.MustQuery("select * from t").Check(testkit.Rows("1 1 <nil>"))
}
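// TestUnsignedPKColumn tests a table with an unsigned primary key and a secondary index.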
func (s *testSuiteP1) TestUnsignedPKColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unsigned primary key, b int, c int, key idx_ba (b, c, a));")
tk.MustExec("insert t values (1, 1, 1)")
result := tk.MustQuery("select * from t;")
result.Check(testkit.Rows("1 1 1"))
tk.MustExec("update t set c=2 where a=1;")
result = tk.MustQuery("select * from t where b=1;")
result.Check(testkit.Rows("1 1 2"))
}
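// TestJSON tests JSON columns: storage, comparison, path extraction, grouping and DDL restrictions.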
func (s *testSuiteP1) TestJSON(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists test_json")
tk.MustExec("create table test_json (id int, a json)")
tk.MustExec(`insert into test_json (id, a) values (1, '{"a":[1,"2",{"aa":"bb"},4],"b":true}')`)
tk.MustExec(`insert into test_json (id, a) values (2, "null")`)
tk.MustExec(`insert into test_json (id, a) values (3, null)`)
tk.MustExec(`insert into test_json (id, a) values (4, 'true')`)
tk.MustExec(`insert into test_json (id, a) values (5, '3')`)
tk.MustExec(`insert into test_json (id, a) values (5, '4.0')`)
tk.MustExec(`insert into test_json (id, a) values (6, '"string"')`)
result := tk.MustQuery(`select tj.a from test_json tj order by tj.id`)
result.Check(testkit.Rows(`{"a": [1, "2", {"aa": "bb"}, 4], "b": true}`, "null", "<nil>", "true", "3", "4", `"string"`))
// Check json_type function
result = tk.MustQuery(`select json_type(a) from test_json tj order by tj.id`)
result.Check(testkit.Rows("OBJECT", "NULL", "<nil>", "BOOLEAN", "INTEGER", "DOUBLE", "STRING"))
// Check JSON comparison with primitives.
result = tk.MustQuery(`select a from test_json tj where a = 3`)
result.Check(testkit.Rows("3"))
result = tk.MustQuery(`select a from test_json tj where a = 4.0`)
result.Check(testkit.Rows("4"))
result = tk.MustQuery(`select a from test_json tj where a = true`)
result.Check(testkit.Rows("true"))
result = tk.MustQuery(`select a from test_json tj where a = "string"`)
result.Check(testkit.Rows(`"string"`))
// Check cast(true/false as JSON).
result = tk.MustQuery(`select cast(true as JSON)`)
result.Check(testkit.Rows(`true`))
result = tk.MustQuery(`select cast(false as JSON)`)
result.Check(testkit.Rows(`false`))
// Check the two JSON syntactic-sugar operators (-> and ->>).
result = tk.MustQuery(`select a->>'$.a[2].aa' as x, a->'$.b' as y from test_json having x is not null order by id`)
result.Check(testkit.Rows(`bb true`))
result = tk.MustQuery(`select a->'$.a[2].aa' as x, a->>'$.b' as y from test_json having x is not null order by id`)
result.Check(testkit.Rows(`"bb" true`))
// Check some DDL limits for TEXT/BLOB/JSON columns.
var err error
var terr *terror.Error
_, err = tk.Exec(`create table test_bad_json(a json default '{}')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrBlobCantHaveDefault))
_, err = tk.Exec(`create table test_bad_json(a blob default 'hello')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrBlobCantHaveDefault))
_, err = tk.Exec(`create table test_bad_json(a text default 'world')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrBlobCantHaveDefault))
// Check that JSON fields cannot be used as an index key.
_, err = tk.Exec(`create table test_bad_json(id int, a json, key (a))`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrJSONUsedAsKey))
// check CAST AS JSON.
result = tk.MustQuery(`select CAST('3' AS JSON), CAST('{}' AS JSON), CAST(null AS JSON)`)
result.Check(testkit.Rows(`3 {} <nil>`))
tk.MustQuery("select a, count(1) from test_json group by a order by a").Check(testkit.Rows(
"<nil> 1",
"null 1",
"3 1",
"4 1",
`"string" 1`,
"{\"a\": [1, \"2\", {\"aa\": \"bb\"}, 4], \"b\": true} 1",
"true 1"))
// Check cast json to decimal.
// NOTE: this test case contains a bug; it should be uncommented after the bug is fixed.
// TODO: Fix bug https://github.com/pingcap/tidb/issues/12178
// tk.MustExec("drop table if exists test_json")
// tk.MustExec("create table test_json ( a decimal(60,2) as (JSON_EXTRACT(b,'$.c')), b json );")
// tk.MustExec(`insert into test_json (b) values
// ('{"c": "1267.1"}'),
// ('{"c": "1267.01"}'),
// ('{"c": "1267.1234"}'),
// ('{"c": "1267.3456"}'),
// ('{"c": "1234567890123456789012345678901234567890123456789012345"}'),
// ('{"c": "1234567890123456789012345678901234567890123456789012345.12345"}');`)
//
// tk.MustQuery("select a from test_json;").Check(testkit.Rows("1267.10", "1267.01", "1267.12",
// "1267.35", "1234567890123456789012345678901234567890123456789012345.00",
// "1234567890123456789012345678901234567890123456789012345.12"))
}
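// TestMultiUpdate tests set lists that assign several columns in INSERT ... ON DUPLICATE KEY UPDATE and UPDATE statements.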
func (s *testSuiteP1) TestMultiUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`CREATE TABLE test_mu (a int primary key, b int, c int)`)
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9)`)
// Test INSERT ... ON DUPLICATE KEY UPDATE set lists.
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE b = 3, c = b`)
result := tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 3 3`, `4 5 6`, `7 8 9`))
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE c = 2, b = c+5`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 5 6`, `7 8 9`))
// Test UPDATE ... set_lists.
tk.MustExec(`UPDATE test_mu SET b = 0, c = b WHERE a = 4`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 0 5`, `7 8 9`))
tk.MustExec(`UPDATE test_mu SET c = 8, b = c WHERE a = 4`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 5 8`, `7 8 9`))
tk.MustExec(`UPDATE test_mu SET c = b, b = c WHERE a = 7`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 5 8`, `7 9 8`))
}
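// TestGeneratedColumnWrite tests that generated columns cannot be written directly.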
func (s *testSuiteP1) TestGeneratedColumnWrite(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
_, err := tk.Exec(`CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (a+8) virtual)`)
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnRefAutoInc.GenWithStackByArgs("c").Error())
tk.MustExec(`CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (b+8) virtual)`)
tk.MustExec(`CREATE TABLE test_gc_write_1 (a int primary key, b int, c int)`)
tests := []struct {
stmt string
err int
}{
// Can't modify generated column by values.
{`insert into test_gc_write (a, b, c) values (1, 1, 1)`, mysql.ErrBadGeneratedColumn},
{`insert into test_gc_write values (1, 1, 1)`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by select clause.
{`insert into test_gc_write select 1, 1, 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by on duplicate clause.
{`insert into test_gc_write (a, b) values (1, 1) on duplicate key update c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by set.
{`insert into test_gc_write set a = 1, b = 1, c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by update clause.
{`update test_gc_write set c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by multi-table update clause.
{`update test_gc_write, test_gc_write_1 set test_gc_write.c = 1`, mysql.ErrBadGeneratedColumn},
// Can insert without generated columns.
{`insert into test_gc_write (a, b) values (1, 1)`, 0},
{`insert into test_gc_write set a = 2, b = 2`, 0},
{`insert into test_gc_write (b) select c from test_gc_write`, 0},
// Can update without generated columns.
{`update test_gc_write set b = 2 where a = 2`, 0},
{`update test_gc_write t1, test_gc_write_1 t2 set t1.b = 3, t2.b = 4`, 0},
// But we can't do this now, the same as MySQL 5.7:
{`insert into test_gc_write values (1, 1)`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write select 1, 1`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write (c) select a, b from test_gc_write`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write (b, c) select a, b from test_gc_write`, mysql.ErrBadGeneratedColumn},
}
for _, tt := range tests {
_, err := tk.Exec(tt.stmt)
if tt.err != 0 {
c.Assert(err, NotNil, Commentf("sql is `%v`", tt.stmt))
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(tt.err), Commentf("sql is %v", tt.stmt))
} else {
c.Assert(err, IsNil)
}
}
}
// TestGeneratedColumnRead tests selecting generated columns from a table.
// They should be calculated from their generation expressions.
func (s *testSuiteP1) TestGeneratedColumnRead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`CREATE TABLE test_gc_read(a int primary key, b int, c int as (a+b), d int as (a*b) stored, e int as (c*2))`)
result := tk.MustQuery(`SELECT generation_expression FROM information_schema.columns WHERE table_name = 'test_gc_read' AND column_name = 'd'`)
result.Check(testkit.Rows("`a` * `b`"))
// Insert only columns a and b; c and d are calculated from them.
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (0,null),(1,2),(3,4)`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`))
tk.MustExec(`INSERT INTO test_gc_read SET a = 5, b = 10`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 10 15 50 30`))
tk.MustExec(`REPLACE INTO test_gc_read (a, b) VALUES (5, 6)`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 6 11 30 22`))
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE b = 9`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 9 14 45 28`))
// Test selecting only generated columns, without the columns they depend on.
result = tk.MustQuery(`SELECT c, d FROM test_gc_read`)
result.Check(testkit.Rows(`<nil> <nil>`, `3 2`, `7 12`, `14 45`))
// Test selecting only a virtual generated column that refers to other virtual generated columns.
result = tk.MustQuery(`SELECT e FROM test_gc_read`)
result.Check(testkit.Rows(`<nil>`, `6`, `14`, `28`))
// Test order of on duplicate key update list.
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE a = 6, b = a`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `6 6 12 36 24`))
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (6, 8) ON DUPLICATE KEY UPDATE b = 8, a = b`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
// Test where-conditions on virtual/stored generated columns.
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 7`)
result.Check(testkit.Rows(`3 4 7 12 14`))
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE d = 64`)
result.Check(testkit.Rows(`8 8 16 64 32`))
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE e = 6`)
result.Check(testkit.Rows(`1 2 3 2 6`))
// Test update where-conditions on virtual/generated columns.
tk.MustExec(`UPDATE test_gc_read SET a = a + 100 WHERE c = 7`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 107`)
result.Check(testkit.Rows(`103 4 107 412 214`))
// Test update where-conditions on virtual/generated columns.
tk.MustExec(`UPDATE test_gc_read m SET m.a = m.a + 100 WHERE c = 107`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 207`)
result.Check(testkit.Rows(`203 4 207 812 414`))
tk.MustExec(`UPDATE test_gc_read SET a = a - 200 WHERE d = 812`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE d = 12`)
result.Check(testkit.Rows(`3 4 7 12 14`))
tk.MustExec(`INSERT INTO test_gc_read set a = 4, b = d + 1`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`,
`4 <nil> <nil> <nil> <nil>`, `8 8 16 64 32`))
tk.MustExec(`DELETE FROM test_gc_read where a = 4`)
// Test on-conditions on virtual/stored generated columns.
tk.MustExec(`CREATE TABLE test_gc_help(a int primary key, b int, c int, d int, e int)`)
tk.MustExec(`INSERT INTO test_gc_help(a, b, c, d, e) SELECT * FROM test_gc_read`)
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.c = t2.c ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.d = t2.d ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.e = t2.e ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
// Test generated column in subqueries.
result = tk.MustQuery(`SELECT * FROM test_gc_read t WHERE t.a not in (SELECT t.a FROM test_gc_read t where t.c > 5)`)
result.Sort().Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`))
result = tk.MustQuery(`SELECT * FROM test_gc_read t WHERE t.c in (SELECT t.c FROM test_gc_read t where t.c > 5)`)
result.Sort().Check(testkit.Rows(`3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT tt.b FROM test_gc_read tt WHERE tt.a = (SELECT max(t.a) FROM test_gc_read t WHERE t.c = tt.c) ORDER BY b`)
result.Check(testkit.Rows(`2`, `4`, `8`))
// Test aggregation on virtual/stored generated columns.
result = tk.MustQuery(`SELECT c, sum(a) aa, max(d) dd, sum(e) ee FROM test_gc_read GROUP BY c ORDER BY aa`)
result.Check(testkit.Rows(`<nil> 0 <nil> <nil>`, `3 1 2 6`, `7 3 12 14`, `16 8 64 32`))
result = tk.MustQuery(`SELECT a, sum(c), sum(d), sum(e) FROM test_gc_read GROUP BY a ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil>`, `1 3 2 6`, `3 7 12 14`, `8 16 64 32`))
// Test multi-update on generated columns.
tk.MustExec(`UPDATE test_gc_read m, test_gc_read n SET m.b = m.b + 10, n.b = n.b + 10`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 12 13 12 26`, `3 14 17 42 34`, `8 18 26 144 52`))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(8)")
tk.MustExec("update test_gc_read set a = a+1 where a in (select a from t)")
result = tk.MustQuery("select * from test_gc_read order by a")
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 12 13 12 26`, `3 14 17 42 34`, `9 18 27 162 54`))
// Test different types between generation expression and generated column.
tk.MustExec(`CREATE TABLE test_gc_read_cast(a VARCHAR(255), b VARCHAR(255), c INT AS (JSON_EXTRACT(a, b)), d INT AS (JSON_EXTRACT(a, b)) STORED)`)
tk.MustExec(`INSERT INTO test_gc_read_cast (a, b) VALUES ('{"a": "3"}', '$.a')`)
result = tk.MustQuery(`SELECT c, d FROM test_gc_read_cast`)
result.Check(testkit.Rows(`3 3`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_1(a VARCHAR(255), b VARCHAR(255), c ENUM("red", "yellow") AS (JSON_UNQUOTE(JSON_EXTRACT(a, b))))`)
tk.MustExec(`INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "yellow"}', '$.a')`)
result = tk.MustQuery(`SELECT c FROM test_gc_read_cast_1`)
result.Check(testkit.Rows(`yellow`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_2( a JSON, b JSON AS (a->>'$.a'))`)
tk.MustExec(`INSERT INTO test_gc_read_cast_2(a) VALUES ('{"a": "{ \\\"key\\\": \\\"\\u6d4b\\\" }"}')`)
result = tk.MustQuery(`SELECT b FROM test_gc_read_cast_2`)
result.Check(testkit.Rows(`{"key": "测"}`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_3( a JSON, b JSON AS (a->>'$.a'), c INT AS (b * 3.14) )`)
tk.MustExec(`INSERT INTO test_gc_read_cast_3(a) VALUES ('{"a": "5"}')`)
result = tk.MustQuery(`SELECT c FROM test_gc_read_cast_3`)
result.Check(testkit.Rows(`16`))
_, err := tk.Exec(`INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "invalid"}', '$.a')`)
c.Assert(err, NotNil)
// Test reading generated columns after dropping an irrelevant column.
tk.MustExec(`DROP TABLE IF EXISTS test_gc_read_m`)
tk.MustExec(`CREATE TABLE test_gc_read_m (a int primary key, b int, c int as (a+1), d int as (c*2))`)
tk.MustExec(`INSERT INTO test_gc_read_m(a) values (1), (2)`)
tk.MustExec(`ALTER TABLE test_gc_read_m DROP b`)
result = tk.MustQuery(`SELECT * FROM test_gc_read_m`)
result.Check(testkit.Rows(`1 2 4`, `2 3 6`))
// Test not null generated columns.
tk.MustExec(`CREATE TABLE test_gc_read_1(a int primary key, b int, c int as (a+b) not null, d int as (a*b) stored)`)
tk.MustExec(`CREATE TABLE test_gc_read_2(a int primary key, b int, c int as (a+b), d int as (a*b) stored not null)`)
tests := []struct {
stmt string
err int
}{
// Can't insert these records, because the generated columns are declared NOT NULL.
{`insert into test_gc_read_1(a, b) values (1, null)`, mysql.ErrBadNull},
{`insert into test_gc_read_2(a, b) values (1, null)`, mysql.ErrBadNull},
}
for _, tt := range tests {
_, err := tk.Exec(tt.stmt)
if tt.err != 0 {
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(tt.err))
} else {
c.Assert(err, IsNil)
}
}
}
// TestGeneratedColumnPointGet tests reading generated columns via point get and batch point get.
func (s *testSuiteP1) TestGeneratedColumnPointGet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists tu")
tk.MustExec("CREATE TABLE tu(a int, b int, c int GENERATED ALWAYS AS (a + b) VIRTUAL, d int as (a * b) stored, " +
"e int GENERATED ALWAYS as (b * 2) VIRTUAL, PRIMARY KEY (a), UNIQUE KEY ukc (c), unique key ukd(d), key ke(e))")
tk.MustExec("insert into tu(a, b) values(1, 2)")
tk.MustExec("insert into tu(a, b) values(5, 6)")
tk.MustQuery("select * from tu for update").Check(testkit.Rows("1 2 3 2 4", "5 6 11 30 12"))
tk.MustQuery("select * from tu where a = 1").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select * from tu where a in (1, 2)").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select * from tu where c in (1, 2, 3)").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select * from tu where c = 3").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select d, e from tu where c = 3").Check(testkit.Rows("2 4"))
tk.MustQuery("select * from tu where d in (1, 2, 3)").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select * from tu where d = 2").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select c, d from tu where d = 2").Check(testkit.Rows("3 2"))
tk.MustQuery("select d, e from tu where e = 4").Check(testkit.Rows("2 4"))
tk.MustQuery("select * from tu where e = 4").Check(testkit.Rows("1 2 3 2 4"))
tk.MustExec("update tu set a = a + 1, b = b + 1 where c = 11")
tk.MustQuery("select * from tu for update").Check(testkit.Rows("1 2 3 2 4", "6 7 13 42 14"))
tk.MustQuery("select * from tu where a = 6").Check(testkit.Rows("6 7 13 42 14"))
tk.MustQuery("select * from tu where c in (5, 6, 13)").Check(testkit.Rows("6 7 13 42 14"))
tk.MustQuery("select b, c, e, d from tu where c = 13").Check(testkit.Rows("7 13 14 42"))
tk.MustQuery("select a, e, d from tu where c in (5, 6, 13)").Check(testkit.Rows("6 14 42"))
tk.MustExec("drop table if exists tu")
}
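// TestToPBExpr tests filters whose expressions can be converted to PB, over decimal, string and integer columns.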
func (s *testSuiteP2) TestToPBExpr(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a decimal(10,6), b decimal, index idx_b (b))")
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values (1.1, 1.1)")
tk.MustExec("insert t values (2.4, 2.4)")
tk.MustExec("insert t values (3.3, 2.7)")
result := tk.MustQuery("select * from t where a < 2.399999")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where a > 1.5")
result.Check(testkit.Rows("2.400000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where a <= 1.1")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where b >= 3")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where not (b = 1)")
result.Check(testkit.Rows("2.400000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where b&1 = a|1")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where b != 2 and b <=> 3")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where b in (3)")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where b not in (1, 2)")
result.Check(testkit.Rows("3.300000 3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a varchar(255), b int)")
tk.MustExec("insert t values ('abc123', 1)")
tk.MustExec("insert t values ('ab123', 2)")
result = tk.MustQuery("select * from t where a like 'ab%'")
result.Check(testkit.Rows("abc123 1", "ab123 2"))
result = tk.MustQuery("select * from t where a like 'ab_12'")
result.Check(nil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key)")
tk.MustExec("insert t values (1)")
tk.MustExec("insert t values (2)")
result = tk.MustQuery("select * from t where not (a = 1)")
result.Check(testkit.Rows("2"))
result = tk.MustQuery("select * from t where not(not (a = 1))")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select * from t where not(a != 1 and a != 2)")
result.Check(testkit.Rows("1", "2"))
}
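// TestDatumXAPI tests comparisons on decimal and time columns, with and without indexes.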
func (s *testSuiteP2) TestDatumXAPI(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a decimal(10,6), b decimal, index idx_b (b))")
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values (1.1, 1.1)")
tk.MustExec("insert t values (2.2, 2.2)")
tk.MustExec("insert t values (3.3, 2.7)")
result := tk.MustQuery("select * from t where a > 1.5")
result.Check(testkit.Rows("2.200000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where b > 1.5")
result.Check(testkit.Rows("2.200000 2", "3.300000 3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a time(3), b time, index idx_a (a))")
tk.MustExec("insert t values ('11:11:11', '11:11:11')")
tk.MustExec("insert t values ('11:11:12', '11:11:12')")
tk.MustExec("insert t values ('11:11:13', '11:11:13')")
result = tk.MustQuery("select * from t where a > '11:11:11.5'")
result.Check(testkit.Rows("11:11:12.000 11:11:12", "11:11:13.000 11:11:13"))
result = tk.MustQuery("select * from t where b > '11:11:11.5'")
result.Check(testkit.Rows("11:11:12.000 11:11:12", "11:11:13.000 11:11:13"))
}
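// TestSQLMode tests strict and non-strict SQL modes for inserts, plus the global sql_mode variable.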
func (s *testSuiteP2) TestSQLMode(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a tinyint not null)")
tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
_, err := tk.Exec("insert t values ()")
c.Check(err, NotNil)
_, err = tk.Exec("insert t values ('1000')")
c.Check(err, NotNil)
tk.MustExec("create table if not exists tdouble (a double(3,2))")
_, err = tk.Exec("insert tdouble values (10.23)")
c.Check(err, NotNil)
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values ()")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1364 Field 'a' doesn't have a default value"))
tk.MustExec("insert t values (null)")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'a' cannot be null"))
tk.MustExec("insert ignore t values (null)")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'a' cannot be null"))
tk.MustExec("insert t select null")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'a' cannot be null"))
tk.MustExec("insert t values (1000)")
tk.MustQuery("select * from t order by a").Check(testkit.Rows("0", "0", "0", "0", "127"))
tk.MustExec("insert tdouble values (10.23)")
tk.MustQuery("select * from tdouble").Check(testkit.Rows("9.99"))
tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
tk.MustExec("set @@global.sql_mode = ''")
// Disable the global variable cache so that loading global variables takes effect immediately.
s.domain.GetGlobalVarsCache().Disable()
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk2.MustExec("drop table if exists t2")
tk2.MustExec("create table t2 (a varchar(3))")
tk2.MustExec("insert t2 values ('abcd')")
tk2.MustQuery("select * from t2").Check(testkit.Rows("abc"))
// session1 is still in strict mode.
_, err = tk.Exec("insert t2 values ('abcd')")
c.Check(err, NotNil)
// Restore original global strict mode.
tk.MustExec("set @@global.sql_mode = 'STRICT_TRANS_TABLES'")
}
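// TestTableDual tests queries evaluated against the dual table.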
func (s *testSuiteP2) TestTableDual(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
result := tk.MustQuery("Select 1")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select 1 from dual")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select count(*) from dual")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select 1 from dual where 1")
result.Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key)")
tk.MustQuery("select t1.* from t t1, t t2 where t1.a=t2.a and 1=0").Check(testkit.Rows())
}
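// TestTableScan tests scanning the information_schema.schemata memory table.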
func (s *testSuiteP2) TestTableScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use information_schema")
result := tk.MustQuery("select * from schemata")
// There must be at least these schemas: information_schema, mysql, performance_schema and test.
c.Assert(len(result.Rows()), GreaterEqual, 4)
tk.MustExec("use test")
tk.MustExec("create database mytest")
rowStr1 := fmt.Sprintf("%s %s %s %s %v", "def", "mysql", "utf8mb4", "utf8mb4_bin", nil)
rowStr2 := fmt.Sprintf("%s %s %s %s %v", "def", "mytest", "utf8mb4", "utf8mb4_bin", nil)
tk.MustExec("use information_schema")
result = tk.MustQuery("select * from schemata where schema_name = 'mysql'")
result.Check(testkit.Rows(rowStr1))
result = tk.MustQuery("select * from schemata where schema_name like 'my%'")
result.Check(testkit.Rows(rowStr1, rowStr2))
result = tk.MustQuery("select 1 from tables limit 1")
result.Check(testkit.Rows("1"))
}
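// TestAdapterStatement tests that a compiled statement keeps its original text.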
func (s *testSuiteP2) TestAdapterStatement(c *C) {
se, err := session.CreateSession4Test(s.store)
c.Check(err, IsNil)
se.GetSessionVars().TxnCtx.InfoSchema = domain.GetDomain(se).InfoSchema()
compiler := &executor.Compiler{Ctx: se}
stmtNode, err := s.ParseOneStmt("select 1", "", "")
c.Check(err, IsNil)
stmt, err := compiler.Compile(context.TODO(), stmtNode)
c.Check(err, IsNil)
c.Check(stmt.OriginText(), Equals, "select 1")
stmtNode, err = s.ParseOneStmt("create table test.t (a int)", "", "")
c.Check(err, IsNil)
stmt, err = compiler.Compile(context.TODO(), stmtNode)
c.Check(err, IsNil)
c.Check(stmt.OriginText(), Equals, "create table test.t (a int)")
}
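// TestIsPointGet tests which queries on the mysql schema are recognized as point gets.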
func (s *testSuiteP2) TestIsPointGet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use mysql")
ctx := tk.Se.(sessionctx.Context)
tests := map[string]bool{
"select * from help_topic where name='aaa'": false,
"select 1 from help_topic where name='aaa'": false,
"select * from help_topic where help_topic_id=1": true,
"select * from help_topic where help_category_id=1": false,
}
infoSchema := ctx.GetSessionVars().GetInfoSchema().(infoschema.InfoSchema)
for sqlStr, result := range tests {
stmtNode, err := s.ParseOneStmt(sqlStr, "", "")
c.Check(err, IsNil)
err = plannercore.Preprocess(ctx, stmtNode, infoSchema)
c.Check(err, IsNil)
p, _, err := planner.Optimize(context.TODO(), ctx, stmtNode, infoSchema)
c.Check(err, IsNil)
ret, err := plannercore.IsPointGetWithPKOrUniqueKeyByAutoCommit(ctx, p)
c.Assert(err, IsNil)
c.Assert(ret, Equals, result)
}
}
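// TestClusteredIndexIsPointGet tests point-get detection on a table with a clustered primary key.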
func (s *testSuiteP2) TestClusteredIndexIsPointGet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("drop database if exists test_cluster_index_is_point_get;")
tk.MustExec("create database test_cluster_index_is_point_get;")
tk.MustExec("use test_cluster_index_is_point_get;")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a varchar(255), b int, c char(10), primary key (c, a));")
ctx := tk.Se.(sessionctx.Context)
tests := map[string]bool{
"select 1 from t where a='x'": false,
"select * from t where c='x'": false,
"select * from t where a='x' and c='x'": true,
"select * from t where a='x' and c='x' and b=1": false,
}
infoSchema := ctx.GetSessionVars().GetInfoSchema().(infoschema.InfoSchema)
for sqlStr, result := range tests {
stmtNode, err := s.ParseOneStmt(sqlStr, "", "")
c.Check(err, IsNil)
err = plannercore.Preprocess(ctx, stmtNode, infoSchema)
c.Check(err, IsNil)
p, _, err := planner.Optimize(context.TODO(), ctx, stmtNode, infoSchema)
c.Check(err, IsNil)
ret, err := plannercore.IsPointGetWithPKOrUniqueKeyByAutoCommit(ctx, p)
c.Assert(err, IsNil)
c.Assert(ret, Equals, result)
}
}
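// TestPointGetRepeatableRead tests that a point get keeps a repeatable-read view while a concurrent update commits.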
func (s *testSerialSuite) TestPointGetRepeatableRead(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec(`create table point_get (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into point_get values (1, 1, 1)")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
var (
step1 = "github.com/pingcap/tidb/executor/pointGetRepeatableReadTest-step1"
step2 = "github.com/pingcap/tidb/executor/pointGetRepeatableReadTest-step2"
)
c.Assert(failpoint.Enable(step1, "return"), IsNil)
c.Assert(failpoint.Enable(step2, "pause"), IsNil)
updateWaitCh := make(chan struct{})
go func() {
ctx := context.WithValue(context.Background(), "pointGetRepeatableReadTest", updateWaitCh)
ctx = failpoint.WithHook(ctx, func(ctx context.Context, fpname string) bool {
return fpname == step1 || fpname == step2
})
rs, err := tk1.Se.Execute(ctx, "select c from point_get where b = 1")
c.Assert(err, IsNil)
result := tk1.ResultSetToResultWithCtx(ctx, rs[0], Commentf("execute sql fail"))
result.Check(testkit.Rows("1"))
}()
<-updateWaitCh // Wait for the POINT GET executor's first get.
c.Assert(failpoint.Disable(step1), IsNil)
tk2.MustExec("update point_get set b = 2, c = 2 where a = 1")
c.Assert(failpoint.Disable(step2), IsNil)
}
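// TestBatchPointGetRepeatableRead runs the same repeatable-read check for batch point get.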
func (s *testSerialSuite) TestBatchPointGetRepeatableRead(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec(`create table batch_point_get (a int, b int, c int, unique key k_b(a, b, c))`)
tk1.MustExec("insert into batch_point_get values (1, 1, 1), (2, 3, 4), (3, 4, 5)")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
var (
step1 = "github.com/pingcap/tidb/executor/batchPointGetRepeatableReadTest-step1"
step2 = "github.com/pingcap/tidb/executor/batchPointGetRepeatableReadTest-step2"
)
c.Assert(failpoint.Enable(step1, "return"), IsNil)
c.Assert(failpoint.Enable(step2, "pause"), IsNil)
updateWaitCh := make(chan struct{})
go func() {
ctx := context.WithValue(context.Background(), "batchPointGetRepeatableReadTest", updateWaitCh)
ctx = failpoint.WithHook(ctx, func(ctx context.Context, fpname string) bool {
return fpname == step1 || fpname == step2
})
rs, err := tk1.Se.Execute(ctx, "select c from batch_point_get where (a, b, c) in ((1, 1, 1))")
c.Assert(err, IsNil)
result := tk1.ResultSetToResultWithCtx(ctx, rs[0], Commentf("execute sql fail"))
result.Check(testkit.Rows("1"))
}()
<-updateWaitCh // Wait for the BATCH POINT GET executor's first get.
c.Assert(failpoint.Disable(step1), IsNil)
tk2.MustExec("update batch_point_get set b = 2, c = 2 where a = 1")
c.Assert(failpoint.Disable(step2), IsNil)
}
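// TestSplitRegionTimeout tests split-region and scatter-region timeouts via failpoints.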
func (s *testSerialSuite) TestSplitRegionTimeout(c *C) {
c.Assert(tikvutil.MockSplitRegionTimeout.Enable(`return(true)`), IsNil)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a varchar(100),b int, index idx1(b,a))")
tk.MustExec(`split table t index idx1 by (10000,"abcd"),(10000000);`)
tk.MustExec(`set @@tidb_wait_split_region_timeout=1`)
// The result "0 0" means 0 regions were split and 0 regions finished scattering before the timeout.
tk.MustQuery(`split table t between (0) and (10000) regions 10`).Check(testkit.Rows("0 0"))
err := tikvutil.MockSplitRegionTimeout.Disable()
c.Assert(err, IsNil)
// Test scatter regions timeout.
c.Assert(tikvutil.MockScatterRegionTimeout.Enable(`return(true)`), IsNil)
tk.MustQuery(`split table t between (0) and (10000) regions 10`).Check(testkit.Rows("10 1"))
err = tikvutil.MockScatterRegionTimeout.Disable()
c.Assert(err, IsNil)
// Test pre-split with timeout.
tk.MustExec("drop table if exists t")
tk.MustExec("set @@global.tidb_scatter_region=1;")
c.Assert(tikvutil.MockScatterRegionTimeout.Enable(`return(true)`), IsNil)
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
start := time.Now()
tk.MustExec("create table t (a int, b int) partition by hash(a) partitions 5;")
c.Assert(time.Since(start).Seconds(), Less, 10.0)
err = tikvutil.MockScatterRegionTimeout.Disable()
c.Assert(err, IsNil)
}
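// TestRow tests row (tuple) comparisons and row values inside IN subqueries.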
func (s *testSuiteP2) TestRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
tk.MustExec("insert t values (1, 1)")
tk.MustExec("insert t values (1, 3)")
tk.MustExec("insert t values (2, 1)")
tk.MustExec("insert t values (2, 3)")
result := tk.MustQuery("select * from t where (c, d) < (2,2)")
result.Check(testkit.Rows("1 1", "1 3", "2 1"))
result = tk.MustQuery("select * from t where (1,2,3) > (3,2,1)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t where row(1,2,3) > (3,2,1)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t where (c, d) = (select * from t where (c,d) = (1,1))")
result.Check(testkit.Rows("1 1"))
result = tk.MustQuery("select * from t where (c, d) = (select * from t k where (t.c,t.d) = (c,d))")
result.Check(testkit.Rows("1 1", "1 3", "2 1", "2 3"))
result = tk.MustQuery("select (1, 2, 3) < (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 3)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 1, 4)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (2, 3, 4) >= (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) = (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) != (2, 3, 4)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select row(1, 1) in (row(1, 1))")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 0) in (row(1, 1))")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select row(1, 1) in (select 1, 1)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 1) > row(1, 0)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 1) > (select 1, 0)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select 1 > (select 1)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (select 1)")
result.Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int)")
tk.MustExec("insert t1 values (1,2),(1,null)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2 (c int, d int)")
tk.MustExec("insert t2 values (0,0)")
tk.MustQuery("select * from t2 where (1,2) in (select * from t1)").Check(testkit.Rows("0 0"))
tk.MustQuery("select * from t2 where (1,2) not in (select * from t1)").Check(testkit.Rows())
tk.MustQuery("select * from t2 where (1,1) not in (select * from t1)").Check(testkit.Rows())
tk.MustQuery("select * from t2 where (1,null) in (select * from t1)").Check(testkit.Rows())
tk.MustQuery("select * from t2 where (null,null) in (select * from t1)").Check(testkit.Rows())
tk.MustExec("delete from t1 where a=1 and b=2")
tk.MustQuery("select (1,1) in (select * from t2) from t1").Check(testkit.Rows("0"))
tk.MustQuery("select (1,1) not in (select * from t2) from t1").Check(testkit.Rows("1"))
tk.MustQuery("select (1,1) in (select 1,1 from t2) from t1").Check(testkit.Rows("1"))
tk.MustQuery("select (1,1) not in (select 1,1 from t2) from t1").Check(testkit.Rows("0"))
// MySQL 5.7 returns 1 for these 2 queries, which is wrong.
tk.MustQuery("select (1,null) not in (select 1,1 from t2) from t1").Check(testkit.Rows("<nil>"))
tk.MustQuery("select (t1.a,null) not in (select 1,1 from t2) from t1").Check(testkit.Rows("<nil>"))
tk.MustQuery("select (1,null) in (select * from t1)").Check(testkit.Rows("<nil>"))
tk.MustQuery("select (1,null) not in (select * from t1)").Check(testkit.Rows("<nil>"))
}
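// TestColumnName tests the name, alias, table and database attributes of result fields.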
func (s *testSuiteP2) TestColumnName(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
// Disable ONLY_FULL_GROUP_BY.
tk.MustExec("set sql_mode='STRICT_TRANS_TABLES'")
rs, err := tk.Exec("select 1 + c, count(*) from t")
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 2)
c.Check(fields[0].Column.Name.L, Equals, "1 + c")
c.Check(fields[0].ColumnAsName.L, Equals, "1 + c")
c.Check(fields[1].Column.Name.L, Equals, "count(*)")
c.Check(fields[1].ColumnAsName.L, Equals, "count(*)")
c.Assert(rs.Close(), IsNil)
rs, err = tk.Exec("select (c) > all (select c from t) from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.L, Equals, "(c) > all (select c from t)")
c.Check(fields[0].ColumnAsName.L, Equals, "(c) > all (select c from t)")
c.Assert(rs.Close(), IsNil)
tk.MustExec("begin")
tk.MustExec("insert t values(1,1)")
rs, err = tk.Exec("select c d, d c from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 2)
c.Check(fields[0].Column.Name.L, Equals, "c")
c.Check(fields[0].ColumnAsName.L, Equals, "d")
c.Check(fields[1].Column.Name.L, Equals, "d")
c.Check(fields[1].ColumnAsName.L, Equals, "c")
c.Assert(rs.Close(), IsNil)
// Test case for querying a column of a table.
// In this case, all attributes have values.
rs, err = tk.Exec("select c as a from t as t2")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(fields[0].Column.Name.L, Equals, "c")
c.Check(fields[0].ColumnAsName.L, Equals, "a")
c.Check(fields[0].Table.Name.L, Equals, "t")
c.Check(fields[0].TableAsName.L, Equals, "t2")
c.Check(fields[0].DBName.L, Equals, "test")
c.Assert(rs.Close(), IsNil)
// Test case for querying an expression that uses only constant inputs.
// In this case, the table, org_table and database attributes will all be empty.
rs, err = tk.Exec("select hour(1) as a from t as t2")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(fields[0].Column.Name.L, Equals, "a")
c.Check(fields[0].ColumnAsName.L, Equals, "a")
c.Check(fields[0].Table.Name.L, Equals, "")
c.Check(fields[0].TableAsName.L, Equals, "")
c.Check(fields[0].DBName.L, Equals, "")
c.Assert(rs.Close(), IsNil)
// Test case for querying a column wrapped with parentheses and unary plus.
// In this case, the column name should be its original name.
rs, err = tk.Exec("select (c), (+c), +(c), +(+(c)), ++c from t")
c.Check(err, IsNil)
fields = rs.Fields()
for i := 0; i < 5; i++ {
c.Check(fields[i].Column.Name.L, Equals, "c")
c.Check(fields[i].ColumnAsName.L, Equals, "c")
}
c.Assert(rs.Close(), IsNil)
// Test issue https://github.com/pingcap/tidb/issues/9639.
// Both window function and expression appear in final result field.
tk.MustExec("set @@tidb_enable_window_function = 1")
rs, err = tk.Exec("select 1+1, row_number() over() num from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Assert(fields[0].Column.Name.L, Equals, "1+1")
c.Assert(fields[0].ColumnAsName.L, Equals, "1+1")
c.Assert(fields[1].Column.Name.L, Equals, "num")
c.Assert(fields[1].ColumnAsName.L, Equals, "num")
tk.MustExec("set @@tidb_enable_window_function = 0")
c.Assert(rs.Close(), IsNil)
rs, err = tk.Exec("select if(1,c,c) from t;")
c.Check(err, IsNil)
fields = rs.Fields()
c.Assert(fields[0].Column.Name.L, Equals, "if(1,c,c)")
// It's a compatibility issue. Should be empty instead.
c.Assert(fields[0].ColumnAsName.L, Equals, "if(1,c,c)")
c.Assert(rs.Close(), IsNil)
}
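// TestSelectVar tests reading and assigning user variables in a select list.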
func (s *testSuiteP2) TestSelectVar(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (d int)")
tk.MustExec("insert into t values(1), (2), (1)")
// This behavior is different from MySQL.
result := tk.MustQuery("select @a, @a := d+1 from t")
result.Check(testkit.Rows("<nil> 2", "2 3", "3 2"))
// Test for PR #10658.
tk.MustExec("select SQL_BIG_RESULT d from t group by d")
tk.MustExec("select SQL_SMALL_RESULT d from t group by d")
tk.MustExec("select SQL_BUFFER_RESULT d from t group by d")
}
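// TestHistoryRead tests reading historical data through tidb_snapshot.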
func (s *testSuiteP2) TestHistoryRead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists history_read")
tk.MustExec("create table history_read (a int)")
tk.MustExec("insert history_read values (1)")
// For mocktikv, the safe point is not initialized, so we manually insert it for the snapshot to use.
safePointName := "tikv_gc_safe_point"
safePointValue := "20060102-15:04:05 -0700"
safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)"
updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s')
ON DUPLICATE KEY
UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment)
tk.MustExec(updateSafePoint)
// Setting the snapshot to a time before the safe point will fail.
_, err := tk.Exec("set @@tidb_snapshot = '2006-01-01 15:04:05.999999'")
c.Assert(terror.ErrorEqual(err, variable.ErrSnapshotTooOld), IsTrue, Commentf("err %v", err))
// SnapshotTS is not updated if the check fails.
c.Assert(tk.Se.GetSessionVars().SnapshotTS, Equals, uint64(0))
curVer1, _ := s.store.CurrentVersion(oracle.GlobalTxnScope)
time.Sleep(time.Millisecond)
snapshotTime := time.Now()
time.Sleep(time.Millisecond)
curVer2, _ := s.store.CurrentVersion(oracle.GlobalTxnScope)
tk.MustExec("insert history_read values (2)")
tk.MustQuery("select * from history_read").Check(testkit.Rows("1", "2"))
tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'")
ctx := tk.Se.(sessionctx.Context)
snapshotTS := ctx.GetSessionVars().SnapshotTS
c.Assert(snapshotTS, Greater, curVer1.Ver)
c.Assert(snapshotTS, Less, curVer2.Ver)
tk.MustQuery("select * from history_read").Check(testkit.Rows("1"))
_, err = tk.Exec("insert history_read values (2)")
c.Assert(err, NotNil)
_, err = tk.Exec("update history_read set a = 3 where a = 1")
c.Assert(err, NotNil)
_, err = tk.Exec("delete from history_read where a = 1")
c.Assert(err, NotNil)
tk.MustExec("set @@tidb_snapshot = ''")
tk.MustQuery("select * from history_read").Check(testkit.Rows("1", "2"))
tk.MustExec("insert history_read values (3)")
tk.MustExec("update history_read set a = 4 where a = 3")
tk.MustExec("delete from history_read where a = 1")
time.Sleep(time.Millisecond)
snapshotTime = time.Now()
time.Sleep(time.Millisecond)
tk.MustExec("alter table history_read add column b int")
tk.MustExec("insert history_read values (8, 8), (9, 9)")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2 <nil>", "4 <nil>", "8 8", "9 9"))
tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4"))
tsoStr := strconv.FormatUint(oracle.EncodeTSO(snapshotTime.UnixNano()/int64(time.Millisecond)), 10)
tk.MustExec("set @@tidb_snapshot = '" + tsoStr + "'")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4"))
tk.MustExec("set @@tidb_snapshot = ''")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2 <nil>", "4 <nil>", "8 8", "9 9"))
}
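// mustUpdateSafePoint is a hypothetical helper (an assumption for illustration, not
// used by the tests above) sketching the same safe-point upsert pattern in isolation:
// mocktikv has no GC safe point, so snapshot reads need one inserted into mysql.tidb
// manually before tidb_snapshot can be set.
func mustUpdateSafePoint(tk *testkit.TestKit, value string) {
	tk.MustExec(fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', 'injected by tests')
			       ON DUPLICATE KEY
			       UPDATE variable_value = '%[1]s'`, value))
}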
func (s *testSuite2) TestLowResolutionTSORead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@autocommit=1")
tk.MustExec("use test")
tk.MustExec("drop table if exists low_resolution_tso")
tk.MustExec("create table low_resolution_tso(a int)")
tk.MustExec("insert low_resolution_tso values (1)")
// enable low resolution tso
c.Assert(tk.Se.GetSessionVars().LowResolutionTSO, IsFalse)
_, err := tk.Exec("set @@tidb_low_resolution_tso = 'on'")
c.Assert(err, IsNil)
c.Assert(tk.Se.GetSessionVars().LowResolutionTSO, IsTrue)
time.Sleep(3 * time.Second)
tk.MustQuery("select * from low_resolution_tso").Check(testkit.Rows("1"))
_, err = tk.Exec("update low_resolution_tso set a = 2")
c.Assert(err, NotNil)
tk.MustExec("set @@tidb_low_resolution_tso = 'off'")
tk.MustExec("update low_resolution_tso set a = 2")
tk.MustQuery("select * from low_resolution_tso").Check(testkit.Rows("2"))
}
func (s *testSuite) TestScanControlSelection(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int, index idx_b(b))")
tk.MustExec("insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3)")
tk.MustQuery("select (select count(1) k from t s where s.b = t1.c) from t t1").Sort().Check(testkit.Rows("0", "1", "3", "3"))
}
func (s *testSuite) TestSimpleDAG(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int)")
tk.MustExec("insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3)")
tk.MustQuery("select a from t").Check(testkit.Rows("1", "2", "3", "4"))
tk.MustQuery("select * from t where a = 4").Check(testkit.Rows("4 2 3"))
tk.MustQuery("select a from t limit 1").Check(testkit.Rows("1"))
tk.MustQuery("select a from t order by a desc").Check(testkit.Rows("4", "3", "2", "1"))
tk.MustQuery("select a from t order by a desc limit 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t order by b desc limit 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t where a < 3").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t where b > 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t where b > 1 and a < 3").Check(testkit.Rows())
tk.MustQuery("select count(*) from t where b > 1 and a < 3").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t").Check(testkit.Rows("4"))
tk.MustQuery("select count(*), c from t group by c order by c").Check(testkit.Rows("2 1", "1 2", "1 3"))
tk.MustQuery("select sum(c) as s from t group by b order by s").Check(testkit.Rows("3", "4"))
tk.MustQuery("select avg(a) as s from t group by b order by s").Check(testkit.Rows("2.0000", "4.0000"))
tk.MustQuery("select sum(distinct c) from t group by b").Check(testkit.Rows("3", "3"))
tk.MustExec("create index i on t(c,b)")
tk.MustQuery("select a from t where c = 1").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t where c = 1 and a < 2").Check(testkit.Rows("1"))
tk.MustQuery("select a from t where c = 1 order by a limit 1").Check(testkit.Rows("1"))
tk.MustQuery("select count(*) from t where c = 1 ").Check(testkit.Rows("2"))
tk.MustExec("create index i1 on t(b)")
tk.MustQuery("select c from t where b = 2").Check(testkit.Rows("3"))
tk.MustQuery("select * from t where b = 2").Check(testkit.Rows("4 2 3"))
tk.MustQuery("select count(*) from t where b = 1").Check(testkit.Rows("3"))
tk.MustQuery("select * from t where b = 1 and a > 1 limit 1").Check(testkit.Rows("2 1 1"))
// Test time push down.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int, c1 datetime);")
tk.MustExec("insert into t values (1, '2015-06-07 12:12:12')")
tk.MustQuery("select id from t where c1 = '2015-06-07 12:12:12'").Check(testkit.Rows("1"))
// Test issue 17816
tk.MustExec("drop table if exists t0")
tk.MustExec("CREATE TABLE t0(c0 INT)")
tk.MustExec("INSERT INTO t0 VALUES (100000)")
tk.MustQuery("SELECT * FROM t0 WHERE NOT SPACE(t0.c0)").Check(testkit.Rows("100000"))
}
func (s *testSuite) TestTimestampTimeZone(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (ts timestamp)")
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t values ('2017-04-27 22:40:42')")
// The timestamp will get a different value if the time_zone session variable changes.
tests := []struct {
timezone string
expect string
}{
{"+10:00", "2017-04-28 08:40:42"},
{"-6:00", "2017-04-27 16:40:42"},
}
for _, tt := range tests {
tk.MustExec(fmt.Sprintf("set time_zone = '%s'", tt.timezone))
tk.MustQuery("select * from t").Check(testkit.Rows(tt.expect))
}
// For issue https://github.com/pingcap/tidb/issues/3467
tk.MustExec("drop table if exists t1")
tk.MustExec(`CREATE TABLE t1 (
id bigint(20) NOT NULL AUTO_INCREMENT,
uid int(11) DEFAULT NULL,
datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
ip varchar(128) DEFAULT NULL,
PRIMARY KEY (id),
KEY i_datetime (datetime),
KEY i_userid (uid)
);`)
tk.MustExec(`INSERT INTO t1 VALUES (123381351,1734,"2014-03-31 08:57:10","127.0.0.1");`)
r := tk.MustQuery("select datetime from t1;") // Cover TableReaderExec
r.Check(testkit.Rows("2014-03-31 08:57:10"))
r = tk.MustQuery("select datetime from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("2014-03-31 08:57:10")) // Cover IndexReaderExec
r = tk.MustQuery("select * from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("123381351 1734 2014-03-31 08:57:10 127.0.0.1")) // Cover IndexLookupExec
// For issue https://github.com/pingcap/tidb/issues/3485
tk.MustExec("set time_zone = 'Asia/Shanghai'")
tk.MustExec("drop table if exists t1")
tk.MustExec(`CREATE TABLE t1 (
id bigint(20) NOT NULL AUTO_INCREMENT,
datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);`)
tk.MustExec(`INSERT INTO t1 VALUES (123381351,"2014-03-31 08:57:10");`)
r = tk.MustQuery(`select * from t1 where datetime="2014-03-31 08:57:10";`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
tk.MustExec(`alter table t1 add key i_datetime (datetime);`)
r = tk.MustQuery(`select * from t1 where datetime="2014-03-31 08:57:10";`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
r = tk.MustQuery(`select * from t1;`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
r = tk.MustQuery("select datetime from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("2014-03-31 08:57:10"))
}
func (s *testSuite) TestTimestampDefaultValueTimeZone(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("set time_zone = '+08:00'")
tk.MustExec(`create table t (a int, b timestamp default "2019-01-17 14:46:14")`)
tk.MustExec("insert into t set a=1")
r := tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-17 14:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t set a=2")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-17 06:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 06:46:14", "2 2019-01-17 06:46:14"))
// Test the case where the column's version is greater than ColumnInfoVersion1.
sctx := tk.Se.(sessionctx.Context)
is := domain.GetDomain(sctx).InfoSchema()
c.Assert(is, NotNil)
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tb.Cols()[1].Version = model.ColumnInfoVersion1 + 1
tk.MustExec("insert into t set a=3")
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 06:46:14", "2 2019-01-17 06:46:14", "3 2019-01-17 06:46:14"))
tk.MustExec("delete from t where a=3")
// Change time zone back.
tk.MustExec("set time_zone = '+08:00'")
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 14:46:14", "2 2019-01-17 14:46:14"))
tk.MustExec("set time_zone = '-08:00'")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-16 22:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// Test the zero default value in multiple time zones.
defer tk.MustExec(fmt.Sprintf("set @@sql_mode='%s'", tk.MustQuery("select @@sql_mode").Rows()[0][0]))
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION';")
tk.MustExec("drop table if exists t")
tk.MustExec("set time_zone = '+08:00'")
tk.MustExec(`create table t (a int, b timestamp default "0000-00-00 00")`)
tk.MustExec("insert into t set a=1")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t set a=2")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '-08:00'")
tk.MustExec("insert into t set a=3")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 0000-00-00 00:00:00", "2 0000-00-00 00:00:00", "3 0000-00-00 00:00:00"))
// Test adding a timestamp column with default current_timestamp.
tk.MustExec(`drop table if exists t`)
tk.MustExec(`set time_zone = 'Asia/Shanghai'`)
tk.MustExec(`create table t (a int)`)
tk.MustExec(`insert into t set a=1`)
tk.MustExec(`alter table t add column b timestamp not null default current_timestamp;`)
timeIn8 := tk.MustQuery("select b from t").Rows()[0][0]
tk.MustExec(`set time_zone = '+00:00'`)
timeIn0 := tk.MustQuery("select b from t").Rows()[0][0]
c.Assert(timeIn8 != timeIn0, IsTrue, Commentf("%v == %v", timeIn8, timeIn0))
datumTimeIn8, err := expression.GetTimeValue(tk.Se, timeIn8, mysql.TypeTimestamp, 0)
c.Assert(err, IsNil)
tIn8To0 := datumTimeIn8.GetMysqlTime()
timeZoneIn8, err := time.LoadLocation("Asia/Shanghai")
c.Assert(err, IsNil)
err = tIn8To0.ConvertTimeZone(timeZoneIn8, time.UTC)
c.Assert(err, IsNil)
c.Assert(timeIn0 == tIn8To0.String(), IsTrue, Commentf("%v != %v", timeIn0, tIn8To0.String()))
// Test adding an index.
tk.MustExec(`alter table t add index(b);`)
tk.MustExec("admin check table t")
tk.MustExec(`set time_zone = '+05:00'`)
tk.MustExec("admin check table t")
}
func (s *testSuite) TestTiDBCurrentTS(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
tk.MustExec("begin")
rows := tk.MustQuery("select @@tidb_current_ts").Rows()
tsStr := rows[0][0].(string)
txn, err := tk.Se.Txn(true)
c.Assert(err, IsNil)
c.Assert(tsStr, Equals, fmt.Sprintf("%d", txn.StartTS()))
tk.MustExec("begin")
rows = tk.MustQuery("select @@tidb_current_ts").Rows()
newTsStr := rows[0][0].(string)
txn, err = tk.Se.Txn(true)
c.Assert(err, IsNil)
c.Assert(newTsStr, Equals, fmt.Sprintf("%d", txn.StartTS()))
c.Assert(newTsStr, Not(Equals), tsStr)
tk.MustExec("commit")
tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
_, err = tk.Exec("set @@tidb_current_ts = '1'")
c.Assert(terror.ErrorEqual(err, variable.ErrIncorrectScope), IsTrue, Commentf("err %v", err))
}
func (s *testSuite) TestTiDBLastTxnInfo(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key)")
tk.MustQuery("select @@tidb_last_txn_info").Check(testkit.Rows(""))
tk.MustExec("insert into t values (1)")
rows1 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(rows1[0][0].(string), Greater, "0")
c.Assert(rows1[0][0].(string), Less, rows1[0][1].(string))
tk.MustExec("begin")
tk.MustQuery("select a from t where a = 1").Check(testkit.Rows("1"))
rows2 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts'), @@tidb_current_ts").Rows()
tk.MustExec("commit")
rows3 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(rows2[0][0], Equals, rows1[0][0])
c.Assert(rows2[0][1], Equals, rows1[0][1])
c.Assert(rows3[0][0], Equals, rows1[0][0])
c.Assert(rows3[0][1], Equals, rows1[0][1])
c.Assert(rows2[0][1], Less, rows2[0][2])
tk.MustExec("begin")
tk.MustExec("update t set a = a + 1 where a = 1")
rows4 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts'), @@tidb_current_ts").Rows()
tk.MustExec("commit")
rows5 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(rows4[0][0], Equals, rows1[0][0])
c.Assert(rows4[0][1], Equals, rows1[0][1])
c.Assert(rows4[0][2], Equals, rows5[0][0])
c.Assert(rows4[0][1], Less, rows4[0][2])
c.Assert(rows4[0][2], Less, rows5[0][1])
tk.MustExec("begin")
tk.MustExec("update t set a = a + 1 where a = 2")
tk.MustExec("rollback")
rows6 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(rows6[0][0], Equals, rows5[0][0])
c.Assert(rows6[0][1], Equals, rows5[0][1])
tk.MustExec("begin optimistic")
tk.MustExec("insert into t values (2)")
_, err := tk.Exec("commit")
c.Assert(err, NotNil)
rows7 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts'), json_extract(@@tidb_last_txn_info, '$.error')").Rows()
c.Assert(rows7[0][0], Greater, rows5[0][0])
c.Assert(rows7[0][1], Equals, "0")
c.Assert(strings.Contains(err.Error(), rows7[0][1].(string)), IsTrue)
_, err = tk.Exec("set @@tidb_last_txn_info = '{}'")
c.Assert(terror.ErrorEqual(err, variable.ErrIncorrectScope), IsTrue, Commentf("err %v", err))
}
func (s *testSerialSuite) TestTiDBLastTxnInfoCommitMode(c *C) {
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.SafeWindow = time.Second
})
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, v int)")
tk.MustExec("insert into t values (1, 1)")
tk.MustExec("set @@tidb_enable_async_commit = 1")
tk.MustExec("set @@tidb_enable_1pc = 0")
tk.MustExec("update t set v = v + 1 where a = 1")
rows := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Log(rows)
c.Assert(rows[0][0], Equals, `"async_commit"`)
c.Assert(rows[0][1], Equals, "false")
c.Assert(rows[0][2], Equals, "false")
tk.MustExec("set @@tidb_enable_async_commit = 0")
tk.MustExec("set @@tidb_enable_1pc = 1")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Assert(rows[0][0], Equals, `"1pc"`)
c.Assert(rows[0][1], Equals, "false")
c.Assert(rows[0][2], Equals, "false")
tk.MustExec("set @@tidb_enable_async_commit = 0")
tk.MustExec("set @@tidb_enable_1pc = 0")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Assert(rows[0][0], Equals, `"2pc"`)
c.Assert(rows[0][1], Equals, "false")
c.Assert(rows[0][2], Equals, "false")
config.UpdateGlobal(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.SafeWindow = 0
})
tk.MustExec("set @@tidb_enable_async_commit = 1")
tk.MustExec("set @@tidb_enable_1pc = 0")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Log(rows)
c.Assert(rows[0][0], Equals, `"2pc"`)
c.Assert(rows[0][1], Equals, "true")
c.Assert(rows[0][2], Equals, "false")
tk.MustExec("set @@tidb_enable_async_commit = 0")
tk.MustExec("set @@tidb_enable_1pc = 1")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Log(rows)
c.Assert(rows[0][0], Equals, `"2pc"`)
c.Assert(rows[0][1], Equals, "false")
c.Assert(rows[0][2], Equals, "true")
tk.MustExec("set @@tidb_enable_async_commit = 1")
tk.MustExec("set @@tidb_enable_1pc = 1")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Log(rows)
c.Assert(rows[0][0], Equals, `"2pc"`)
c.Assert(rows[0][1], Equals, "true")
c.Assert(rows[0][2], Equals, "true")
}
func (s *testSuite) TestTiDBLastQueryInfo(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, v int)")
tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.start_ts')").Check(testkit.Rows("0 0"))
toUint64 := func(str interface{}) uint64 {
res, err := strconv.ParseUint(str.(string), 10, 64)
c.Assert(err, IsNil)
return res
}
tk.MustExec("select * from t")
rows := tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0], Equals, rows[0][1])
tk.MustExec("insert into t values (1, 10)")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0], Equals, rows[0][1])
// tidb_last_txn_info is still valid after checking query info.
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0].(string), Less, rows[0][1].(string))
tk.MustExec("begin pessimistic")
tk.MustExec("select * from t")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0], Equals, rows[0][1])
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk2.MustExec("update t set v = 11 where a = 1")
tk.MustExec("select * from t")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0], Equals, rows[0][1])
tk.MustExec("update t set v = 12 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(toUint64(rows[0][0]), Less, toUint64(rows[0][1]))
tk.MustExec("commit")
tk.MustExec("set transaction isolation level read committed")
tk.MustExec("begin pessimistic")
tk.MustExec("select * from t")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(toUint64(rows[0][0]), Less, toUint64(rows[0][1]))
tk.MustExec("rollback")
}
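// lastQueryTS is a hypothetical helper (an assumption, not part of the original suite)
// showing how a single field of @@tidb_last_query_info can be extracted and parsed as
// a uint64 timestamp, mirroring the toUint64 closure used in TestTiDBLastQueryInfo.
func lastQueryTS(c *C, tk *testkit.TestKit, field string) uint64 {
	// json_extract returns the timestamp as a decimal string, e.g. "424851234567".
	raw := tk.MustQuery(fmt.Sprintf("select json_extract(@@tidb_last_query_info, '$.%s')", field)).Rows()[0][0]
	ts, err := strconv.ParseUint(raw.(string), 10, 64)
	c.Assert(err, IsNil)
	return ts
}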
func (s *testSuite) TestSelectForUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk.MustExec("drop table if exists t, t1")
txn, err := tk.Se.Txn(true)
c.Assert(kv.ErrInvalidTxn.Equal(err), IsTrue)
c.Assert(txn.Valid(), IsFalse)
tk.MustExec("create table t (c1 int, c2 int, c3 int)")
tk.MustExec("insert t values (11, 2, 3)")
tk.MustExec("insert t values (12, 2, 3)")
tk.MustExec("insert t values (13, 2, 3)")
tk.MustExec("create table t1 (c1 int)")
tk.MustExec("insert t1 values (11)")
// conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=11")
tk2.MustExec("commit")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
// no conflict for subquery.
tk1.MustExec("begin")
tk1.MustQuery("select * from t where exists(select null from t1 where t1.c1=t.c1) for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=12")
tk2.MustExec("commit")
tk1.MustExec("commit")
// no conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=22 where c1=12")
tk2.MustExec("commit")
tk1.MustExec("commit")
// no conflict, autocommit
tk1.MustExec("set @@autocommit=1;")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=11")
tk2.MustExec("commit")
tk1.MustExec("commit")
// conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from (select * from t for update) t join t1 for update")
tk2.MustExec("begin")
tk2.MustExec("update t1 set c1 = 13")
tk2.MustExec("commit")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
}
func (s *testSuite) TestEmptyEnum(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (e enum('Y', 'N'))")
tk.MustExec("set sql_mode='STRICT_TRANS_TABLES'")
_, err := tk.Exec("insert into t values (0)")
c.Assert(terror.ErrorEqual(err, types.ErrTruncated), IsTrue, Commentf("err %v", err))
_, err = tk.Exec("insert into t values ('abc')")
c.Assert(terror.ErrorEqual(err, types.ErrTruncated), IsTrue, Commentf("err %v", err))
tk.MustExec("set sql_mode=''")
tk.MustExec("insert into t values (0)")
tk.MustQuery("select * from t").Check(testkit.Rows(""))
tk.MustExec("insert into t values ('abc')")
tk.MustQuery("select * from t").Check(testkit.Rows("", ""))
tk.MustExec("insert into t values (null)")
tk.MustQuery("select * from t").Check(testkit.Rows("", "", "<nil>"))
}
// TestIssue4024 tests https://github.com/pingcap/tidb/issues/4024.
func (s *testSuite) TestIssue4024(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database test2")
tk.MustExec("use test2")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("use test")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("update t, test2.t set test2.t.a=2")
tk.MustQuery("select * from t").Check(testkit.Rows("1"))
tk.MustQuery("select * from test2.t").Check(testkit.Rows("2"))
tk.MustExec("update test.t, test2.t set test.t.a=3")
tk.MustQuery("select * from t").Check(testkit.Rows("3"))
tk.MustQuery("select * from test2.t").Check(testkit.Rows("2"))
}
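// Check flags used by checkRequestClient to decide which request attributes to
// validate: checkRequestOff disables checking, checkRequestSyncLog verifies the
// sync-log flag on prewrite/commit requests, and checkDDLAddIndexPriority verifies
// the command priority used during DDL add-index reorganization.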
const (
checkRequestOff = iota
checkRequestSyncLog
checkDDLAddIndexPriority
)
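// checkRequestClient wraps a tikv.Client and inspects outgoing requests according to
// the check flag set in mu.checkFlags, returning an error when an expected attribute
// (sync log or priority) does not match.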
type checkRequestClient struct {
tikv.Client
priority pb.CommandPri
lowPriorityCnt uint32
mu struct {
sync.RWMutex
checkFlags uint32
syncLog bool
}
}
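// setCheckPriority and getCheckPriority access the expected priority atomically,
// casting the pb.CommandPri field to *int32 because SendRequest may run on other
// goroutines concurrently with the test body.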
func (c *checkRequestClient) setCheckPriority(priority pb.CommandPri) {
atomic.StoreInt32((*int32)(&c.priority), int32(priority))
}
func (c *checkRequestClient) getCheckPriority() pb.CommandPri {
return (pb.CommandPri)(atomic.LoadInt32((*int32)(&c.priority)))
}
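// SendRequest forwards the request to the wrapped client and, depending on the active
// check flag, either verifies the sync-log flag on prewrite/commit requests or checks
// the priority of scan requests, counting low-priority prewrites for the DDL test.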
func (c *checkRequestClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
resp, err := c.Client.SendRequest(ctx, addr, req, timeout)
c.mu.RLock()
checkFlags := c.mu.checkFlags
c.mu.RUnlock()
if checkFlags == checkRequestSyncLog {
switch req.Type {
case tikvrpc.CmdPrewrite, tikvrpc.CmdCommit:
c.mu.RLock()
syncLog := c.mu.syncLog
c.mu.RUnlock()
if syncLog != req.SyncLog {
return nil, errors.New("fail to set sync log")
}
}
} else if checkFlags == checkDDLAddIndexPriority {
if req.Type == tikvrpc.CmdScan {
if c.getCheckPriority() != req.Priority {
return nil, errors.New("fail to set priority")
}
} else if req.Type == tikvrpc.CmdPrewrite {
if c.getCheckPriority() == pb.CommandPri_Low {
atomic.AddUint32(&c.lowPriorityCnt, 1)
}
}
}
return resp, err
}
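// testSuiteWithCliBase bootstraps a mock store whose client is hijacked by a
// checkRequestClient, so tests can assert properties of the requests TiDB sends.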
type testSuiteWithCliBase struct {
store kv.Storage
dom *domain.Domain
cli *checkRequestClient
}
type testSuite1 struct {
testSuiteWithCliBase
}
type testSerialSuite2 struct {
testSuiteWithCliBase
}
func (s *testSuiteWithCliBase) SetUpSuite(c *C) {
cli := &checkRequestClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
s.cli = cli
var err error
s.store, err = mockstore.NewMockStore(
mockstore.WithClientHijacker(hijackClient),
)
c.Assert(err, IsNil)
session.SetStatsLease(0)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
s.dom.SetStatsUpdating(true)
}
func (s *testSuiteWithCliBase) TearDownSuite(c *C) {
s.dom.Close()
s.store.Close()
}
func (s *testSuiteWithCliBase) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
func (s *testSuite2) TestAddIndexPriority(c *C) {
cli := &checkRequestClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
store, err := mockstore.NewMockStore(
mockstore.WithClientHijacker(hijackClient),
)
c.Assert(err, IsNil)
dom, err := session.BootstrapSession(store)
c.Assert(err, IsNil)
defer func() {
dom.Close()
err = store.Close()
c.Assert(err, IsNil)
}()
tk := testkit.NewTestKit(c, store)
tk.MustExec("use test")
tk.MustExec("create table t1 (id int, v int)")
// Insert some data to make sure the planner builds an IndexLookup for t1.
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t1 values (%d, %d)", i, i))
}
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(pb.CommandPri_Low)
tk.MustExec("alter table t1 add index t1_index (id);")
c.Assert(atomic.LoadUint32(&cli.lowPriorityCnt) > 0, IsTrue)
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
tk.MustExec("alter table t1 drop index t1_index;")
tk.MustExec("SET SESSION tidb_ddl_reorg_priority = 'PRIORITY_NORMAL'")
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(pb.CommandPri_Normal)
tk.MustExec("alter table t1 add index t1_index (id);")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
tk.MustExec("alter table t1 drop index t1_index;")
tk.MustExec("SET SESSION tidb_ddl_reorg_priority = 'PRIORITY_HIGH'")
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(pb.CommandPri_High)
tk.MustExec("alter table t1 add index t1_index (id);")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
}
func (s *testSuite1) TestAlterTableComment(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_1")
tk.MustExec("create table t_1 (c1 int, c2 int, c3 int default 1, index (c1)) comment = 'test table';")
tk.MustExec("alter table `t_1` comment 'this is table comment';")
result := tk.MustQuery("select table_comment from information_schema.tables where table_name = 't_1';")
result.Check(testkit.Rows("this is table comment"))
tk.MustExec("alter table `t_1` comment 'table t comment';")
result = tk.MustQuery("select table_comment from information_schema.tables where table_name = 't_1';")
result.Check(testkit.Rows("table t comment"))
}
func (s *testSuite) TestTimezonePushDown(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (ts timestamp)")
defer tk.MustExec("drop table t")
tk.MustExec(`insert into t values ("2018-09-13 10:02:06")`)
systemTZ := timeutil.SystemLocation()
c.Assert(systemTZ.String(), Not(Equals), "System")
c.Assert(systemTZ.String(), Not(Equals), "Local")
ctx := context.Background()
count := 0
ctx1 := context.WithValue(ctx, checkSelectRequestHookString, func(req *kv.Request) {
count += 1
dagReq := new(tipb.DAGRequest)
err := proto.Unmarshal(req.Data, dagReq)
c.Assert(err, IsNil)
c.Assert(dagReq.GetTimeZoneName(), Equals, systemTZ.String())
})
_, err := tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
c.Assert(err, IsNil)
tk.MustExec(`set time_zone="System"`)
_, err = tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
c.Assert(err, IsNil)
c.Assert(count, Equals, 2) // Make sure the hook function is called.
}
func (s *testSuite) TestNotFillCacheFlag(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int primary key)")
defer tk.MustExec("drop table t")
tk.MustExec("insert into t values (1)")
tests := []struct {
sql string
expect bool
}{
{"select SQL_NO_CACHE * from t", true},
{"select SQL_CACHE * from t", false},
{"select * from t", false},
}
count := 0
ctx := context.Background()
for _, test := range tests {
ctx1 := context.WithValue(ctx, checkSelectRequestHookString, func(req *kv.Request) {
count++
if req.NotFillCache != test.expect {
c.Errorf("sql=%s, expect=%v, get=%v", test.sql, test.expect, req.NotFillCache)
}
})
rs, err := tk.Se.Execute(ctx1, test.sql)
c.Assert(err, IsNil)
tk.ResultSetToResult(rs[0], Commentf("sql: %v", test.sql))
}
c.Assert(count, Equals, len(tests)) // Make sure the hook function is called.
}
func (s *testSuite1) TestSyncLog(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
cli := s.cli
cli.mu.Lock()
cli.mu.checkFlags = checkRequestSyncLog
cli.mu.syncLog = true
cli.mu.Unlock()
tk.MustExec("create table t (id int primary key)")
cli.mu.Lock()
cli.mu.syncLog = false
cli.mu.Unlock()
tk.MustExec("insert into t values (1)")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
}
func (s *testSuite) TestHandleTransfer(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t(a int, index idx(a))")
tk.MustExec("insert into t values(1), (2), (4)")
tk.MustExec("begin")
tk.MustExec("update t set a = 3 where a = 4")
// Test a table scan read whose result needs the handle.
tk.MustQuery("select * from t ignore index(idx)").Check(testkit.Rows("1", "2", "3"))
tk.MustExec("insert into t values(4)")
// Test a single read whose result needs the handle.
tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "3", "4"))
tk.MustQuery("select * from t use index(idx) order by a desc").Check(testkit.Rows("4", "3", "2", "1"))
tk.MustExec("update t set a = 5 where a = 3")
tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "4", "5"))
tk.MustExec("commit")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("insert into t values(3, 3), (1, 1), (2, 2)")
// Second, test double read.
tk.MustQuery("select * from t use index(idx) order by a").Check(testkit.Rows("1 1", "2 2", "3 3"))
}
func (s *testSuite) TestBit(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(2))")
tk.MustExec("insert into t values (0), (1), (2), (3)")
_, err := tk.Exec("insert into t values (4)")
c.Assert(err, NotNil)
_, err = tk.Exec("insert into t values ('a')")
c.Assert(err, NotNil)
r, err := tk.Exec("select * from t where c1 = 2")
c.Assert(err, IsNil)
req := r.NewChunk()
err = r.Next(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(types.BinaryLiteral(req.GetRow(0).GetBytes(0)), DeepEquals, types.NewBinaryLiteralFromUint(2, -1))
r.Close()
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(31))")
tk.MustExec("insert into t values (0x7fffffff)")
_, err = tk.Exec("insert into t values (0x80000000)")
c.Assert(err, NotNil)
_, err = tk.Exec("insert into t values (0xffffffff)")
c.Assert(err, NotNil)
tk.MustExec("insert into t values ('123')")
tk.MustExec("insert into t values ('1234')")
_, err = tk.Exec("insert into t values ('12345)")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(62))")
tk.MustExec("insert into t values ('12345678')")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(61))")
_, err = tk.Exec("insert into t values ('12345678')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(32))")
tk.MustExec("insert into t values (0x7fffffff)")
tk.MustExec("insert into t values (0xffffffff)")
_, err = tk.Exec("insert into t values (0x1ffffffff)")
c.Assert(err, NotNil)
tk.MustExec("insert into t values ('1234')")
_, err = tk.Exec("insert into t values ('12345')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(64))")
tk.MustExec("insert into t values (0xffffffffffffffff)")
tk.MustExec("insert into t values ('12345678')")
_, err = tk.Exec("insert into t values ('123456789')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(64))")
tk.MustExec("insert into t values (0xffffffffffffffff)")
tk.MustExec("insert into t values ('12345678')")
tk.MustQuery("select * from t where c1").Check(testkit.Rows("\xff\xff\xff\xff\xff\xff\xff\xff", "12345678"))
}
func (s *testSuite) TestEnum(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c enum('a', 'b', 'c'))")
tk.MustExec("insert into t values ('a'), (2), ('c')")
tk.MustQuery("select * from t where c = 'a'").Check(testkit.Rows("a"))
tk.MustQuery("select c + 1 from t where c = 2").Check(testkit.Rows("3"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values ()")
tk.MustExec("insert into t values (null), ('1')")
tk.MustQuery("select c + 1 from t where c = 1").Check(testkit.Rows("2"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values(1), (2), (3)")
tk.MustQuery("select * from t where c").Check(testkit.Rows("a", "b", "c"))
}
func (s *testSuite) TestSet(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c set('a', 'b', 'c'))")
tk.MustExec("insert into t values ('a'), (2), ('c'), ('a,b'), ('b,a')")
tk.MustQuery("select * from t where c = 'a'").Check(testkit.Rows("a"))
tk.MustQuery("select * from t where c = 'a,b'").Check(testkit.Rows("a,b", "a,b"))
tk.MustQuery("select c + 1 from t where c = 2").Check(testkit.Rows("3"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values ()")
tk.MustExec("insert into t values (null), ('1')")
tk.MustQuery("select c + 1 from t where c = 1").Check(testkit.Rows("2"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values(3)")
tk.MustQuery("select * from t where c").Check(testkit.Rows("a,b"))
}
func (s *testSuite) TestSubqueryInValues(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int, name varchar(20))")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (gid int)")
tk.MustExec("insert into t1 (gid) value (1)")
tk.MustExec("insert into t (id, name) value ((select gid from t1) ,'asd')")
tk.MustQuery("select * from t").Check(testkit.Rows("1 asd"))
}
func (s *testSuite) TestEnhancedRangeAccess(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, b int)")
tk.MustExec("insert into t values(1, 2), (2, 1)")
tk.MustQuery("select * from t where (a = 1 and b = 2) or (a = 2 and b = 1)").Check(testkit.Rows("1 2", "2 1"))
tk.MustQuery("select * from t where (a = 1 and b = 1) or (a = 2 and b = 2)").Check(nil)
}
// TestMaxInt64Handle tests issue #4810.
func (s *testSuite) TestMaxInt64Handle(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id bigint, PRIMARY KEY (id))")
tk.MustExec("insert into t values(9223372036854775807)")
tk.MustExec("select * from t where id = 9223372036854775807")
tk.MustQuery("select * from t where id = 9223372036854775807;").Check(testkit.Rows("9223372036854775807"))
tk.MustQuery("select * from t").Check(testkit.Rows("9223372036854775807"))
_, err := tk.Exec("insert into t values(9223372036854775807)")
c.Assert(err, NotNil)
tk.MustExec("delete from t where id = 9223372036854775807")
tk.MustQuery("select * from t").Check(nil)
}
func (s *testSuite) TestTableScanWithPointRanges(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int, PRIMARY KEY (id))")
tk.MustExec("insert into t values(1), (5), (10)")
tk.MustQuery("select * from t where id in(1, 2, 10)").Check(testkit.Rows("1", "10"))
}
func (s *testSuite) TestUnsignedPk(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id bigint unsigned primary key)")
var num1, num2 uint64 = math.MaxInt64 + 1, math.MaxInt64 + 2
tk.MustExec(fmt.Sprintf("insert into t values(%v), (%v), (1), (2)", num1, num2))
num1Str := strconv.FormatUint(num1, 10)
num2Str := strconv.FormatUint(num2, 10)
tk.MustQuery("select * from t order by id").Check(testkit.Rows("1", "2", num1Str, num2Str))
tk.MustQuery("select * from t where id not in (2)").Check(testkit.Rows(num1Str, num2Str, "1"))
tk.MustExec("drop table t")
tk.MustExec("create table t(a bigint unsigned primary key, b int, index idx(b))")
tk.MustExec("insert into t values(9223372036854775808, 1), (1, 1)")
tk.MustQuery("select * from t use index(idx) where b = 1 and a < 2").Check(testkit.Rows("1 1"))
tk.MustQuery("select * from t use index(idx) where b = 1 order by b, a").Check(testkit.Rows("1 1", "9223372036854775808 1"))
}
func (s *testSuite) TestSignedCommonHandle(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(k1 int, k2 int, primary key(k1, k2))")
tk.MustExec("insert into t(k1, k2) value(-100, 1), (-50, 1), (0, 0), (1, 1), (3, 3)")
tk.MustQuery("select k1 from t order by k1").Check(testkit.Rows("-100", "-50", "0", "1", "3"))
tk.MustQuery("select k1 from t order by k1 desc").Check(testkit.Rows("3", "1", "0", "-50", "-100"))
tk.MustQuery("select k1 from t where k1 < -51").Check(testkit.Rows("-100"))
tk.MustQuery("select k1 from t where k1 < -1").Check(testkit.Rows("-100", "-50"))
tk.MustQuery("select k1 from t where k1 <= 0").Check(testkit.Rows("-100", "-50", "0"))
tk.MustQuery("select k1 from t where k1 < 2").Check(testkit.Rows("-100", "-50", "0", "1"))
tk.MustQuery("select k1 from t where k1 < -1 and k1 > -90").Check(testkit.Rows("-50"))
}
func (s *testSuite) TestIssue5666(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@profiling=1")
tk.MustQuery("SELECT QUERY_ID, SUM(DURATION) AS SUM_DURATION FROM INFORMATION_SCHEMA.PROFILING GROUP BY QUERY_ID;").Check(testkit.Rows("0 0"))
}
func (s *testSuite) TestIssue5341(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("drop table if exists test.t")
tk.MustExec("create table test.t(a char)")
tk.MustExec("insert into test.t value('a')")
tk.MustQuery("select * from test.t where a < 1 order by a limit 0;").Check(testkit.Rows())
}
func (s *testSuite) TestContainDotColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists test.t1")
tk.MustExec("create table test.t1(t1.a char)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2(a char, t2.b int)")
tk.MustExec("drop table if exists t3")
_, err := tk.Exec("create table t3(s.a char);")
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrWrongTableName))
}
func (s *testSuite) TestCheckIndex(c *C) {
s.ctx = mock.NewContext()
s.ctx.Store = s.store
se, err := session.CreateSession4Test(s.store)
c.Assert(err, IsNil)
defer se.Close()
_, err = se.Execute(context.Background(), "create database test_admin")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_admin")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "create table t (pk int primary key, c int default 1, c1 int default 1, unique key c(c))")
c.Assert(err, IsNil)
is := s.domain.InfoSchema()
db := model.NewCIStr("test_admin")
dbInfo, ok := is.SchemaByName(db)
c.Assert(ok, IsTrue)
tblName := model.NewCIStr("t")
tbl, err := is.TableByName(db, tblName)
c.Assert(err, IsNil)
tbInfo := tbl.Meta()
alloc := autoid.NewAllocator(s.store, dbInfo.ID, false, autoid.RowIDAllocType)
tb, err := tables.TableFromMeta(autoid.NewAllocators(alloc), tbInfo)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t C")
c.Assert(err, IsNil)
// set data to:
// index data (handle, data): (1, 10), (2, 20)
// table data (handle, data): (1, 10), (2, 20)
recordVal1 := types.MakeDatums(int64(1), int64(10), int64(11))
recordVal2 := types.MakeDatums(int64(2), int64(20), int64(21))
c.Assert(s.ctx.NewTxn(context.Background()), IsNil)
_, err = tb.AddRecord(s.ctx, recordVal1)
c.Assert(err, IsNil)
_, err = tb.AddRecord(s.ctx, recordVal2)
c.Assert(err, IsNil)
txn, err := s.ctx.Txn(true)
c.Assert(err, IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
mockCtx := mock.NewContext()
idx := tb.Indices()[0]
sc := &stmtctx.StatementContext{TimeZone: time.Local}
_, err = se.Execute(context.Background(), "admin check index t idx_inexistent")
c.Assert(strings.Contains(err.Error(), "not exist"), IsTrue)
// set data to:
// index data (handle, data): (1, 10), (2, 20), (3, 30)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
_, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(30)), kv.IntHandle(3), nil)
c.Assert(err, IsNil)
key := tablecodec.EncodeRowKey(tb.Meta().ID, kv.IntHandle(4).Encoded())
setColValue(c, txn, key, types.NewDatum(int64(40)))
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "handle 3, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:30, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:<nil>")
// set data to:
// index data (handle, data): (1, 10), (2, 20), (3, 30), (4, 40)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
_, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(40)), kv.IntHandle(4), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(strings.Contains(err.Error(), "table count 3 != index(c) count 4"), IsTrue)
// set data to:
// index data (handle, data): (1, 10), (4, 40)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
err = idx.Delete(sc, txn, types.MakeDatums(int64(30)), kv.IntHandle(3))
c.Assert(err, IsNil)
err = idx.Delete(sc, txn, types.MakeDatums(int64(20)), kv.IntHandle(2))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(strings.Contains(err.Error(), "table count 3 != index(c) count 2"), IsTrue)
// TODO: pass the case below:
// set data to:
// index data (handle, data): (1, 10), (4, 40), (2, 30)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
}
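// setColValue encodes a single-row value for column IDs 2 and 3 with the row codec
// and writes it directly to the given key, bypassing the table layer so the test can
// construct inconsistent index/table data.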
func setColValue(c *C, txn kv.Transaction, key kv.Key, v types.Datum) {
row := []types.Datum{v, {}}
colIDs := []int64{2, 3}
sc := &stmtctx.StatementContext{TimeZone: time.Local}
rd := rowcodec.Encoder{Enable: true}
value, err := tablecodec.EncodeRow(sc, row, colIDs, nil, nil, &rd)
c.Assert(err, IsNil)
err = txn.Set(key, value)
c.Assert(err, IsNil)
}
func (s *testSuite) TestCheckTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Test 'admin check table' when the table has a unique index with null values.
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test;")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2));")
tk.MustExec("insert admin_test (c1, c2) values (1, 1), (2, 2), (NULL, NULL);")
tk.MustExec("admin check table admin_test;")
}
func (s *testSuite) TestCheckTableClusterIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("drop table if exists admin_test;")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, primary key (c1, c2), index (c1), unique key(c2));")
tk.MustExec("insert admin_test (c1, c2) values (1, 1), (2, 2), (3, 3);")
tk.MustExec("admin check table admin_test;")
}
func (s *testSuite) TestCoprocessorStreamingFlag(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int, value int, index idx(id))")
// Add some data to make statistics work.
for i := 0; i < 100; i++ {
tk.MustExec(fmt.Sprintf("insert into t values (%d, %d)", i, i))
}
tests := []struct {
sql string
expect bool
}{
{"select * from t", true}, // TableReader
{"select * from t where id = 5", true}, // IndexLookup
{"select * from t where id > 5", true}, // Filter
{"select * from t limit 3", false}, // Limit
{"select avg(id) from t", false}, // Aggregate
{"select * from t order by value limit 3", false}, // TopN
}
ctx := context.Background()
for _, test := range tests {
ctx1 := context.WithValue(ctx, checkSelectRequestHookString, func(req *kv.Request) {
if req.Streaming != test.expect {
c.Errorf("sql=%s, expect=%v, get=%v", test.sql, test.expect, req.Streaming)
}
})
rs, err := tk.Se.Execute(ctx1, test.sql)
c.Assert(err, IsNil)
tk.ResultSetToResult(rs[0], Commentf("sql: %v", test.sql))
}
}
func (s *testSuite) TestIncorrectLimitArg(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test;`)
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t(a bigint);`)
tk.MustExec(`prepare stmt1 from 'select * from t limit ?';`)
tk.MustExec(`prepare stmt2 from 'select * from t limit ?, ?';`)
tk.MustExec(`set @a = -1;`)
tk.MustExec(`set @b = 1;`)
var err error
_, err = tk.Se.Execute(context.TODO(), `execute stmt1 using @a;`)
c.Assert(err.Error(), Equals, `[planner:1210]Incorrect arguments to LIMIT`)
_, err = tk.Se.Execute(context.TODO(), `execute stmt2 using @b, @a;`)
c.Assert(err.Error(), Equals, `[planner:1210]Incorrect arguments to LIMIT`)
}
func (s *testSuite) TestLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test;`)
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t(a bigint, b bigint);`)
tk.MustExec(`insert into t values(1, 1), (2, 2), (3, 30), (4, 40), (5, 5), (6, 6);`)
tk.MustQuery(`select * from t order by a limit 1, 1;`).Check(testkit.Rows(
"2 2",
))
tk.MustQuery(`select * from t order by a limit 1, 2;`).Check(testkit.Rows(
"2 2",
"3 30",
))
tk.MustQuery(`select * from t order by a limit 1, 3;`).Check(testkit.Rows(
"2 2",
"3 30",
"4 40",
))
tk.MustQuery(`select * from t order by a limit 1, 4;`).Check(testkit.Rows(
"2 2",
"3 30",
"4 40",
"5 5",
))
// test inline projection
tk.MustQuery(`select a from t where a > 0 limit 1, 1;`).Check(testkit.Rows(
"2",
))
tk.MustQuery(`select a from t where a > 0 limit 1, 2;`).Check(testkit.Rows(
"2",
"3",
))
tk.MustQuery(`select b from t where a > 0 limit 1, 3;`).Check(testkit.Rows(
"2",
"30",
"40",
))
tk.MustQuery(`select b from t where a > 0 limit 1, 4;`).Check(testkit.Rows(
"2",
"30",
"40",
"5",
))
// test @@tidb_init_chunk_size=2
tk.MustExec(`set @@tidb_init_chunk_size=2;`)
tk.MustQuery(`select * from t where a > 0 limit 2, 1;`).Check(testkit.Rows(
"3 30",
))
tk.MustQuery(`select * from t where a > 0 limit 2, 2;`).Check(testkit.Rows(
"3 30",
"4 40",
))
tk.MustQuery(`select * from t where a > 0 limit 2, 3;`).Check(testkit.Rows(
"3 30",
"4 40",
"5 5",
))
tk.MustQuery(`select * from t where a > 0 limit 2, 4;`).Check(testkit.Rows(
"3 30",
"4 40",
"5 5",
"6 6",
))
// test inline projection
tk.MustQuery(`select a from t order by a limit 2, 1;`).Check(testkit.Rows(
"3",
))
tk.MustQuery(`select b from t order by a limit 2, 2;`).Check(testkit.Rows(
"30",
"40",
))
tk.MustQuery(`select a from t order by a limit 2, 3;`).Check(testkit.Rows(
"3",
"4",
"5",
))
tk.MustQuery(`select b from t order by a limit 2, 4;`).Check(testkit.Rows(
"30",
"40",
"5",
"6",
))
}
func (s *testSuite) TestCoprocessorStreamingWarning(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a double)")
tk.MustExec("insert into t value(1.2)")
tk.MustExec("set @@session.tidb_enable_streaming = 1")
result := tk.MustQuery("select * from t where a/0 > 1")
result.Check(testkit.Rows())
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1365|Division by 0"))
}
func (s *testSuite3) TestYearTypeDeleteIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a YEAR, PRIMARY KEY(a));")
tk.MustExec("insert into t set a = '2151';")
tk.MustExec("delete from t;")
tk.MustExec("admin check table t")
}
func (s *testSuite3) TestForSelectScopeInUnion(c *C) {
// A union B for update, the "for update" option belongs to union statement, so
// it should works on both A and B.
tk1 := testkit.NewTestKit(c, s.store)
tk2 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec("drop table if exists t")
tk1.MustExec("create table t(a int)")
tk1.MustExec("insert into t values (1)")
tk1.MustExec("begin")
// 'For update' would act on the second select.
tk1.MustQuery("select 1 as a union select a from t for update")
tk2.MustExec("use test")
tk2.MustExec("update t set a = a + 1")
// As tk1 uses select 'for update', it should detect the conflict and fail.
_, err := tk1.Exec("commit")
c.Assert(err, NotNil)
tk1.MustExec("begin")
tk1.MustQuery("select 1 as a union select a from t limit 5 for update")
tk1.MustQuery("select 1 as a union select a from t order by a for update")
tk2.MustExec("update t set a = a + 1")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
}
func (s *testSuite3) TestUnsignedDecimalOverflow(c *C) {
tests := []struct {
input interface{}
hasErr bool
err string
}{{
-1,
true,
"Out of range value for column",
}, {
"-1.1e-1",
true,
"Out of range value for column",
}, {
-1.1,
true,
"Out of range value for column",
}, {
-0,
false,
"",
},
}
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a decimal(10,2) unsigned)")
for _, t := range tests {
res, err := tk.Exec("insert into t values (?)", t.input)
if res != nil {
defer res.Close()
}
if t.hasErr {
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), t.err), IsTrue)
} else {
c.Assert(err, IsNil)
}
if res != nil {
c.Assert(res.Close(), IsNil)
}
}
tk.MustExec("set sql_mode=''")
tk.MustExec("delete from t")
tk.MustExec("insert into t values (?)", -1)
r := tk.MustQuery("select a from t limit 1")
r.Check(testkit.Rows("0.00"))
}
func (s *testSuite3) TestIndexJoinTableDualPanic(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists a")
tk.MustExec("create table a (f1 int, f2 varchar(32), primary key (f1))")
tk.MustExec("insert into a (f1,f2) values (1,'a'), (2,'b'), (3,'c')")
// TODO here: index join causes a data race on the txn.
tk.MustQuery("select /*+ inl_merge_join(a) */ a.* from a inner join (select 1 as k1,'k2-1' as k2) as k on a.f1=k.k1;").
Check(testkit.Rows("1 a"))
}
func (s *testSuite3) TestSortLeftJoinWithNullColumnInRightChildPanic(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(a int)")
tk.MustExec("create table t2(a int)")
tk.MustExec("insert into t1(a) select 1;")
tk.MustQuery("select b.n from t1 left join (select a as a, null as n from t2) b on b.a = t1.a order by t1.a").
Check(testkit.Rows("<nil>"))
}
func (s *testSuiteP1) TestUnionAutoSignedCast(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1,t2")
tk.MustExec("create table t1 (id int, i int, b bigint, d double, dd decimal)")
tk.MustExec("create table t2 (id int, i int unsigned, b bigint unsigned, d double unsigned, dd decimal unsigned)")
tk.MustExec("insert into t1 values(1, -1, -1, -1.1, -1)")
tk.MustExec("insert into t2 values(2, 1, 1, 1.1, 1)")
tk.MustQuery("select * from t1 union select * from t2 order by id").
Check(testkit.Rows("1 -1 -1 -1.1 -1", "2 1 1 1.1 1"))
tk.MustQuery("select id, i, b, d, dd from t2 union select id, i, b, d, dd from t1 order by id").
Check(testkit.Rows("1 0 0 0 -1", "2 1 1 1.1 1"))
tk.MustQuery("select id, i from t2 union select id, cast(i as unsigned int) from t1 order by id").
Check(testkit.Rows("1 18446744073709551615", "2 1"))
tk.MustQuery("select dd from t2 union all select dd from t2").
Check(testkit.Rows("1", "1"))
tk.MustExec("drop table if exists t3,t4")
tk.MustExec("create table t3 (id int, v int)")
tk.MustExec("create table t4 (id int, v double unsigned)")
tk.MustExec("insert into t3 values (1, -1)")
tk.MustExec("insert into t4 values (2, 1)")
tk.MustQuery("select id, v from t3 union select id, v from t4 order by id").
Check(testkit.Rows("1 -1", "2 1"))
tk.MustQuery("select id, v from t4 union select id, v from t3 order by id").
Check(testkit.Rows("1 0", "2 1"))
tk.MustExec("drop table if exists t5,t6,t7")
tk.MustExec("create table t5 (id int, v bigint unsigned)")
tk.MustExec("create table t6 (id int, v decimal)")
tk.MustExec("create table t7 (id int, v bigint)")
tk.MustExec("insert into t5 values (1, 1)")
tk.MustExec("insert into t6 values (2, -1)")
tk.MustExec("insert into t7 values (3, -1)")
tk.MustQuery("select id, v from t5 union select id, v from t6 order by id").
Check(testkit.Rows("1 1", "2 -1"))
tk.MustQuery("select id, v from t5 union select id, v from t7 union select id, v from t6 order by id").
Check(testkit.Rows("1 1", "2 -1", "3 -1"))
}
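// TestUpdateClustered runs multi-table UPDATE statements against clustered and non-clustered primary keys (including prefix keys) and verifies the results with admin check.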
func (s *testSuiteP1) TestUpdateClustered(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
type resultChecker struct {
check string
assert []string
}
for _, clustered := range []string{"", "clustered"} {
tests := []struct {
initSchema []string
initData []string
dml string
resultCheck []resultChecker
}{
{ // left join + update both + match & unmatched + pk
[]string{
"drop table if exists a, b",
"create table a (k1 int, k2 int, v int)",
fmt.Sprintf("create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) %s)", clustered),
},
[]string{
"insert into a values (1, 1, 1), (2, 2, 2)", // unmatched + matched
"insert into b values (2, 2, 2, 2)",
},
"update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.v = 20, b.v = 100, a.k1 = a.k1 + 1, b.k1 = b.k1 + 1, a.k2 = a.k2 + 2, b.k2 = b.k2 + 2",
[]resultChecker{
{
"select * from b",
[]string{"2 3 4 100"},
},
{
"select * from a",
[]string{"2 3 20", "3 4 20"},
},
},
},
{ // left join + update both + match & unmatched + pk
[]string{
"drop table if exists a, b",
"create table a (k1 int, k2 int, v int)",
fmt.Sprintf("create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) %s)", clustered),
},
[]string{
"insert into a values (1, 1, 1), (2, 2, 2)", // unmatched + matched
"insert into b values (2, 2, 2, 2)",
},
"update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100",
[]resultChecker{
{
"select * from b",
[]string{"2 3 4 100"},
},
{
"select * from a",
[]string{"2 3 20", "3 4 20"},
},
},
},
{ // left join + update both + match & unmatched + prefix pk
[]string{
"drop table if exists a, b",
"create table a (k1 varchar(100), k2 varchar(100), v varchar(100))",
fmt.Sprintf("create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) %s, key kk1(k1(1), v(1)))", clustered),
},
[]string{
"insert into a values ('11', '11', '11'), ('22', '22', '22')", // unmatched + matched
"insert into b values ('22', '22', '22', '22')",
},
"update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100",
[]resultChecker{
{
"select * from b",
[]string{"22 23 24 100"},
},
{
"select * from a",
[]string{"12 13 20", "23 24 20"},
},
},
},
{ // right join + update both + match & unmatched + prefix pk
[]string{
"drop table if exists a, b",
"create table a (k1 varchar(100), k2 varchar(100), v varchar(100))",
fmt.Sprintf("create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) %s, key kk1(k1(1), v(1)))", clustered),
},
[]string{
"insert into a values ('11', '11', '11'), ('22', '22', '22')", // unmatched + matched
"insert into b values ('22', '22', '22', '22')",
},
"update b right join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100",
[]resultChecker{
{
"select * from b",
[]string{"22 23 24 100"},
},
{
"select * from a",
[]string{"12 13 20", "23 24 20"},
},
},
},
{ // inner join + update both + match & unmatched + prefix pk
[]string{
"drop table if exists a, b",
"create table a (k1 varchar(100), k2 varchar(100), v varchar(100))",
fmt.Sprintf("create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) %s, key kk1(k1(1), v(1)))", clustered),
},
[]string{
"insert into a values ('11', '11', '11'), ('22', '22', '22')", // unmatched + matched
"insert into b values ('22', '22', '22', '22')",
},
"update b join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100",
[]resultChecker{
{
"select * from b",
[]string{"22 23 24 100"},
},
{
"select * from a",
[]string{"11 11 11", "23 24 20"},
},
},
},
{
[]string{
"drop table if exists a, b",
"create table a (k1 varchar(100), k2 varchar(100), v varchar(100))",
fmt.Sprintf("create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) %s, key kk1(k1(1), v(1)))", clustered),
},
[]string{
"insert into a values ('11', '11', '11'), ('22', '22', '22')", // unmatched + matched
"insert into b values ('22', '22', '22', '22')",
},
"update a set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, a.v = 20 where exists (select 1 from b where a.k1 = b.k1 and a.k2 = b.k2)",
[]resultChecker{
{
"select * from b",
[]string{"22 22 22 22"},
},
{
"select * from a",
[]string{"11 11 11", "23 24 20"},
},
},
},
}
for _, test := range tests {
for _, s := range test.initSchema {
tk.MustExec(s)
}
for _, s := range test.initData {
tk.MustExec(s)
}
tk.MustExec(test.dml)
for _, checker := range test.resultCheck {
tk.MustQuery(checker.check).Check(testkit.Rows(checker.assert...))
}
tk.MustExec("admin check table a")
tk.MustExec("admin check table b")
}
}
}
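// TestUpdateJoin covers multi-table UPDATE statements through joins: matched and unmatched rows, NULL keys, primary keys on the joined table, and generated columns.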
func (s *testSuite6) TestUpdateJoin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7")
tk.MustExec("create table t1(k int, v int)")
tk.MustExec("create table t2(k int, v int)")
tk.MustExec("create table t3(id int auto_increment, k int, v int, primary key(id))")
tk.MustExec("create table t4(k int, v int)")
tk.MustExec("create table t5(v int, k int, primary key(k))")
tk.MustExec("insert into t1 values (1, 1)")
tk.MustExec("insert into t4 values (3, 3)")
tk.MustExec("create table t6 (id int, v longtext)")
tk.MustExec("create table t7 (x int, id int, v longtext, primary key(id))")
// test the normal case: update one row in a single table.
tk.MustExec("update t1 set v = 0 where k = 1")
tk.MustQuery("select k, v from t1 where k = 1").Check(testkit.Rows("1 0"))
// test the case where a table with auto_increment or not-null columns is the right table of a left join.
tk.MustExec("update t1 left join t3 on t1.k = t3.k set t1.v = 1")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 1"))
tk.MustQuery("select id, k, v from t3").Check(testkit.Rows())
// test a left join where the right table has no matching record but its columns are updated.
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t1.v = t2.v, t2.v = 3")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 <nil>"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// test the case where the update of the left table references right-table data while the right-table columns are also modified.
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t2.v = 3, t1.v = t2.v")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 <nil>"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// test a right join where the left table has no matching record but its columns are updated.
tk.MustExec("update t2 right join t1 on t2.k = t1.k set t2.v = 4, t1.v = 0")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 0"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// test a right join and a left join in the same statement.
tk.MustExec("update t1 left join t2 on t1.k = t2.k right join t4 on t4.k = t2.k set t1.v = 4, t2.v = 4, t4.v = 4")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 0"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
tk.MustQuery("select k, v from t4").Check(testkit.Rows("3 4"))
// test a normal left join where the right table has matching rows.
tk.MustExec("insert t2 values (1, 10)")
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t2.v = 11")
tk.MustQuery("select k, v from t2").Check(testkit.Rows("1 11"))
// test joining the same table repeatedly and updating the unmatched records.
tk.MustExec("update t1 t11 left join t2 on t11.k = t2.k left join t1 t12 on t2.v = t12.k set t12.v = 233, t11.v = 111")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 111"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows("1 11"))
// test the left join case where the left table has records but all of them are null.
tk.MustExec("delete from t1")
tk.MustExec("delete from t2")
tk.MustExec("insert into t1 values (null, null)")
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t1.v = 1")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("<nil> 1"))
// test the case where the right table of the left join has a primary key.
tk.MustExec("insert t5 values(0, 0)")
tk.MustExec("update t1 left join t5 on t1.k = t5.k set t1.v = 2")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("<nil> 2"))
tk.MustQuery("select k, v from t5").Check(testkit.Rows("0 0"))
tk.MustExec("insert into t6 values (1, NULL)")
tk.MustExec("insert into t7 values (5, 1, 'a')")
tk.MustExec("update t6, t7 set t6.v = t7.v where t6.id = t7.id and t7.x = 5")
tk.MustQuery("select v from t6").Check(testkit.Rows("a"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(id int primary key, v int, gv int GENERATED ALWAYS AS (v * 2) STORED)")
tk.MustExec("create table t2(id int, v int)")
tk.MustExec("update t1 tt1 inner join (select count(t1.id) a, t1.id from t1 left join t2 on t1.id = t2.id group by t1.id) x on tt1.id = x.id set tt1.v = tt1.v + x.a")
}
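// TestMaxOneRow checks that a scalar subquery returning more than one row reports error 1242.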
func (s *testSuite3) TestMaxOneRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1`)
tk.MustExec(`drop table if exists t2`)
tk.MustExec(`create table t1(a double, b double);`)
tk.MustExec(`create table t2(a double, b double);`)
tk.MustExec(`insert into t1 values(1, 1), (2, 2), (3, 3);`)
tk.MustExec(`insert into t2 values(0, 0);`)
tk.MustExec(`set @@tidb_init_chunk_size=1;`)
rs, err := tk.Exec(`select (select t1.a from t1 where t1.a > t2.a) as a from t2;`)
c.Assert(err, IsNil)
err = rs.Next(context.TODO(), rs.NewChunk())
c.Assert(err.Error(), Equals, "[executor:1242]Subquery returns more than 1 row")
c.Assert(rs.Close(), IsNil)
}
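// TestCurrentTimestampValueSelection checks the fractional-second precision of timestamp columns defaulted to current_timestamp and their on-update behavior.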
func (s *testSuiteP2) TestCurrentTimestampValueSelection(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t,t1")
tk.MustExec("create table t (id int, t0 timestamp null default current_timestamp, t1 timestamp(1) null default current_timestamp(1), t2 timestamp(2) null default current_timestamp(2) on update current_timestamp(2))")
tk.MustExec("insert into t (id) values (1)")
rs := tk.MustQuery("select t0, t1, t2 from t where id = 1")
t0 := rs.Rows()[0][0].(string)
t1 := rs.Rows()[0][1].(string)
t2 := rs.Rows()[0][2].(string)
c.Assert(len(strings.Split(t0, ".")), Equals, 1)
c.Assert(len(strings.Split(t1, ".")[1]), Equals, 1)
c.Assert(len(strings.Split(t2, ".")[1]), Equals, 2)
tk.MustQuery("select id from t where t0 = ?", t0).Check(testkit.Rows("1"))
tk.MustQuery("select id from t where t1 = ?", t1).Check(testkit.Rows("1"))
tk.MustQuery("select id from t where t2 = ?", t2).Check(testkit.Rows("1"))
time.Sleep(time.Second)
tk.MustExec("update t set t0 = now() where id = 1")
rs = tk.MustQuery("select t2 from t where id = 1")
newT2 := rs.Rows()[0][0].(string)
c.Assert(newT2 != t2, IsTrue)
tk.MustExec("create table t1 (id int, a timestamp, b timestamp(2), c timestamp(3))")
tk.MustExec("insert into t1 (id, a, b, c) values (1, current_timestamp(2), current_timestamp, current_timestamp(3))")
rs = tk.MustQuery("select a, b, c from t1 where id = 1")
a := rs.Rows()[0][0].(string)
b := rs.Rows()[0][1].(string)
d := rs.Rows()[0][2].(string)
c.Assert(len(strings.Split(a, ".")), Equals, 1)
c.Assert(strings.Split(b, ".")[1], Equals, "00")
c.Assert(len(strings.Split(d, ".")[1]), Equals, 3)
}
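// TestRowID checks that _tidb_rowid can be selected when the table has no integer clustered primary key.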
func (s *testSuite3) TestRowID(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly
tk.MustExec(`create table t(a varchar(10), b varchar(10), c varchar(1), index idx(a, b, c));`)
tk.MustExec(`insert into t values('a', 'b', 'c');`)
tk.MustExec(`insert into t values('a', 'b', 'c');`)
tk.MustQuery(`select b, _tidb_rowid from t use index(idx) where a = 'a';`).Check(testkit.Rows(
`b 1`,
`b 2`,
))
tk.MustExec(`begin;`)
tk.MustExec(`select * from t for update`)
tk.MustQuery(`select distinct b from t use index(idx) where a = 'a';`).Check(testkit.Rows(`b`))
tk.MustExec(`commit;`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a varchar(5) primary key)`)
tk.MustExec(`insert into t values('a')`)
tk.MustQuery("select *, _tidb_rowid from t use index(`primary`) where _tidb_rowid=1").Check(testkit.Rows("a 1"))
}
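// TestDoSubquery checks that a DO statement with a subquery runs without error and returns no result set.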
func (s *testSuite3) TestDoSubquery(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a int)`)
_, err := tk.Exec(`do 1 in (select * from t)`)
c.Assert(err, IsNil, Commentf("err %v", err))
tk.MustExec(`insert into t values(1)`)
r, err := tk.Exec(`do 1 in (select * from t)`)
c.Assert(err, IsNil, Commentf("err %v", err))
c.Assert(r, IsNil, Commentf("result of Do not empty"))
}
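// TestSubqueryTableAlias checks derived tables with empty or duplicate aliases under different sql_mode settings.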
func (s *testSuite3) TestSubqueryTableAlias(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec("set sql_mode = ''")
tk.MustGetErrCode("select a, b from (select 1 a) ``, (select 2 b) ``;", mysql.ErrDerivedMustHaveAlias)
tk.MustGetErrCode("select a, b from (select 1 a) `x`, (select 2 b) `x`;", mysql.ErrNonuniqTable)
tk.MustGetErrCode("select a, b from (select 1 a), (select 2 b);", mysql.ErrDerivedMustHaveAlias)
// ambiguous column name
tk.MustGetErrCode("select a from (select 1 a) ``, (select 2 a) ``;", mysql.ErrDerivedMustHaveAlias)
tk.MustGetErrCode("select a from (select 1 a) `x`, (select 2 a) `x`;", mysql.ErrNonuniqTable)
tk.MustGetErrCode("select x.a from (select 1 a) `x`, (select 2 a) `x`;", mysql.ErrNonuniqTable)
tk.MustGetErrCode("select a from (select 1 a), (select 2 a);", mysql.ErrDerivedMustHaveAlias)
tk.MustExec("set sql_mode = 'oracle';")
tk.MustQuery("select a, b from (select 1 a) ``, (select 2 b) ``;").Check(testkit.Rows("1 2"))
tk.MustQuery("select a, b from (select 1 a) `x`, (select 2 b) `x`;").Check(testkit.Rows("1 2"))
tk.MustQuery("select a, b from (select 1 a), (select 2 b);").Check(testkit.Rows("1 2"))
// ambiguous column name
tk.MustGetErrCode("select a from (select 1 a) ``, (select 2 a) ``;", mysql.ErrNonUniq)
tk.MustGetErrCode("select a from (select 1 a) `x`, (select 2 a) `x`;", mysql.ErrNonUniq)
tk.MustGetErrCode("select x.a from (select 1 a) `x`, (select 2 a) `x`;", mysql.ErrNonUniq)
tk.MustGetErrCode("select a from (select 1 a), (select 2 a);", mysql.ErrNonUniq)
}
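// TestTSOFail uses a failpoint to make TSO retrieval fail and checks that the query returns an error.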
func (s *testSerialSuite) TestTSOFail(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a int)`)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/mockGetTSFail", "return"), IsNil)
ctx := failpoint.WithHook(context.Background(), func(ctx context.Context, fpname string) bool {
return fpname == "github.com/pingcap/tidb/session/mockGetTSFail"
})
_, err := tk.Se.Execute(ctx, `select * from t`)
c.Assert(err, NotNil)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/mockGetTSFail"), IsNil)
}
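// TestSelectHashPartitionTable checks queries on a hash-partitioned table, including negative partition keys.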
func (s *testSuite3) TestSelectHashPartitionTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists th`)
tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
tk.MustExec(`create table th (a int, b int) partition by hash(a) partitions 3;`)
defer tk.MustExec(`drop table if exists th`)
tk.MustExec(`insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);`)
tk.MustExec("insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);")
tk.MustQuery("select b from th order by a").Check(testkit.Rows("-8", "-7", "-6", "-5", "-4", "-3", "-2", "-1", "0", "1", "2", "3", "4", "5", "6", "7", "8"))
tk.MustQuery(" select * from th where a=-2;").Check(testkit.Rows("-2 -2"))
tk.MustQuery(" select * from th where a=5;").Check(testkit.Rows("5 5"))
}
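// TestSelectPartition checks the `partition (...)` selection syntax on hash, range, and list partitioned tables.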
func (s *testSuiteP1) TestSelectPartition(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists th, tr, tl`)
tk.MustExec("set @@session.tidb_enable_list_partition = ON;")
tk.MustExec(`create table th (a int, b int) partition by hash(a) partitions 3;`)
tk.MustExec(`create table tr (a int, b int)
partition by range (a) (
partition r0 values less than (4),
partition r1 values less than (7),
partition r3 values less than maxvalue)`)
tk.MustExec(`create table tl (a int, b int, unique index idx(a)) partition by list (a) (
partition p0 values in (3,5,6,9,17),
partition p1 values in (1,2,10,11,19,20),
partition p2 values in (4,12,13,14,18),
partition p3 values in (7,8,15,16,null));`)
defer tk.MustExec(`drop table if exists th, tr, tl`)
tk.MustExec(`insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);`)
tk.MustExec("insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);")
tk.MustExec(`insert into tr values (-3,-3),(3,3),(4,4),(7,7),(8,8);`)
tk.MustExec(`insert into tl values (3,3),(1,1),(4,4),(7,7),(8,8),(null,null);`)
// select 1 partition.
tk.MustQuery("select b from th partition (p0) order by a").Check(testkit.Rows("-6", "-3", "0", "3", "6"))
tk.MustQuery("select b from tr partition (r0) order by a").Check(testkit.Rows("-3", "3"))
tk.MustQuery("select b from tl partition (p0) order by a").Check(testkit.Rows("3"))
tk.MustQuery("select b from th partition (p0,P0) order by a").Check(testkit.Rows("-6", "-3", "0", "3", "6"))
tk.MustQuery("select b from tr partition (r0,R0,r0) order by a").Check(testkit.Rows("-3", "3"))
tk.MustQuery("select b from tl partition (p0,P0,p0) order by a").Check(testkit.Rows("3"))
// select multiple partitions.
tk.MustQuery("select b from th partition (P2,p0) order by a").Check(testkit.Rows("-8", "-6", "-5", "-3", "-2", "0", "2", "3", "5", "6", "8"))
tk.MustQuery("select b from tr partition (r1,R3) order by a").Check(testkit.Rows("4", "7", "8"))
tk.MustQuery("select b from tl partition (p0,P3) order by a").Check(testkit.Rows("<nil>", "3", "7", "8"))
// test the error for selecting an unknown partition.
err := tk.ExecToErr("select b from th partition (p0,p4)")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'p4' in table 'th'")
err = tk.ExecToErr("select b from tr partition (r1,r4)")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'r4' in table 'tr'")
err = tk.ExecToErr("select b from tl partition (p0,p4)")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'p4' in table 'tl'")
// test selecting from a partitioned table in a transaction.
tk.MustExec("begin")
tk.MustExec("insert into th values (10,10),(11,11)")
tk.MustQuery("select a, b from th where b>10").Check(testkit.Rows("11 11"))
tk.MustExec("commit")
tk.MustQuery("select a, b from th where b>10").Check(testkit.Rows("11 11"))
// test a partition function that is a scalar function.
tk.MustExec("drop table if exists tscalar")
tk.MustExec(`create table tscalar (c1 int) partition by range (c1 % 30) (
partition p0 values less than (0),
partition p1 values less than (10),
partition p2 values less than (20),
partition pm values less than (maxvalue));`)
tk.MustExec("insert into tscalar values(0), (10), (40), (50), (55)")
// test IN expression
tk.MustExec("insert into tscalar values(-0), (-10), (-40), (-50), (-55)")
tk.MustQuery("select * from tscalar where c1 in (55, 55)").Check(testkit.Rows("55"))
tk.MustQuery("select * from tscalar where c1 in (40, 40)").Check(testkit.Rows("40"))
tk.MustQuery("select * from tscalar where c1 in (40)").Check(testkit.Rows("40"))
tk.MustQuery("select * from tscalar where c1 in (-40)").Check(testkit.Rows("-40"))
tk.MustQuery("select * from tscalar where c1 in (-40, -40)").Check(testkit.Rows("-40"))
tk.MustQuery("select * from tscalar where c1 in (-1)").Check(testkit.Rows())
}
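// TestDeletePartition checks that DELETE ... PARTITION only removes rows from the named partitions.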
func (s *testSuiteP1) TestDeletePartition(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1`)
tk.MustExec(`create table t1 (a int) partition by range (a) (
partition p0 values less than (10),
partition p1 values less than (20),
partition p2 values less than (30),
partition p3 values less than (40),
partition p4 values less than MAXVALUE
)`)
tk.MustExec("insert into t1 values (1),(11),(21),(31)")
tk.MustExec("delete from t1 partition (p4)")
tk.MustQuery("select * from t1 order by a").Check(testkit.Rows("1", "11", "21", "31"))
tk.MustExec("delete from t1 partition (p0) where a > 10")
tk.MustQuery("select * from t1 order by a").Check(testkit.Rows("1", "11", "21", "31"))
tk.MustExec("delete from t1 partition (p0,p1,p2)")
tk.MustQuery("select * from t1").Check(testkit.Rows("31"))
}
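// TestSelectView checks view resolution after the underlying table is dropped, recreated, or altered, and views that use window functions.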
func (s *testSuite) TestSelectView(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table view_t (a int,b int)")
tk.MustExec("insert into view_t values(1,2)")
tk.MustExec("create definer='root'@'localhost' view view1 as select * from view_t")
tk.MustExec("create definer='root'@'localhost' view view2(c,d) as select * from view_t")
tk.MustExec("create definer='root'@'localhost' view view3(c,d) as select a,b from view_t")
tk.MustExec("create definer='root'@'localhost' view view4 as select * from (select * from (select * from view_t) tb1) tb;")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view4;").Check(testkit.Rows("1 2"))
tk.MustExec("drop table view_t;")
tk.MustExec("create table view_t(c int,d int)")
err := tk.ExecToErr("select * from view1")
c.Assert(err.Error(), Equals, "[planner:1356]View 'test.view1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them")
err = tk.ExecToErr("select * from view2")
c.Assert(err.Error(), Equals, "[planner:1356]View 'test.view2' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them")
err = tk.ExecToErr("select * from view3")
c.Assert(err.Error(), Equals, plannercore.ErrViewInvalid.GenWithStackByArgs("test", "view3").Error())
tk.MustExec("drop table view_t;")
tk.MustExec("create table view_t(a int,b int,c int)")
tk.MustExec("insert into view_t values(1,2,3)")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view4;").Check(testkit.Rows("1 2"))
tk.MustExec("alter table view_t drop column a")
tk.MustExec("alter table view_t add column a int after b")
tk.MustExec("update view_t set a=1;")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view4;").Check(testkit.Rows("1 2"))
tk.MustExec("drop table view_t;")
tk.MustExec("drop view view1,view2,view3,view4;")
tk.MustExec("set @@tidb_enable_window_function = 1")
defer func() {
tk.MustExec("set @@tidb_enable_window_function = 0")
}()
tk.MustExec("create table t(a int, b int)")
tk.MustExec("insert into t values (1,1),(1,2),(2,1),(2,2)")
tk.MustExec("create definer='root'@'localhost' view v as select a, first_value(a) over(rows between 1 preceding and 1 following), last_value(a) over(rows between 1 preceding and 1 following) from t")
result := tk.MustQuery("select * from v")
result.Check(testkit.Rows("1 1 1", "1 1 2", "2 1 2", "2 2 2"))
tk.MustExec("drop view v;")
}
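// The suites below share the same teardown: drop every table, view, and sequence left in the test schema.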
type testSuite2 struct {
*baseTestSuite
}
func (s *testSuite2) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite3 struct {
*baseTestSuite
}
func (s *testSuite3) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite4 struct {
*baseTestSuite
}
func (s *testSuite4) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite5 struct {
*baseTestSuite
}
func (s *testSuite5) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite6 struct {
*baseTestSuite
}
func (s *testSuite6) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite7 struct {
*baseTestSuite
}
func (s *testSuite7) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite8 struct {
*baseTestSuite
}
func (s *testSuite8) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSerialSuite1 struct {
*baseTestSuite
}
func (s *testSerialSuite1) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
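// TestStrToDateBuiltin checks str_to_date with various format specifiers, two-digit years, and literal separators.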
func (s *testSuiteP2) TestStrToDateBuiltin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery(`select str_to_date('20190101','%Y%m%d%!') from dual`).Check(testkit.Rows("2019-01-01"))
tk.MustQuery(`select str_to_date('20190101','%Y%m%d%f') from dual`).Check(testkit.Rows("2019-01-01 00:00:00.000000"))
tk.MustQuery(`select str_to_date('20190101','%Y%m%d%H%i%s') from dual`).Check(testkit.Rows("2019-01-01 00:00:00"))
tk.MustQuery(`select str_to_date('18/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('a18/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('69/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('70/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("1970-10-22"))
tk.MustQuery(`select str_to_date('8/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2008-10-22"))
tk.MustQuery(`select str_to_date('8/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2008-10-22"))
tk.MustQuery(`select str_to_date('18/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('a18/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('69/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('70/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("1970-10-22"))
tk.MustQuery(`select str_to_date('018/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("0018-10-22"))
tk.MustQuery(`select str_to_date('2018/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('018/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18/10/22','%y0/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18/10/22','%Y0/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18a/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18a/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('20188/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018510522','%Y5%m5%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018^10^22','%Y^%m^%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018@10@22','%Y@%m@%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018%10%22','%Y%%m%%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018(10(22','%Y(%m(%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018\10\22','%Y\%m\%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018=10=22','%Y=%m=%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018+10+22','%Y+%m+%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018_10_22','%Y_%m_%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('69510522','%y5%m5%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('69^10^22','%y^%m^%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('18@10@22','%y@%m@%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18%10%22','%y%%m%%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18(10(22','%y(%m(%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18\10\22','%y\%m\%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18+10+22','%y+%m+%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18=10=22','%y=%m=%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18_10_22','%y_%m_%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`SELECT STR_TO_DATE('2020-07-04 11:22:33 PM', '%Y-%m-%d %r')`).Check(testkit.Rows("2020-07-04 23:22:33"))
tk.MustQuery(`SELECT STR_TO_DATE('2020-07-04 12:22:33 AM', '%Y-%m-%d %r')`).Check(testkit.Rows("2020-07-04 00:22:33"))
tk.MustQuery(`SELECT STR_TO_DATE('2020-07-04 12:22:33', '%Y-%m-%d %T')`).Check(testkit.Rows("2020-07-04 12:22:33"))
tk.MustQuery(`SELECT STR_TO_DATE('2020-07-04 00:22:33', '%Y-%m-%d %T')`).Check(testkit.Rows("2020-07-04 00:22:33"))
}
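// TestAddDateBuiltinWithWarnings checks that date_add on an invalid date under NO_ZERO_DATE returns NULL with a warning.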
func (s *testSuiteP2) TestAddDateBuiltinWithWarnings(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@sql_mode='NO_ZERO_DATE'")
result := tk.MustQuery(`select date_add('2001-01-00', interval -2 hour);`)
result.Check(testkit.Rows("<nil>"))
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Incorrect datetime value: '2001-01-00'"))
}
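// TestStrToDateBuiltinWithWarnings checks that str_to_date on a zero-year date under NO_ZERO_DATE returns NULL with a warning.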
func (s *testSuiteP2) TestStrToDateBuiltinWithWarnings(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@sql_mode='NO_ZERO_DATE'")
tk.MustExec("use test")
tk.MustQuery(`SELECT STR_TO_DATE('0000-1-01', '%Y-%m-%d');`).Check(testkit.Rows("<nil>"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1411 Incorrect datetime value: '0000-1-01' for function str_to_date"))
}
func (s *testSuiteP2) TestReadPartitionedTable(c *C) {
// Test the three readers (table reader, index reader, index lookup) on a partitioned table.
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists pt")
tk.MustExec("create table pt (a int, b int, index i_b(b)) partition by range (a) (partition p1 values less than (2), partition p2 values less than (4), partition p3 values less than (6))")
for i := 0; i < 6; i++ {
tk.MustExec(fmt.Sprintf("insert into pt values(%d, %d)", i, i))
}
// Table reader
tk.MustQuery("select * from pt order by a").Check(testkit.Rows("0 0", "1 1", "2 2", "3 3", "4 4", "5 5"))
// Index reader
tk.MustQuery("select b from pt where b = 3").Check(testkit.Rows("3"))
// Index lookup
tk.MustQuery("select a from pt where b = 3").Check(testkit.Rows("3"))
}
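// TestSplitRegion checks the `split table` and `split region` syntax: argument validation, region count limits, and splitting tables, indexes, and partitioned tables.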
func (s *testSplitTable) TestSplitRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t, t1")
tk.MustExec("create table t(a varchar(100),b int, index idx1(b,a))")
tk.MustExec(`split table t index idx1 by (10000,"abcd"),(10000000);`)
_, err := tk.Exec(`split table t index idx1 by ("abcd");`)
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.WarnDataTruncated))
// Test for split index region.
// Check the error when the min value is greater than the max value.
tk.MustExec(`split table t index idx1 between (0) and (1000000000) regions 10`)
tk.MustGetErrCode(`split table t index idx1 between (2,'a') and (1,'c') regions 10`, errno.ErrInvalidSplitRegionRanges)
// Check min value is invalid.
_, err = tk.Exec(`split table t index idx1 between () and (1) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index `idx1` region lower value count should more than 0")
// Check max value is invalid.
_, err = tk.Exec(`split table t index idx1 between (1) and () regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index `idx1` region upper value count should more than 0")
// Check pre-split region num is too large.
_, err = tk.Exec(`split table t index idx1 between (0) and (1000000000) regions 10000`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index region num exceeded the limit 1000")
// Check pre-split region num 0 is invalid.
_, err = tk.Exec(`split table t index idx1 between (0) and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index region num should more than 0")
// Test truncate error msg.
_, err = tk.Exec(`split table t index idx1 between ("aa") and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[types:1265]Incorrect value: 'aa' for column 'b'")
// Test for split table region.
tk.MustExec(`split table t between (0) and (1000000000) regions 10`)
// Check the lower value is more than the upper value.
tk.MustGetErrCode(`split table t between (2) and (1) regions 10`, errno.ErrInvalidSplitRegionRanges)
// Check the lower value is invalid.
_, err = tk.Exec(`split table t between () and (1) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region lower value count should be 1")
// Check upper value is invalid.
_, err = tk.Exec(`split table t between (1) and () regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region upper value count should be 1")
// Check pre-split region num is too large.
_, err = tk.Exec(`split table t between (0) and (1000000000) regions 10000`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region num exceeded the limit 1000")
// Check pre-split region num 0 is invalid.
_, err = tk.Exec(`split table t between (0) and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region num should more than 0")
// Test truncate error msg.
_, err = tk.Exec(`split table t between ("aa") and (1000000000) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[types:1265]Incorrect value: 'aa' for column '_tidb_rowid'")
// Test split table region step is too small.
tk.MustGetErrCode(`split table t between (0) and (100) regions 10`, errno.ErrInvalidSplitRegionRanges)
// Test split region by syntax.
tk.MustExec(`split table t by (0),(1000),(1000000)`)
// Test split region twice to test for multiple batch split region requests.
tk.MustExec("create table t1(a int, b int)")
tk.MustQuery("split table t1 between(0) and (10000) regions 10;").Check(testkit.Rows("9 1"))
tk.MustQuery("split table t1 between(10) and (10010) regions 5;").Check(testkit.Rows("4 1"))
// Test split region for partition table.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int) partition by hash(a) partitions 5;")
tk.MustQuery("split table t between (0) and (1000000) regions 5;").Check(testkit.Rows("20 1"))
// Test for `split for region` syntax.
tk.MustQuery("split region for partition table t between (1000000) and (100000000) regions 10;").Check(testkit.Rows("45 1"))
// Test split region for partition table with specified partition.
tk.MustQuery("split table t partition (p1,p2) between (100000000) and (1000000000) regions 5;").Check(testkit.Rows("8 1"))
// Test for `split for region` syntax.
tk.MustQuery("split region for partition table t partition (p3,p4) between (100000000) and (1000000000) regions 5;").Check(testkit.Rows("8 1"))
}
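// TestSplitRegionEdgeCase checks splitting over the full bigint range and the out-of-range error for an int column.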
func (s *testSplitTable) TestSplitRegionEdgeCase(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a bigint(20) auto_increment primary key);")
tk.MustExec("split table t between (-9223372036854775808) and (9223372036854775807) regions 16;")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int(20) auto_increment primary key);")
tk.MustGetErrCode("split table t between (-9223372036854775808) and (9223372036854775807) regions 16;", errno.ErrDataOutOfRange)
}
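// TestClusterIndexSplitTableIntegration checks `split table` argument validation and success cases on tables with clustered composite primary keys.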
func (s *testSplitTable) TestClusterIndexSplitTableIntegration(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("drop database if exists test_cluster_index_index_split_table_integration;")
tk.MustExec("create database test_cluster_index_index_split_table_integration;")
tk.MustExec("use test_cluster_index_index_split_table_integration;")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("create table t (a varchar(255), b double, c int, primary key (a, b));")
// Value list lengths do not match.
lowerMsg := "Split table region lower value count should be 2"
upperMsg := "Split table region upper value count should be 2"
tk.MustGetErrMsg("split table t between ('aaa') and ('aaa', 100.0) regions 10;", lowerMsg)
tk.MustGetErrMsg("split table t between ('aaa', 1.0) and ('aaa', 100.0, 11) regions 10;", upperMsg)
// Value types do not match.
errMsg := "[types:1265]Incorrect value: 'aaa' for column 'b'"
tk.MustGetErrMsg("split table t between ('aaa', 0.0) and (100.0, 'aaa') regions 10;", errMsg)
// lower bound >= upper bound.
errMsg = "[executor:8212]Failed to split region ranges: Split table `t` region lower value (aaa,0) should less than the upper value (aaa,0)"
tk.MustGetErrMsg("split table t between ('aaa', 0.0) and ('aaa', 0.0) regions 10;", errMsg)
errMsg = "[executor:8212]Failed to split region ranges: Split table `t` region lower value (bbb,0) should less than the upper value (aaa,0)"
tk.MustGetErrMsg("split table t between ('bbb', 0.0) and ('aaa', 0.0) regions 10;", errMsg)
// Exceeds the region limit of 1000.
errMsg = "Split table region num exceeded the limit 1000"
tk.MustGetErrMsg("split table t between ('aaa', 0.0) and ('aaa', 0.1) regions 100000;", errMsg)
// Split on null values.
errMsg = "[planner:1048]Column 'a' cannot be null"
tk.MustGetErrMsg("split table t between (null, null) and (null, null) regions 1000;", errMsg)
tk.MustGetErrMsg("split table t by (null, null);", errMsg)
// Success.
tk.MustExec("split table t between ('aaa', 0.0) and ('aaa', 100.0) regions 10;")
tk.MustExec("split table t by ('aaa', 0.0), ('aaa', 20.0), ('aaa', 100.0);")
tk.MustExec("split table t by ('aaa', 100.0), ('qqq', 20.0), ('zzz', 100.0), ('zzz', 1000.0);")
tk.MustExec("drop table t;")
tk.MustExec("create table t (a int, b int, c int, d int, primary key(a, c, d));")
tk.MustQuery("split table t between (0, 0, 0) and (0, 0, 1) regions 1000;").Check(testkit.Rows("999 1"))
tk.MustExec("drop table t;")
tk.MustExec("create table t (a int, b int, c int, d int, primary key(d, a, c));")
tk.MustQuery("split table t by (0, 0, 0), (1, 2, 3), (65535, 65535, 65535);").Check(testkit.Rows("3 1"))
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a varchar(255), b decimal, c int, primary key (a, b));")
errMsg = "[types:1265]Incorrect value: '' for column 'b'"
tk.MustGetErrMsg("split table t by ('aaa', '')", errMsg)
}
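// TestClusterIndexShowTableRegion checks the region start keys reported by `show table ... regions` for tables with clustered primary keys.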
func (s *testSplitTable) TestClusterIndexShowTableRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("set global tidb_scatter_region = 1")
tk.MustExec("drop database if exists cluster_index_regions;")
tk.MustExec("create database cluster_index_regions;")
tk.MustExec("use cluster_index_regions;")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("create table t (a int, b int, c int, primary key(a, b));")
tk.MustExec("insert t values (1, 1, 1), (2, 2, 2);")
tk.MustQuery("split table t between (1, 0) and (2, 3) regions 2;").Check(testkit.Rows("1 1"))
rows := tk.MustQuery("show table t regions").Rows()
tbl := testGetTableByName(c, tk.Se, "cluster_index_regions", "t")
// Check the region start key.
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_r_03800000000000000183800000000000", tbl.Meta().ID))
tk.MustExec("drop table t;")
tk.MustExec("create table t (a int, b int);")
tk.MustQuery("split table t between (0) and (100000) regions 2;").Check(testkit.Rows("1 1"))
rows = tk.MustQuery("show table t regions").Rows()
tbl = testGetTableByName(c, tk.Se, "cluster_index_regions", "t")
// Check the region start key is int64.
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_r_50000", tbl.Meta().ID))
}
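// TestClusterIndexOuterJoinElimination checks that an outer join on the full clustered primary key is eliminated from the plan.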
func (s *testSuiteWithData) TestClusterIndexOuterJoinElimination(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("create table t (a int, b int, c int, primary key(a,b))")
rows := tk.MustQuery(`explain format = 'brief' select t1.a from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b`).Rows()
rowStrs := s.testData.ConvertRowsToStrings(rows)
for _, row := range rowStrs {
// outer join has been eliminated.
c.Assert(strings.Index(row, "Join"), Equals, -1)
}
}
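// TestShowTableRegion checks `show table ... regions` output after various split operations on normal, pre-split, and partitioned tables.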
func (s *testSplitTable) TestShowTableRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_regions")
tk.MustExec("set global tidb_scatter_region = 1")
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("create table t_regions (a int key, b int, c int, index idx(b), index idx2(c))")
_, err := tk.Exec("split partition table t_regions partition (p1,p2) index idx between (0) and (20000) regions 2;")
c.Assert(err.Error(), Equals, plannercore.ErrPartitionClauseOnNonpartitioned.Error())
// Test show table regions.
tk.MustQuery(`split table t_regions between (-10000) and (10000) regions 4;`).Check(testkit.Rows("4 1"))
re := tk.MustQuery("show table t_regions regions")
rows := re.Rows()
// Table t_regions should have 5 regions now.
// 4 regions to store record data.
// 1 region to store index data.
c.Assert(len(rows), Equals, 5)
c.Assert(len(rows[0]), Equals, 11)
tbl := testGetTableByName(c, tk.Se, "test", "t_regions")
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[4][2], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
// Test show table index regions.
tk.MustQuery(`split table t_regions index idx between (-1000) and (1000) regions 4;`).Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t_regions index idx regions")
rows = re.Rows()
// The index `idx` of table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
// Check the region start key.
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d.*", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
re = tk.MustQuery("show table t_regions regions")
rows = re.Rows()
// Table t_regions should have 9 regions now:
// 4 regions to store record data,
// 4 regions to store index idx data,
// 1 region to store index idx2 data.
c.Assert(len(rows), Equals, 9)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[4][1], Matches, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[7][2], Equals, fmt.Sprintf("t_%d_i_2_", tbl.Meta().ID))
c.Assert(rows[8][2], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
// Test an unsigned primary key and wait for the scatter to finish.
tk.MustExec("drop table if exists t_regions")
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("create table t_regions (a int unsigned key, b int, index idx(b))")
// Test show table regions.
tk.MustExec(`set @@session.tidb_wait_split_region_finish=1;`)
tk.MustQuery(`split table t_regions by (2500),(5000),(7500);`).Check(testkit.Rows("3 1"))
re = tk.MustQuery("show table t_regions regions")
rows = re.Rows()
// Table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t_regions")
// Check the region start key.
c.Assert(rows[0][1], Matches, "t_.*")
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_2500", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_7500", tbl.Meta().ID))
// Test show table index regions.
tk.MustQuery(`split table t_regions index idx by (250),(500),(750);`).Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t_regions index idx regions")
rows = re.Rows()
// The index `idx` of table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
// Test show table regions for a partitioned table when split-region is disabled at table creation.
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
tk.MustExec("drop table if exists partition_t;")
tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][1], Matches, "t_.*")
// Test show table regions for a partitioned table when split-region is enabled at table creation.
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("set @@global.tidb_scatter_region=1;")
tk.MustExec("drop table if exists partition_t;")
tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 3)
tbl = testGetTableByName(c, tk.Se, "test", "partition_t")
partitionDef := tbl.Meta().GetPartitionInfo().Definitions
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[0].ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[1].ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[2].ID))
// Test splitting partition regions when adding new partitions.
tk.MustExec("drop table if exists partition_t;")
tk.MustExec(`create table partition_t (a int, b int,index(a)) PARTITION BY RANGE (a) (
PARTITION p0 VALUES LESS THAN (10),
PARTITION p1 VALUES LESS THAN (20),
PARTITION p2 VALUES LESS THAN (30));`)
tk.MustExec(`alter table partition_t add partition ( partition p3 values less than (40), partition p4 values less than (50) );`)
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 5)
tbl = testGetTableByName(c, tk.Se, "test", "partition_t")
partitionDef = tbl.Meta().GetPartitionInfo().Definitions
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[0].ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[1].ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[2].ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[3].ID))
c.Assert(rows[4][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[4].ID))
// Test pre-splitting table regions at table creation.
tk.MustExec("drop table if exists t_pre")
tk.MustExec("create table t_pre (a int, b int) shard_row_id_bits = 2 pre_split_regions=2;")
re = tk.MustQuery("show table t_pre regions")
rows = re.Rows()
// Table t_pre should have 4 regions now.
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t_pre")
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_2305843009213693952", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_4611686018427387904", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_6917529027641081856", tbl.Meta().ID))
// Test pre-splitting table regions at creation of a partitioned table.
tk.MustExec("drop table if exists pt_pre")
tk.MustExec("create table pt_pre (a int, b int) shard_row_id_bits = 2 pre_split_regions=2 partition by hash(a) partitions 3;")
re = tk.MustQuery("show table pt_pre regions")
rows = re.Rows()
// Table pt_pre should have 12 regions now: 4 per partition for 3 partitions.
c.Assert(len(rows), Equals, 12)
tbl = testGetTableByName(c, tk.Se, "test", "pt_pre")
pi := tbl.Meta().GetPartitionInfo().Definitions
c.Assert(len(pi), Equals, 3)
for i, p := range pi {
c.Assert(rows[1+4*i][1], Equals, fmt.Sprintf("t_%d_r_2305843009213693952", p.ID))
c.Assert(rows[2+4*i][1], Equals, fmt.Sprintf("t_%d_r_4611686018427387904", p.ID))
c.Assert(rows[3+4*i][1], Equals, fmt.Sprintf("t_%d_r_6917529027641081856", p.ID))
}
defer atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
// Test split partition table.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int) partition by hash(a) partitions 5;")
tk.MustQuery("split table t between (0) and (4000000) regions 4;").Check(testkit.Rows("15 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 20)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i, p := range tbl.Meta().GetPartitionInfo().Definitions {
c.Assert(rows[i*4+0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*4+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*4+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*4+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
// Test split region for partition table with specified partition.
tk.MustQuery("split table t partition (p4) between (1000000) and (2000000) regions 5;").Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 24)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i := 0; i < 4; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*4+0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*4+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*4+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*4+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
for i := 4; i < 5; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*4+0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*4+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*4+2][1], Equals, fmt.Sprintf("t_%d_r_1200000", p.ID))
c.Assert(rows[i*4+3][1], Equals, fmt.Sprintf("t_%d_r_1400000", p.ID))
c.Assert(rows[i*4+4][1], Equals, fmt.Sprintf("t_%d_r_1600000", p.ID))
c.Assert(rows[i*4+5][1], Equals, fmt.Sprintf("t_%d_r_1800000", p.ID))
c.Assert(rows[i*4+6][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*4+7][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
// Test for show table partition regions.
for i := 0; i < 4; i++ {
re = tk.MustQuery(fmt.Sprintf("show table t partition (p%v) regions", i))
rows = re.Rows()
c.Assert(len(rows), Equals, 4)
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
re = tk.MustQuery("show table t partition (p0, p4) regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 12)
p := tbl.Meta().GetPartitionInfo().Definitions[0]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
p = tbl.Meta().GetPartitionInfo().Definitions[4]
c.Assert(rows[4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[5][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[6][1], Equals, fmt.Sprintf("t_%d_r_1200000", p.ID))
c.Assert(rows[7][1], Equals, fmt.Sprintf("t_%d_r_1400000", p.ID))
c.Assert(rows[8][1], Equals, fmt.Sprintf("t_%d_r_1600000", p.ID))
c.Assert(rows[9][1], Equals, fmt.Sprintf("t_%d_r_1800000", p.ID))
c.Assert(rows[10][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[11][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
// Test for duplicate partition names.
re = tk.MustQuery("show table t partition (p0, p0, p0) regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 4)
p = tbl.Meta().GetPartitionInfo().Definitions[0]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
// Test split partition table index.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int,index idx(a)) partition by hash(a) partitions 5;")
tk.MustQuery("split table t between (0) and (4000000) regions 4;").Check(testkit.Rows("20 1"))
tk.MustQuery("split table t index idx between (0) and (4000000) regions 4;").Check(testkit.Rows("20 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 40)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i := 0; i < 5; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*8+0][1], Equals, fmt.Sprintf("t_%d_r", p.ID))
c.Assert(rows[i*8+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*8+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*8+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
c.Assert(rows[i*8+4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*8+5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
// Test split index region for partition table with specified partition.
tk.MustQuery("split table t partition (p4) index idx between (0) and (1000000) regions 5;").Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 44)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i := 0; i < 4; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*8+0][1], Equals, fmt.Sprintf("t_%d_r", p.ID))
c.Assert(rows[i*8+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*8+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*8+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
c.Assert(rows[i*8+4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*8+5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
for i := 4; i < 5; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*8+0][1], Equals, fmt.Sprintf("t_%d_r", p.ID))
c.Assert(rows[i*8+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*8+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*8+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
c.Assert(rows[i*8+4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*8+5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+8][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+9][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+10][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+11][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
// Test show table partition regions on an unknown partition.
err = tk.QueryToErr("show table t partition (p_unknown) index idx regions")
c.Assert(terror.ErrorEqual(err, table.ErrUnknownPartition), IsTrue)
// Test show table partition index.
for i := 0; i < 4; i++ {
re = tk.MustQuery(fmt.Sprintf("show table t partition (p%v) index idx regions", i))
rows = re.Rows()
c.Assert(len(rows), Equals, 4)
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
re = tk.MustQuery("show table t partition (p3,p4) index idx regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 12)
p = tbl.Meta().GetPartitionInfo().Definitions[3]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
p = tbl.Meta().GetPartitionInfo().Definitions[4]
c.Assert(rows[4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[8][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[9][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[10][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[11][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
// Test split for the second index.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int,index idx(a), index idx2(b))")
tk.MustQuery("split table t index idx2 between (0) and (4000000) regions 2;").Check(testkit.Rows("3 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_i_3_", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_2_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_2_.*", tbl.Meta().ID))
// Test show table partition regions on a non-partitioned table.
err = tk.QueryToErr("show table t partition (p3,p4) index idx regions")
c.Assert(terror.ErrorEqual(err, plannercore.ErrPartitionClauseOnNonpartitioned), IsTrue)
}
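// testGetTableByName reloads the domain schema and returns the named table
// from the current information schema, failing the test on any error.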
func testGetTableByName(c *C, ctx sessionctx.Context, db, table string) table.Table {
dom := domain.GetDomain(ctx)
// Make sure the table schema is the new schema.
err := dom.Reload()
c.Assert(err, IsNil)
tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(db), model.NewCIStr(table))
c.Assert(err, IsNil)
return tbl
}
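// TestIssue10435 checks that a window function can be referenced both in the
// select list and in the ORDER BY expression of the outer query.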
func (s *testSuiteP2) TestIssue10435(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(i int, j int, k int)")
tk.MustExec("insert into t1 VALUES (1,1,1),(2,2,2),(3,3,3),(4,4,4)")
tk.MustExec("INSERT INTO t1 SELECT 10*i,j,5*j FROM t1 UNION SELECT 20*i,j,5*j FROM t1 UNION SELECT 30*i,j,5*j FROM t1")
tk.MustExec("set @@session.tidb_enable_window_function=1")
tk.MustQuery("SELECT SUM(i) OVER W FROM t1 WINDOW w AS (PARTITION BY j ORDER BY i) ORDER BY 1+SUM(i) OVER w").Check(
testkit.Rows("1", "2", "3", "4", "11", "22", "31", "33", "44", "61", "62", "93", "122", "124", "183", "244"),
)
}
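// TestUnsignedFeedback forces statistics feedback collection and verifies that
// the table scan on an unsigned primary key uses the range [0,+inf].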
func (s *testSuiteP2) TestUnsignedFeedback(c *C) {
tk := testkit.NewTestKit(c, s.store)
oriProbability := statistics.FeedbackProbability.Load()
statistics.FeedbackProbability.Store(1.0)
defer func() { statistics.FeedbackProbability.Store(oriProbability) }()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint unsigned, b int, primary key(a))")
tk.MustExec("insert into t values (1,1),(2,2)")
tk.MustExec("analyze table t")
tk.MustQuery("select count(distinct b) from t").Check(testkit.Rows("2"))
result := tk.MustQuery("explain analyze select count(distinct b) from t")
c.Assert(result.Rows()[2][4], Equals, "table:t")
c.Assert(result.Rows()[2][6], Equals, "range:[0,+inf], keep order:false")
}
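// TestIssue23567 forces feedback collection together with the
// feedbackNoNDVCollect failpoint and checks that the query does not panic.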
func (s *testSuiteP2) TestIssue23567(c *C) {
tk := testkit.NewTestKit(c, s.store)
oriProbability := statistics.FeedbackProbability.Load()
statistics.FeedbackProbability.Store(1.0)
defer func() { statistics.FeedbackProbability.Store(oriProbability) }()
failpoint.Enable("github.com/pingcap/tidb/statistics/feedbackNoNDVCollect", `return("")`)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint unsigned, b int, primary key(a))")
tk.MustExec("insert into t values (1, 1), (2, 2)")
tk.MustExec("analyze table t")
// The SQL should not panic.
tk.MustQuery("select count(distinct b) from t")
failpoint.Disable("github.com/pingcap/tidb/statistics/feedbackNoNDVCollect")
}
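// TestSummaryFailedUpdate cancels an UPDATE via a tiny memory quota and then
// checks that the failed statement is still recorded in statements_summary.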
func (s *testSuite) TestSummaryFailedUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int as(-a))")
tk.MustExec("insert into t(a) values(1), (3), (7)")
sm := &mockSessionManager1{
PS: make([]*util.ProcessInfo, 0),
}
tk.Se.SetSessionManager(sm)
s.domain.ExpensiveQueryHandle().SetSessionManager(sm)
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionCancel
})
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("set @@tidb_mem_quota_query=1")
err := tk.ExecToErr("update t set t.a = t.a - 1 where t.a in (select a from t where a < 4)")
c.Assert(err, NotNil)
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=1000000000")
tk.MustQuery("select stmt_type from information_schema.statements_summary where digest_text = 'update `t` set `t` . `a` = `t` . `a` - ? where `t` . `a` in ( select `a` from `t` where `a` < ? )'").Check(testkit.Rows("Update"))
}
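// TestOOMPanicAction verifies that, with the OOM action set to cancel, various
// statements fail with "Out Of Memory Quota!" once tidb_mem_quota_query is
// lowered enough to be exceeded.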
func (s *testSuite) TestOOMPanicAction(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, b double);")
tk.MustExec("insert into t values (1,1)")
sm := &mockSessionManager1{
PS: make([]*util.ProcessInfo, 0),
}
tk.Se.SetSessionManager(sm)
s.domain.ExpensiveQueryHandle().SetSessionManager(sm)
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionCancel
})
tk.MustExec("set @@tidb_mem_quota_query=1;")
err := tk.QueryToErr("select sum(b) from t group by a;")
c.Assert(err, NotNil)
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
// Test insert from select oom panic.
tk.MustExec("drop table if exists t,t1")
tk.MustExec("create table t (a bigint);")
tk.MustExec("create table t1 (a bigint);")
tk.MustExec("set @@tidb_mem_quota_query=200;")
_, err = tk.Exec("insert into t1 values (1),(2),(3),(4),(5);")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
_, err = tk.Exec("replace into t1 values (1),(2),(3),(4),(5);")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=10000")
tk.MustExec("insert into t1 values (1),(2),(3),(4),(5);")
tk.MustExec("set @@tidb_mem_quota_query=10;")
_, err = tk.Exec("insert into t select a from t1 order by a desc;")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
_, err = tk.Exec("replace into t select a from t1 order by a desc;")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=10000")
tk.MustExec("insert into t values (1),(2),(3),(4),(5);")
// Set the memory quota to 244 to make this SQL panic during the DeleteExec
// instead of the TableReaderExec.
tk.MustExec("set @@tidb_mem_quota_query=244;")
_, err = tk.Exec("delete from t")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=10000;")
tk.MustExec("delete from t1")
tk.MustExec("insert into t1 values(1)")
tk.MustExec("insert into t values (1),(2),(3),(4),(5);")
tk.MustExec("set @@tidb_mem_quota_query=244;")
_, err = tk.Exec("delete t, t1 from t join t1 on t.a = t1.a")
c.Assert(err, NotNil)
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=100000;")
tk.MustExec("truncate table t")
tk.MustExec("insert into t values(1),(2),(3)")
// Set the memory quota to make the SQL panic during UpdateExec instead
// of TableReaderExec.
tk.MustExec("set @@tidb_mem_quota_query=244;")
_, err = tk.Exec("update t set a = 4")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
}
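// testRecoverTable is the suite for recover/flashback table tests. It runs on
// a mock store whose tikv client is hijacked by a regionProperityClient.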
type testRecoverTable struct {
store kv.Storage
dom *domain.Domain
cluster cluster.Cluster
cli *regionProperityClient
}
func (s *testRecoverTable) SetUpSuite(c *C) {
cli := &regionProperityClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
s.cli = cli
var err error
s.store, err = mockstore.NewMockStore(
mockstore.WithClientHijacker(hijackClient),
mockstore.WithClusterInspector(func(c cluster.Cluster) {
mockstore.BootstrapWithSingleStore(c)
s.cluster = c
}),
)
c.Assert(err, IsNil)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
}
func (s *testRecoverTable) TearDownSuite(c *C) {
s.store.Close()
s.dom.Close()
}
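// mockGC disables the emulator GC and returns a timestamp before the drop, a
// timestamp after the drop, a SQL template for setting the GC safe point, and
// a closure that restores the original emulator GC setting.
// Typical usage (sketch): format safePointSQL with one of the returned
// timestamps, e.g. tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)),
// and defer resetGC() to undo the change.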
func (s *testRecoverTable) mockGC(tk *testkit.TestKit) (string, string, string, func()) {
originGC := ddl.IsEmulatorGCEnable()
resetGC := func() {
if originGC {
ddl.EmulatorGCEnable()
} else {
ddl.EmulatorGCDisable()
}
}
// Disable emulator GC.
// Otherwise, emulator GC would delete the table record as soon as possible after the drop table DDL is executed.
ddl.EmulatorGCDisable()
gcTimeFormat := "20060102-15:04:05 -0700 MST"
timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(gcTimeFormat)
timeAfterDrop := time.Now().Add(48 * 60 * 60 * time.Second).Format(gcTimeFormat)
safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
// clear GC variables first.
tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )")
return timeBeforeDrop, timeAfterDrop, safePointSQL, resetGC
}
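// TestRecoverTable covers RECOVER TABLE: a missing GC safe point, recovery
// before and after the safe point, name conflicts, recovery after TRUNCATE,
// auto ID rebasing, and that the GC enable status is preserved.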
func (s *testRecoverTable) TestRecoverTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func() {
err := failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange")
c.Assert(err, IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_recover")
tk.MustExec("use test_recover")
tk.MustExec("drop table if exists t_recover")
tk.MustExec("create table t_recover (a int);")
timeBeforeDrop, timeAfterDrop, safePointSQL, resetGC := s.mockGC(tk)
defer resetGC()
tk.MustExec("insert into t_recover values (1),(2),(3)")
tk.MustExec("drop table t_recover")
// If the GC safe point does not exist in mysql.tidb, recover should fail.
_, err := tk.Exec("recover table t_recover")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "can not get 'tikv_gc_safe_point'")
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// Should recover, and we can drop it straight away.
tk.MustExec("recover table t_recover")
tk.MustExec("drop table t_recover")
err = gcutil.EnableGC(tk.Se)
c.Assert(err, IsNil)
// The job to recover is before the GC safe point, so recover should fail.
tk.MustExec(fmt.Sprintf(safePointSQL, timeAfterDrop))
_, err = tk.Exec("recover table t_recover")
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), "Can't find dropped/truncated table 't_recover' in GC safe point"), Equals, true)
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// If there is a new table with the same name, recover should fail.
tk.MustExec("create table t_recover (a int);")
_, err = tk.Exec("recover table t_recover")
c.Assert(err.Error(), Equals, infoschema.ErrTableExists.GenWithStackByArgs("t_recover").Error())
// drop the new table with the same name, then recover table.
tk.MustExec("rename table t_recover to t_recover2")
// do recover table.
tk.MustExec("recover table t_recover")
// check recover table meta and data record.
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (4),(5),(6)")
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_recover;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003"))
// Recover table by a non-existent job ID.
_, err = tk.Exec(fmt.Sprintf("recover table by job %d", 10000000))
c.Assert(err, NotNil)
// Manually disable GC first; after recovering the table, the GC enable status should remain disabled.
err = gcutil.DisableGC(tk.Se)
c.Assert(err, IsNil)
tk.MustExec("delete from t_recover where a > 1")
tk.MustExec("drop table t_recover")
tk.MustExec("recover table t_recover")
// check recover table meta and data record.
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (7),(8),(9)")
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "7", "8", "9"))
// Recover a truncated table.
tk.MustExec("truncate table t_recover")
tk.MustExec("rename table t_recover to t_recover_new")
tk.MustExec("recover table t_recover")
tk.MustExec("insert into t_recover values (10)")
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "7", "8", "9", "10"))
// Test recovering one table multiple times.
tk.MustExec("drop table t_recover")
tk.MustExec("flashback table t_recover to t_recover_tmp")
_, err = tk.Exec("recover table t_recover")
c.Assert(infoschema.ErrTableExists.Equal(err), IsTrue)
gcEnable, err := gcutil.CheckGCEnable(tk.Se)
c.Assert(err, IsNil)
c.Assert(gcEnable, Equals, false)
}
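// TestFlashbackTable covers FLASHBACK TABLE: a missing source table, name
// conflicts, flashback to a new name, truncated tables, partitioned tables,
// and auto ID rebasing after flashback.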
func (s *testRecoverTable) TestFlashbackTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_flashback")
tk.MustExec("use test_flashback")
tk.MustExec("drop table if exists t_flashback")
tk.MustExec("create table t_flashback (a int);")
timeBeforeDrop, _, safePointSQL, resetGC := s.mockGC(tk)
defer resetGC()
// Set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// Set GC enable.
err := gcutil.EnableGC(tk.Se)
c.Assert(err, IsNil)
tk.MustExec("insert into t_flashback values (1),(2),(3)")
tk.MustExec("drop table t_flashback")
// Test flashback table with a non-existent table name.
_, err = tk.Exec("flashback table t_not_exists")
c.Assert(err.Error(), Equals, "Can't find dropped/truncated table: t_not_exists in DDL history jobs")
// Test that flashback table fails when a new table with the same name already exists.
tk.MustExec("create table t_flashback (a int);")
_, err = tk.Exec("flashback table t_flashback")
c.Assert(err.Error(), Equals, infoschema.ErrTableExists.GenWithStackByArgs("t_flashback").Error())
// Drop the new table with the same name, then flashback table.
tk.MustExec("rename table t_flashback to t_flashback_tmp")
// Test for flashback table.
tk.MustExec("flashback table t_flashback")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_flashback;").Check(testkit.Rows("1", "2", "3"))
// Check flashback table autoID.
tk.MustExec("insert into t_flashback values (4),(5),(6)")
tk.MustQuery("select * from t_flashback;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// Check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_flashback;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003"))
// Test for flashback to new table.
tk.MustExec("drop table t_flashback")
tk.MustExec("create table t_flashback (a int);")
tk.MustExec("flashback table t_flashback to t_flashback2")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_flashback2;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// Check flashback table autoID.
tk.MustExec("insert into t_flashback2 values (7),(8),(9)")
tk.MustQuery("select * from t_flashback2;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9"))
// Check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_flashback2;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003", "7 10001", "8 10002", "9 10003"))
// Test flashing back one table multiple times.
_, err = tk.Exec("flashback table t_flashback to t_flashback4")
c.Assert(infoschema.ErrTableExists.Equal(err), IsTrue)
// Test for flashback truncated table to new table.
tk.MustExec("truncate table t_flashback2")
tk.MustExec("flashback table t_flashback2 to t_flashback3")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_flashback3;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9"))
// Check flashback table autoID.
tk.MustExec("insert into t_flashback3 values (10),(11)")
tk.MustQuery("select * from t_flashback3;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"))
// Check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_flashback3;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003", "7 10001", "8 10002", "9 10003", "10 15001", "11 15002"))
// Test flashback of a dropped partition table.
tk.MustExec("drop table if exists t_p_flashback")
tk.MustExec("create table t_p_flashback (a int) partition by hash(a) partitions 4;")
tk.MustExec("insert into t_p_flashback values (1),(2),(3)")
tk.MustExec("drop table t_p_flashback")
tk.MustExec("flashback table t_p_flashback")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_p_flashback order by a;").Check(testkit.Rows("1", "2", "3"))
// Check flashback table autoID.
tk.MustExec("insert into t_p_flashback values (4),(5)")
tk.MustQuery("select a,_tidb_rowid from t_p_flashback order by a;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002"))
// Test flashback of a truncated partition table.
tk.MustExec("truncate table t_p_flashback")
tk.MustExec("flashback table t_p_flashback to t_p_flashback1")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_p_flashback1 order by a;").Check(testkit.Rows("1", "2", "3", "4", "5"))
// Check flashback table autoID.
tk.MustExec("insert into t_p_flashback1 values (6)")
tk.MustQuery("select a,_tidb_rowid from t_p_flashback1 order by a;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 10001"))
tk.MustExec("drop database if exists Test2")
tk.MustExec("create database Test2")
tk.MustExec("use Test2")
tk.MustExec("create table t (a int);")
tk.MustExec("insert into t values (1),(2)")
tk.MustExec("drop table t")
tk.MustExec("flashback table t")
tk.MustQuery("select a from t order by a").Check(testkit.Rows("1", "2"))
tk.MustExec("drop table t")
tk.MustExec("drop database if exists Test3")
tk.MustExec("create database Test3")
tk.MustExec("use Test3")
tk.MustExec("create table t (a int);")
tk.MustExec("drop table t")
tk.MustExec("drop database Test3")
tk.MustExec("use Test2")
tk.MustExec("flashback table t")
tk.MustExec("insert into t values (3)")
tk.MustQuery("select a from t order by a").Check(testkit.Rows("1", "2", "3"))
}
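// TestRecoverTempTable checks that RECOVER TABLE and FLASHBACK TABLE are
// rejected for a dropped global temporary table.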
func (s *testRecoverTable) TestRecoverTempTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_recover")
tk.MustExec("use test_recover")
tk.MustExec("drop table if exists t_recover")
tk.MustExec("create global temporary table t_recover (a int) on commit delete rows;")
timeBeforeDrop, _, safePointSQL, resetGC := s.mockGC(tk)
defer resetGC()
// Set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
tk.MustExec("drop table t_recover")
tk.MustGetErrCode("recover table t_recover;", errno.ErrUnsupportedDDLOperation)
tk.MustGetErrCode("flashback table t_recover;", errno.ErrUnsupportedDDLOperation)
}
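// TestPointGetPreparedPlan executes prepared point-get statements through the
// primary key and a unique key, with plan caching disabled for the prepared
// statements, and verifies the results across schema changes such as adding a
// column and dropping/re-adding the unique index.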
func (s *testSuiteP2) TestPointGetPreparedPlan(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists ps_text")
defer tk1.MustExec("drop database if exists ps_text")
tk1.MustExec("create database ps_text")
tk1.MustExec("use ps_text")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
pspk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where a = ?")
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[pspk1Id].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
pspk2Id, _, _, err := tk1.Se.PrepareStmt("select * from t where ? = a ")
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[pspk2Id].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
ctx := context.Background()
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
// unique index
psuk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where b = ? ")
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[psuk1Id].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// Test that a schema change invalidates the cached plan.
tk1.MustExec("alter table t add column col4 int default 10 after c")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
tk1.MustExec("alter table t drop index k_b")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
tk1.MustExec(`insert into t values(4, 3, 3, 11)`)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10", "4 3 3 11"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
tk1.MustExec("delete from t where a = 4")
tk1.MustExec("alter table t add index k_b(b)")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// use pk again
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
}
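// TestPointGetPreparedPlanWithCommitMode runs a prepared point get inside a
// non-autocommit transaction, lets another session update the row, and
// expects the commit to fail with a write conflict.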
func (s *testSuiteP2) TestPointGetPreparedPlanWithCommitMode(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists ps_text")
defer tk1.MustExec("drop database if exists ps_text")
tk1.MustExec("create database ps_text")
tk1.MustExec("use ps_text")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
pspk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where a = ?")
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[pspk1Id].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
ctx := context.Background()
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// Next, start a non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
// Try to execute using the point get plan (this plan should not take the short path).
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// update rows
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use ps_text")
tk2.MustExec("update t set c = c + 10 where c = 1")
// try to point get again
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// try to update in session 1
tk1.MustExec("update t set c = c + 10 where c = 1")
_, err = tk1.Exec("commit")
c.Assert(kv.ErrWriteConflict.Equal(err), IsTrue, Commentf("error: %s", err))
// verify
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 11"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
tk2.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 11"))
}
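// TestPointUpdatePreparedPlan executes prepared point updates through the
// primary key and a unique key and verifies the accumulated values, including
// after schema changes that add a column and drop/re-add the unique index.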
func (s *testSuiteP2) TestPointUpdatePreparedPlan(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists pu_test")
defer tk1.MustExec("drop database if exists pu_test")
tk1.MustExec("create database pu_test")
tk1.MustExec("use pu_test")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
updateID1, pc, _, err := tk1.Se.PrepareStmt(`update t set c = c + 1 where a = ?`)
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[updateID1].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
c.Assert(pc, Equals, 1)
updateID2, pc, _, err := tk1.Se.PrepareStmt(`update t set c = c + 2 where ? = a`)
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[updateID2].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
c.Assert(pc, Equals, 1)
ctx := context.Background()
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 4"))
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
// updateID2
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID2, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 8"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID2, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 10"))
// unique index
updUkID1, _, _, err := tk1.Se.PrepareStmt(`update t set c = c + 10 where b = ?`)
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[updUkID1].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 20"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 30"))
// Test that a schema change invalidates the cached plan.
tk1.MustExec("alter table t add column col4 int default 10 after c")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 31 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 32 10"))
tk1.MustExec("alter table t drop index k_b")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 42 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 52 10"))
tk1.MustExec("alter table t add unique index k_b(b)")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 62 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 72 10"))
tk1.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 1 10"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2 10"))
}
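// TestPointUpdatePreparedPlanWithCommitMode runs a prepared point update in a
// non-autocommit transaction while another session updates the same row, and
// expects the commit to fail with a write conflict.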
func (s *testSuiteP2) TestPointUpdatePreparedPlanWithCommitMode(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists pu_test2")
defer tk1.MustExec("drop database if exists pu_test2")
tk1.MustExec("create database pu_test2")
tk1.MustExec("use pu_test2")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
ctx := context.Background()
updateID1, _, _, err := tk1.Se.PrepareStmt(`update t set c = c + 1 where a = ?`)
tk1.Se.GetSessionVars().PreparedStmts[updateID1].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
c.Assert(err, IsNil)
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 4"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
// Next, start a non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
// Try to execute using the point get plan (this plan should not take the short path).
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
// update rows
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use pu_test2")
tk2.MustExec(`prepare pu2 from "update t set c = c + 2 where ? = a "`)
tk2.MustExec("set @p3 = 3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
tk2.MustExec("execute pu2 using @p3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 7"))
tk2.MustExec("execute pu2 using @p3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
// try to update in session 1
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
_, err = tk1.Exec("commit")
c.Assert(kv.ErrWriteConflict.Equal(err), IsTrue, Commentf("error: %s", err))
// verify
tk2.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 1"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2"))
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2"))
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
// Start another non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 11"))
tk1.MustExec("commit")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 11"))
}
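// TestPartitionHashCode runs concurrent full scans on a hash-partitioned
// table; it appears intended to catch races in concurrent partitioned-table
// execution.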
func (s *testSuite1) TestPartitionHashCode(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec(`create table t(c1 bigint, c2 bigint, c3 bigint, primary key(c1))
partition by hash (c1) partitions 4;`)
wg := sync.WaitGroup{}
for i := 0; i < 5; i++ {
wg.Add(1)
go func() {
defer wg.Done()
tk1 := testkit.NewTestKitWithInit(c, s.store)
for i := 0; i < 5; i++ {
tk1.MustExec("select * from t")
}
}()
}
wg.Wait()
}
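// TestAlterDefaultValue checks that changing a column's default value does not
// rewrite the value already stored in existing rows.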
func (s *testSuite1) TestAlterDefaultValue(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(a int, primary key(a))")
tk.MustExec("insert into t(a) values(1)")
tk.MustExec("alter table t add column b int default 1")
tk.MustExec("alter table t alter b set default 2")
tk.MustQuery("select b from t where a = 1").Check(testkit.Rows("1"))
}
type testClusterTableSuite struct {
testSuiteWithCliBase
rpcserver *grpc.Server
listenAddr string
}
func (s *testClusterTableSuite) SetUpSuite(c *C) {
s.testSuiteWithCliBase.SetUpSuite(c)
s.rpcserver, s.listenAddr = s.setUpRPCService(c, "127.0.0.1:0")
}
func (s *testClusterTableSuite) setUpRPCService(c *C, addr string) (*grpc.Server, string) {
sm := &mockSessionManager1{}
sm.PS = append(sm.PS, &util.ProcessInfo{
ID: 1,
User: "root",
Host: "127.0.0.1",
Command: mysql.ComQuery,
})
lis, err := net.Listen("tcp", addr)
c.Assert(err, IsNil)
srv := server.NewRPCServer(config.GetGlobalConfig(), s.dom, sm)
port := lis.Addr().(*net.TCPAddr).Port
addr = fmt.Sprintf("127.0.0.1:%d", port)
go func() {
err = srv.Serve(lis)
c.Assert(err, IsNil)
}()
config.UpdateGlobal(func(conf *config.Config) {
conf.Status.StatusPort = uint(port)
})
return srv, addr
}
func (s *testClusterTableSuite) TearDownSuite(c *C) {
if s.rpcserver != nil {
s.rpcserver.Stop()
s.rpcserver = nil
}
s.testSuiteWithCliBase.TearDownSuite(c)
}
func (s *testSuiteP1) TestPrepareLoadData(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustGetErrCode(`prepare stmt from "load data local infile '/tmp/load_data_test.csv' into table test";`, mysql.ErrUnsupportedPs)
}
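// TestSlowQuery writes several rotated slow-log files, then queries
// slow_query and cluster_slow_query with different time ranges and time zones
// and checks that the expected log entries are returned.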
func (s *testClusterTableSuite) TestSlowQuery(c *C) {
logData0 := ""
logData1 := `
# Time: 2020-02-15T18:00:01.000000+08:00
select 1;
# Time: 2020-02-15T19:00:05.000000+08:00
select 2;`
logData2 := `
# Time: 2020-02-16T18:00:01.000000+08:00
select 3;
# Time: 2020-02-16T18:00:05.000000+08:00
select 4;`
logData3 := `
# Time: 2020-02-16T19:00:00.000000+08:00
select 5;
# Time: 2020-02-17T18:00:05.000000+08:00
select 6;`
logData4 := `
# Time: 2020-05-14T19:03:54.314615176+08:00
select 7;`
logData := []string{logData0, logData1, logData2, logData3, logData4}
fileName0 := "tidb-slow-2020-02-14T19-04-05.01.log"
fileName1 := "tidb-slow-2020-02-15T19-04-05.01.log"
fileName2 := "tidb-slow-2020-02-16T19-04-05.01.log"
fileName3 := "tidb-slow-2020-02-17T18-00-05.01.log"
fileName4 := "tidb-slow.log"
fileNames := []string{fileName0, fileName1, fileName2, fileName3, fileName4}
prepareLogs(c, logData, fileNames)
defer func() {
removeFiles(fileNames)
}()
tk := testkit.NewTestKitWithInit(c, s.store)
loc, err := time.LoadLocation("Asia/Shanghai")
c.Assert(err, IsNil)
tk.Se.GetSessionVars().TimeZone = loc
tk.MustExec("use information_schema")
cases := []struct {
prepareSQL string
sql string
result []string
}{
{
sql: "select count(*),min(time),max(time) from %s where time > '2019-01-26 21:51:00' and time < now()",
result: []string{"7|2020-02-15 18:00:01.000000|2020-05-14 19:03:54.314615"},
},
{
sql: "select count(*),min(time),max(time) from %s where time > '2020-02-15 19:00:00' and time < '2020-02-16 18:00:02'",
result: []string{"2|2020-02-15 19:00:05.000000|2020-02-16 18:00:01.000000"},
},
{
sql: "select count(*),min(time),max(time) from %s where time > '2020-02-16 18:00:02' and time < '2020-02-17 17:00:00'",
result: []string{"2|2020-02-16 18:00:05.000000|2020-02-16 19:00:00.000000"},
},
{
sql: "select count(*),min(time),max(time) from %s where time > '2020-02-16 18:00:02' and time < '2020-02-17 20:00:00'",
result: []string{"3|2020-02-16 18:00:05.000000|2020-02-17 18:00:05.000000"},
},
{
sql: "select count(*),min(time),max(time) from %s",
result: []string{"1|2020-05-14 19:03:54.314615|2020-05-14 19:03:54.314615"},
},
{
sql: "select count(*),min(time) from %s where time > '2020-02-16 20:00:00'",
result: []string{"1|2020-02-17 18:00:05.000000"},
},
{
sql: "select count(*) from %s where time > '2020-02-17 20:00:00'",
result: []string{"0"},
},
{
sql: "select query from %s where time > '2019-01-26 21:51:00' and time < now()",
result: []string{"select 1;", "select 2;", "select 3;", "select 4;", "select 5;", "select 6;", "select 7;"},
},
// Test for different timezone.
{
prepareSQL: "set @@time_zone = '+00:00'",
sql: "select time from %s where time = '2020-02-17 10:00:05.000000'",
result: []string{"2020-02-17 10:00:05.000000"},
},
{
prepareSQL: "set @@time_zone = '+02:00'",
sql: "select time from %s where time = '2020-02-17 12:00:05.000000'",
result: []string{"2020-02-17 12:00:05.000000"},
},
// Test for issue 17224
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from %s where time = '2020-05-14 19:03:54.314615'",
result: []string{"2020-05-14 19:03:54.314615"},
},
}
for _, cas := range cases {
if len(cas.prepareSQL) > 0 {
tk.MustExec(cas.prepareSQL)
}
sql := fmt.Sprintf(cas.sql, "slow_query")
tk.MustQuery(sql).Check(testutil.RowsWithSep("|", cas.result...))
sql = fmt.Sprintf(cas.sql, "cluster_slow_query")
tk.MustQuery(sql).Check(testutil.RowsWithSep("|", cas.result...))
}
}
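// TestIssue20236 exercises slow-log entries whose time header and query text
// are split across rotated files and checks time-range filtering and ordering
// on cluster_slow_query.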
func (s *testClusterTableSuite) TestIssue20236(c *C) {
logData0 := ""
logData1 := `
# Time: 2020-02-15T18:00:01.000000+08:00
select 1;
# Time: 2020-02-15T19:00:05.000000+08:00
select 2;
# Time: 2020-02-15T20:00:05.000000+08:00`
logData2 := `select 3;
# Time: 2020-02-16T18:00:01.000000+08:00
select 4;
# Time: 2020-02-16T18:00:05.000000+08:00
select 5;`
logData3 := `
# Time: 2020-02-16T19:00:00.000000+08:00
select 6;
# Time: 2020-02-17T18:00:05.000000+08:00
select 7;
# Time: 2020-02-17T19:00:00.000000+08:00`
logData4 := `select 8;
# Time: 2020-02-17T20:00:00.000000+08:00
select 9
# Time: 2020-05-14T19:03:54.314615176+08:00
select 10;`
logData := []string{logData0, logData1, logData2, logData3, logData4}
fileName0 := "tidb-slow-2020-02-14T19-04-05.01.log"
fileName1 := "tidb-slow-2020-02-15T19-04-05.01.log"
fileName2 := "tidb-slow-2020-02-16T19-04-05.01.log"
fileName3 := "tidb-slow-2020-02-17T18-00-05.01.log"
fileName4 := "tidb-slow.log"
fileNames := []string{fileName0, fileName1, fileName2, fileName3, fileName4}
prepareLogs(c, logData, fileNames)
defer func() {
removeFiles(fileNames)
}()
tk := testkit.NewTestKitWithInit(c, s.store)
loc, err := time.LoadLocation("Asia/Shanghai")
c.Assert(err, IsNil)
tk.Se.GetSessionVars().TimeZone = loc
tk.MustExec("use information_schema")
cases := []struct {
prepareSQL string
sql string
result []string
}{
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000'",
result: []string{"2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000' order by time desc",
result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where (time > '2020-02-15 18:00:00' and time < '2020-02-15 20:01:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-14 20:00:00') order by time",
result: []string{"2020-02-15 18:00:01.000000", "2020-02-15 19:00:05.000000", "2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where (time > '2020-02-15 18:00:00' and time < '2020-02-15 20:01:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-14 20:00:00') order by time desc",
result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000", "2020-02-15 19:00:05.000000", "2020-02-15 18:00:01.000000"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select count(*) from cluster_slow_query where time > '2020-02-15 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc",
result: []string{"9"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select count(*) from cluster_slow_query where (time > '2020-02-16 18:00:00' and time < '2020-05-14 20:00:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-17 20:00:00')",
result: []string{"6"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select count(*) from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-02-17 20:00:00.000000' order by time desc",
result: []string{"5"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc limit 3",
result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"},
},
}
for _, cas := range cases {
if len(cas.prepareSQL) > 0 {
tk.MustExec(cas.prepareSQL)
}
tk.MustQuery(cas.sql).Check(testutil.RowsWithSep("|", cas.result...))
}
}
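// prepareLogs writes each entry of logData into the corresponding file in
// fileNames, truncating any existing content.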
func prepareLogs(c *C, logData []string, fileNames []string) {
writeFile := func(file string, data string) {
f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
c.Assert(err, IsNil)
_, err = f.Write([]byte(data))
c.Assert(f.Close(), IsNil)
c.Assert(err, IsNil)
}
for i, log := range logData {
writeFile(fileNames[i], log)
}
}
func removeFiles(fileNames []string) {
for _, fileName := range fileNames {
os.Remove(fileName)
}
}
func (s *testSuite1) TestIssue15718(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists tt;")
tk.MustExec("create table tt(a decimal(10, 0), b varchar(1), c time);")
tk.MustExec("insert into tt values(0, '2', null), (7, null, '1122'), (NULL, 'w', null), (NULL, '2', '3344'), (NULL, NULL, '0'), (7, 'f', '33');")
tk.MustQuery("select a and b as d, a or c as e from tt;").Check(testkit.Rows("0 <nil>", "<nil> 1", "0 <nil>", "<nil> 1", "<nil> <nil>", "0 1"))
tk.MustExec("drop table if exists tt;")
tk.MustExec("create table tt(a decimal(10, 0), b varchar(1), c time);")
tk.MustExec("insert into tt values(0, '2', '123'), (7, null, '1122'), (null, 'w', null);")
tk.MustQuery("select a and b as d, a, b from tt order by d limit 1;").Check(testkit.Rows("<nil> 7 <nil>"))
tk.MustQuery("select b or c as d, b, c from tt order by d limit 1;").Check(testkit.Rows("<nil> w <nil>"))
tk.MustExec("drop table if exists t0;")
tk.MustExec("CREATE TABLE t0(c0 FLOAT);")
tk.MustExec("INSERT INTO t0(c0) VALUES (NULL);")
tk.MustQuery("SELECT * FROM t0 WHERE NOT(0 OR t0.c0);").Check(testkit.Rows())
}
func (s *testSuite1) TestIssue15767(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists tt;")
tk.MustExec("create table t(a int, b char);")
tk.MustExec("insert into t values (1,'s'),(2,'b'),(1,'c'),(2,'e'),(1,'a');")
tk.MustExec("insert into t select * from t;")
tk.MustExec("insert into t select * from t;")
tk.MustExec("insert into t select * from t;")
tk.MustQuery("select b, count(*) from ( select b from t order by a limit 20 offset 2) as s group by b order by b;").Check(testkit.Rows("a 6", "c 7", "s 7"))
}
func (s *testSuite1) TestIssue16025(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t0;")
tk.MustExec("CREATE TABLE t0(c0 NUMERIC PRIMARY KEY);")
tk.MustExec("INSERT IGNORE INTO t0(c0) VALUES (NULL);")
tk.MustQuery("SELECT * FROM t0 WHERE c0;").Check(testkit.Rows())
}
func (s *testSuite1) TestIssue16854(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("CREATE TABLE `t` ( `a` enum('WAITING','PRINTED','STOCKUP','CHECKED','OUTSTOCK','PICKEDUP','WILLBACK','BACKED') DEFAULT NULL)")
tk.MustExec("insert into t values(1),(2),(3),(4),(5),(6),(7);")
for i := 0; i < 7; i++ {
tk.MustExec("insert into t select * from t;")
}
tk.MustExec("set @@tidb_max_chunk_size=100;")
tk.MustQuery("select distinct a from t order by a").Check(testkit.Rows("WAITING", "PRINTED", "STOCKUP", "CHECKED", "OUTSTOCK", "PICKEDUP", "WILLBACK"))
tk.MustExec("drop table t")
tk.MustExec("CREATE TABLE `t` ( `a` set('WAITING','PRINTED','STOCKUP','CHECKED','OUTSTOCK','PICKEDUP','WILLBACK','BACKED') DEFAULT NULL)")
tk.MustExec("insert into t values(1),(2),(3),(4),(5),(6),(7);")
for i := 0; i < 7; i++ {
tk.MustExec("insert into t select * from t;")
}
tk.MustExec("set @@tidb_max_chunk_size=100;")
tk.MustQuery("select distinct a from t order by a").Check(testkit.Rows("WAITING", "PRINTED", "WAITING,PRINTED", "STOCKUP", "WAITING,STOCKUP", "PRINTED,STOCKUP", "WAITING,PRINTED,STOCKUP"))
tk.MustExec("drop table t")
}
func (s *testSuite) TestIssue16921(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a float);")
tk.MustExec("create index a on t(a);")
tk.MustExec("insert into t values (1.0), (NULL), (0), (2.0);")
tk.MustQuery("select `a` from `t` use index (a) where !`a`;").Check(testkit.Rows("0"))
tk.MustQuery("select `a` from `t` ignore index (a) where !`a`;").Check(testkit.Rows("0"))
tk.MustQuery("select `a` from `t` use index (a) where `a`;").Check(testkit.Rows("1", "2"))
tk.MustQuery("select `a` from `t` ignore index (a) where `a`;").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t use index (a) where not a is true;").Check(testkit.Rows("<nil>", "0"))
tk.MustQuery("select a from t use index (a) where not not a is true;").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t use index (a) where not not a;").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t use index (a) where not not not a is true;").Check(testkit.Rows("<nil>", "0"))
tk.MustQuery("select a from t use index (a) where not not not a;").Check(testkit.Rows("0"))
}
func (s *testSuite) TestIssue19100(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t1, t2;")
tk.MustExec("create table t1 (c decimal);")
tk.MustExec("create table t2 (c decimal, key(c));")
tk.MustExec("insert into t1 values (null);")
tk.MustExec("insert into t2 values (null);")
tk.MustQuery("select count(*) from t1 where not c;").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t2 where not c;").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t1 where c;").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t2 where c;").Check(testkit.Rows("0"))
}
// This test is from JIRA issue #5856.
func (s *testSuite1) TestInsertValuesWithSubQuery(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2(a int, b int, c int)")
defer tk.MustExec("drop table if exists t2")
// should not reference upper scope
c.Assert(tk.ExecToErr("insert into t2 values (11, 8, (select not b))"), NotNil)
c.Assert(tk.ExecToErr("insert into t2 set a = 11, b = 8, c = (select b))"), NotNil)
// A subquery referencing the target table is allowed.
tk.MustExec("insert into t2 values(1, 1, (select b from t2))")
tk.MustQuery("select * from t2").Check(testkit.Rows("1 1 <nil>"))
tk.MustExec("insert into t2 set a = 1, b = 1, c = (select b+1 from t2)")
tk.MustQuery("select * from t2").Check(testkit.Rows("1 1 <nil>", "1 1 2"))
// Insert using a column reference should work normally.
tk.MustExec("delete from t2")
tk.MustExec("insert into t2 values(2, 4, a)")
tk.MustQuery("select * from t2").Check(testkit.Rows("2 4 2"))
tk.MustExec("insert into t2 set a = 3, b = 5, c = b")
tk.MustQuery("select * from t2").Check(testkit.Rows("2 4 2", "3 5 5"))
}
func (s *testSuite1) TestDIVZeroInPartitionExpr(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(a int) partition by range (10 div a) (partition p0 values less than (10), partition p1 values less than maxvalue)")
defer tk.MustExec("drop table if exists t1")
tk.MustExec("set @@sql_mode=''")
tk.MustExec("insert into t1 values (NULL), (0), (1)")
tk.MustExec("set @@sql_mode='STRICT_ALL_TABLES,ERROR_FOR_DIVISION_BY_ZERO'")
tk.MustGetErrCode("insert into t1 values (NULL), (0), (1)", mysql.ErrDivisionByZero)
}
func (s *testSuite1) TestInsertIntoGivenPartitionSet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec(`create table t1(
a int(11) DEFAULT NULL,
b varchar(10) DEFAULT NULL,
UNIQUE KEY idx_a (a)) PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB)`)
defer tk.MustExec("drop table if exists t1")
// insert into
tk.MustExec("insert into t1 partition(p0) values(1, 'a'), (2, 'b')")
tk.MustQuery("select * from t1 partition(p0) order by a").Check(testkit.Rows("1 a", "2 b"))
tk.MustExec("insert into t1 partition(p0, p1) values(3, 'c'), (4, 'd')")
tk.MustQuery("select * from t1 partition(p1)").Check(testkit.Rows())
tk.MustGetErrMsg("insert into t1 values(1, 'a')", "[kv:1062]Duplicate entry '1' for key 'idx_a'")
tk.MustGetErrMsg("insert into t1 partition(p0, p_non_exist) values(1, 'a')", "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
tk.MustGetErrMsg("insert into t1 partition(p0, p1) values(40, 'a')", "[table:1748]Found a row not matching the given partition set")
// replace into
tk.MustExec("replace into t1 partition(p0) values(1, 'replace')")
tk.MustExec("replace into t1 partition(p0, p1) values(3, 'replace'), (4, 'replace')")
tk.MustExec("replace into t1 values(1, 'a')")
tk.MustQuery("select * from t1 partition (p0) order by a").Check(testkit.Rows("1 a", "2 b", "3 replace", "4 replace"))
tk.MustGetErrMsg("replace into t1 partition(p0, p_non_exist) values(1, 'a')", "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
tk.MustGetErrMsg("replace into t1 partition(p0, p1) values(40, 'a')", "[table:1748]Found a row not matching the given partition set")
tk.MustExec("truncate table t1")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b char(10))")
defer tk.MustExec("drop table if exists t")
// insert into general table
tk.MustGetErrMsg("insert into t partition(p0, p1) values(1, 'a')", "[planner:1747]PARTITION () clause on non partitioned table")
// insert into from select
tk.MustExec("insert into t values(1, 'a'), (2, 'b')")
tk.MustExec("insert into t1 partition(p0) select * from t")
tk.MustQuery("select * from t1 partition(p0) order by a").Check(testkit.Rows("1 a", "2 b"))
tk.MustExec("truncate table t")
tk.MustExec("insert into t values(3, 'c'), (4, 'd')")
tk.MustExec("insert into t1 partition(p0, p1) select * from t")
tk.MustQuery("select * from t1 partition(p1) order by a").Check(testkit.Rows())
tk.MustQuery("select * from t1 partition(p0) order by a").Check(testkit.Rows("1 a", "2 b", "3 c", "4 d"))
tk.MustGetErrMsg("insert into t1 select 1, 'a'", "[kv:1062]Duplicate entry '1' for key 'idx_a'")
tk.MustGetErrMsg("insert into t1 partition(p0, p_non_exist) select 1, 'a'", "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
tk.MustGetErrMsg("insert into t1 partition(p0, p1) select 40, 'a'", "[table:1748]Found a row not matching the given partition set")
// replace into from select
tk.MustExec("replace into t1 partition(p0) select 1, 'replace'")
tk.MustExec("truncate table t")
tk.MustExec("insert into t values(3, 'replace'), (4, 'replace')")
tk.MustExec("replace into t1 partition(p0, p1) select * from t")
tk.MustExec("replace into t1 select 1, 'a'")
tk.MustQuery("select * from t1 partition (p0) order by a").Check(testkit.Rows("1 a", "2 b", "3 replace", "4 replace"))
tk.MustGetErrMsg("replace into t1 partition(p0, p_non_exist) select 1, 'a'", "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
tk.MustGetErrMsg("replace into t1 partition(p0, p1) select 40, 'a'", "[table:1748]Found a row not matching the given partition set")
}
func (s *testSuite1) TestUpdateGivenPartitionSet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1,t2,t3,t4")
tk.MustExec(`create table t1(
a int(11),
b varchar(10) DEFAULT NULL,
primary key idx_a (a)) PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB)`)
tk.MustExec(`create table t2(
a int(11) DEFAULT NULL,
b varchar(10) DEFAULT NULL) PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB)`)
tk.MustExec(`create table t3 (a int(11), b varchar(10) default null)`)
defer tk.MustExec("drop table if exists t1,t2,t3")
tk.MustExec("insert into t3 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd')")
err := tk.ExecToErr("update t3 partition(p0) set a = 40 where a = 2")
c.Assert(err.Error(), Equals, "[planner:1747]PARTITION () clause on non partitioned table")
// update with primary key change
tk.MustExec("insert into t1 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd')")
err = tk.ExecToErr("update t1 partition(p0, p1) set a = 40")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
err = tk.ExecToErr("update t1 partition(p0) set a = 40 where a = 2")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
// test non-exist partition.
err = tk.ExecToErr("update t1 partition (p0, p_non_exist) set a = 40")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
// test join.
err = tk.ExecToErr("update t1 partition (p0), t3 set t1.a = 40 where t3.a = 2")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
tk.MustExec("update t1 partition(p0) set a = 3 where a = 2")
tk.MustExec("update t1 partition(p0, p3) set a = 33 where a = 1")
// update without partition change
tk.MustExec("insert into t2 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd')")
err = tk.ExecToErr("update t2 partition(p0, p1) set a = 40")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
err = tk.ExecToErr("update t2 partition(p0) set a = 40 where a = 2")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
tk.MustExec("update t2 partition(p0) set a = 3 where a = 2")
tk.MustExec("update t2 partition(p0, p3) set a = 33 where a = 1")
tk.MustExec("create table t4(a int primary key, b int) partition by hash(a) partitions 2")
tk.MustExec("insert into t4(a, b) values(1, 1),(2, 2),(3, 3);")
err = tk.ExecToErr("update t4 partition(p0) set a = 5 where a = 2")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
}
func (s *testSuiteP2) TestApplyCache(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int);")
tk.MustExec("insert into t values (1),(1),(1),(1),(1),(1),(1),(1),(1);")
tk.MustExec("analyze table t;")
result := tk.MustQuery("explain analyze SELECT count(1) FROM (SELECT (SELECT min(a) FROM t as t2 WHERE t2.a > t1.a) AS a from t as t1) t;")
c.Assert(result.Rows()[1][0], Equals, "└─Apply_39")
var (
ind int
flag bool
)
value := (result.Rows()[1][5]).(string)
for ind = 0; ind < len(value)-5; ind++ {
if value[ind:ind+5] == "cache" {
flag = true
break
}
}
c.Assert(flag, Equals, true)
c.Assert(value[ind:], Equals, "cache:ON, cacheHitRatio:88.889%")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int);")
tk.MustExec("insert into t values (1),(2),(3),(4),(5),(6),(7),(8),(9);")
tk.MustExec("analyze table t;")
result = tk.MustQuery("explain analyze SELECT count(1) FROM (SELECT (SELECT min(a) FROM t as t2 WHERE t2.a > t1.a) AS a from t as t1) t;")
c.Assert(result.Rows()[1][0], Equals, "└─Apply_39")
flag = false
value = (result.Rows()[1][5]).(string)
for ind = 0; ind < len(value)-5; ind++ {
if value[ind:ind+5] == "cache" {
flag = true
break
}
}
c.Assert(flag, Equals, true)
c.Assert(value[ind:], Equals, "cache:OFF")
}
// For issue 17256
func (s *testSuite) TestGenerateColumnReplace(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int as (a + 1) virtual not null, unique index idx(b));")
tk.MustExec("REPLACE INTO `t1` (`a`) VALUES (2);")
tk.MustExec("REPLACE INTO `t1` (`a`) VALUES (2);")
tk.MustQuery("select * from t1").Check(testkit.Rows("2 3"))
tk.MustExec("insert into `t1` (`a`) VALUES (2) on duplicate key update a = 3;")
tk.MustQuery("select * from t1").Check(testkit.Rows("3 4"))
}
func (s *testSlowQuery) TestSlowQueryWithoutSlowLog(c *C) {
tk := testkit.NewTestKit(c, s.store)
originCfg := config.GetGlobalConfig()
newCfg := *originCfg
newCfg.Log.SlowQueryFile = "tidb-slow-not-exist.log"
newCfg.Log.SlowThreshold = math.MaxUint64
config.StoreGlobalConfig(&newCfg)
defer func() {
config.StoreGlobalConfig(originCfg)
}()
tk.MustQuery("select query from information_schema.slow_query").Check(testkit.Rows())
tk.MustQuery("select query from information_schema.slow_query where time > '2020-09-15 12:16:39' and time < now()").Check(testkit.Rows())
}
func (s *testSlowQuery) TestSlowQuerySensitiveQuery(c *C) {
tk := testkit.NewTestKit(c, s.store)
originCfg := config.GetGlobalConfig()
newCfg := *originCfg
f, err := ioutil.TempFile("", "tidb-slow-*.log")
c.Assert(err, IsNil)
f.Close()
newCfg.Log.SlowQueryFile = f.Name()
config.StoreGlobalConfig(&newCfg)
defer func() {
tk.MustExec("set tidb_slow_log_threshold=300;")
config.StoreGlobalConfig(originCfg)
err = os.Remove(newCfg.Log.SlowQueryFile)
c.Assert(err, IsNil)
}()
err = logutil.InitLogger(newCfg.Log.ToLogConfig())
c.Assert(err, IsNil)
tk.MustExec("set tidb_slow_log_threshold=0;")
tk.MustExec("drop user if exists user_sensitive;")
tk.MustExec("create user user_sensitive identified by '123456789';")
tk.MustExec("alter user 'user_sensitive'@'%' identified by 'abcdefg';")
tk.MustExec("set password for 'user_sensitive'@'%' = 'xyzuvw';")
tk.MustQuery("select query from `information_schema`.`slow_query` " +
"where (query like 'set password%' or query like 'create user%' or query like 'alter user%') " +
"and query like '%user_sensitive%' order by query;").
Check(testkit.Rows(
"alter user {user_sensitive@% password = ***};",
"create user {user_sensitive@% password = ***};",
"set password for user user_sensitive@%;",
))
}
func (s *testSlowQuery) TestSlowQueryPrepared(c *C) {
tk := testkit.NewTestKit(c, s.store)
originCfg := config.GetGlobalConfig()
newCfg := *originCfg
f, err := ioutil.TempFile("", "tidb-slow-*.log")
c.Assert(err, IsNil)
f.Close()
newCfg.Log.SlowQueryFile = f.Name()
config.StoreGlobalConfig(&newCfg)
defer func() {
tk.MustExec("set tidb_slow_log_threshold=300;")
tk.MustExec("set tidb_redact_log=0;")
config.StoreGlobalConfig(originCfg)
os.Remove(newCfg.Log.SlowQueryFile)
}()
err = logutil.InitLogger(newCfg.Log.ToLogConfig())
c.Assert(err, IsNil)
tk.MustExec("set tidb_slow_log_threshold=0;")
tk.MustExec(`prepare mystmt1 from 'select sleep(?), 1';`)
tk.MustExec("SET @num = 0.01;")
tk.MustExec("execute mystmt1 using @num;")
tk.MustQuery("SELECT Query FROM `information_schema`.`slow_query` " +
"where query like 'select%sleep%' order by time desc limit 1").
Check(testkit.Rows(
"select sleep(?), 1 [arguments: 0.01];",
))
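// After enabling redact log, the statement arguments should no longer appear in the slow log entry for the prepared statement.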
tk.MustExec("set tidb_redact_log=1;")
tk.MustExec(`prepare mystmt2 from 'select sleep(?), 2';`)
tk.MustExec("execute mystmt2 using @num;")
tk.MustQuery("SELECT Query FROM `information_schema`.`slow_query` " +
"where query like 'select%sleep%' order by time desc limit 1").
Check(testkit.Rows(
"select `sleep` ( ? ) , ?;",
))
}
func (s *testSlowQuery) TestLogSlowLogIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
f, err := ioutil.TempFile("", "tidb-slow-*.log")
c.Assert(err, IsNil)
f.Close()
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.Log.SlowQueryFile = f.Name()
})
err = logutil.InitLogger(config.GetGlobalConfig().Log.ToLogConfig())
c.Assert(err, IsNil)
tk.MustExec("use test")
tk.MustExec("create table t (a int, b int,index idx(a));")
tk.MustExec("set tidb_slow_log_threshold=0;")
tk.MustQuery("select * from t use index (idx) where a in (1) union select * from t use index (idx) where a in (2,3);")
tk.MustExec("set tidb_slow_log_threshold=300;")
tk.MustQuery("select index_names from `information_schema`.`slow_query` " +
"where query like 'select%union%' limit 1").
Check(testkit.Rows(
"[t:idx]",
))
}
func (s *testSlowQuery) TestSlowQuery(c *C) {
tk := testkit.NewTestKit(c, s.store)
f, err := ioutil.TempFile("", "tidb-slow-*.log")
c.Assert(err, IsNil)
_, err = f.WriteString(`
# Time: 2020-10-13T20:08:13.970563+08:00
select * from t;
# Time: 2020-10-16T20:08:13.970563+08:00
select * from t;
`)
c.Assert(err, IsNil)
err = f.Close()
c.Assert(err, IsNil)
executor.ParseSlowLogBatchSize = 1
originCfg := config.GetGlobalConfig()
newCfg := *originCfg
newCfg.Log.SlowQueryFile = f.Name()
config.StoreGlobalConfig(&newCfg)
defer func() {
executor.ParseSlowLogBatchSize = 64
config.StoreGlobalConfig(originCfg)
err = os.Remove(newCfg.Log.SlowQueryFile)
c.Assert(err, IsNil)
}()
err = logutil.InitLogger(newCfg.Log.ToLogConfig())
c.Assert(err, IsNil)
tk.MustQuery("select count(*) from `information_schema`.`slow_query` where time > '2020-10-16 20:08:13' and time < '2020-10-16 21:08:13'").Check(testkit.Rows("1"))
tk.MustQuery("select count(*) from `information_schema`.`slow_query` where time > '2019-10-13 20:08:13' and time < '2020-10-16 21:08:13'").Check(testkit.Rows("2"))
}
func (s *testSerialSuite) TestKillTableReader(c *C) {
var retry = "github.com/pingcap/tidb/store/tikv/mockRetrySendReqToRegion"
defer func() {
c.Assert(failpoint.Disable(retry), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int)")
tk.MustExec("insert into t values (1),(2),(3)")
tk.MustExec("set @@tidb_distsql_scan_concurrency=1")
atomic.StoreUint32(&tk.Se.GetSessionVars().Killed, 0)
c.Assert(failpoint.Enable(retry, `return(true)`), IsNil)
wg := &sync.WaitGroup{}
wg.Add(1)
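// Run the query in another goroutine; killing the session below should make it return ErrQueryInterrupted.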
go func() {
defer wg.Done()
time.Sleep(1 * time.Second)
err := tk.QueryToErr("select * from t")
c.Assert(err, NotNil)
c.Assert(int(terror.ToSQLError(errors.Cause(err).(*terror.Error)).Code), Equals, int(executor.ErrQueryInterrupted.Code()))
}()
atomic.StoreUint32(&tk.Se.GetSessionVars().Killed, 1)
wg.Wait()
}
func (s *testSerialSuite) TestPrevStmtDesensitization(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec(fmt.Sprintf("set @@session.%v=1", variable.TiDBRedactLog))
defer tk.MustExec(fmt.Sprintf("set @@session.%v=0", variable.TiDBRedactLog))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, unique key (a))")
tk.MustExec("begin")
tk.MustExec("insert into t values (1),(2)")
c.Assert(tk.Se.GetSessionVars().PrevStmt.String(), Equals, "insert into `t` values ( ? ) , ( ? )")
c.Assert(tk.ExecToErr("insert into t values (1)").Error(), Equals, `[kv:1062]Duplicate entry '?' for key 'a'`)
}
func (s *testSuite) TestIssue19372(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1, t2;")
tk.MustExec("create table t1 (c_int int, c_str varchar(40), key(c_str));")
tk.MustExec("create table t2 like t1;")
tk.MustExec("insert into t1 values (1, 'a'), (2, 'b'), (3, 'c');")
tk.MustExec("insert into t2 select * from t1;")
tk.MustQuery("select (select t2.c_str from t2 where t2.c_str <= t1.c_str and t2.c_int in (1, 2) order by t2.c_str limit 1) x from t1 order by c_int;").Check(testkit.Rows("a", "a", "a"))
}
func (s *testSerialSuite1) TestCollectCopRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int)")
tk.MustExec("set tidb_enable_collect_execution_info=1;")
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/tikvStoreRespResult", `return(true)`), IsNil)
rows := tk.MustQuery("explain analyze select * from t1").Rows()
c.Assert(len(rows), Equals, 2)
explain := fmt.Sprintf("%v", rows[0])
c.Assert(explain, Matches, ".*rpc_num: 2, .*regionMiss:.*")
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/tikvStoreRespResult"), IsNil)
}
func (s *testSerialSuite1) TestIndexLookupRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int, index(a))")
tk.MustExec("insert into t1 values (1,2),(2,3),(3,4)")
sql := "explain analyze select * from t1 use index(a) where a > 1;"
rows := tk.MustQuery(sql).Rows()
c.Assert(len(rows), Equals, 3)
explain := fmt.Sprintf("%v", rows[0])
c.Assert(explain, Matches, ".*time:.*loops:.*index_task:.*table_task: {total_time.*num.*concurrency.*}.*")
indexExplain := fmt.Sprintf("%v", rows[1])
tableExplain := fmt.Sprintf("%v", rows[2])
c.Assert(indexExplain, Matches, ".*time:.*loops:.*cop_task:.*")
c.Assert(tableExplain, Matches, ".*time:.*loops:.*cop_task:.*")
}
func (s *testSerialSuite1) TestHashAggRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int)")
tk.MustExec("insert into t1 values (1,2),(2,3),(3,4)")
sql := "explain analyze SELECT /*+ HASH_AGG() */ count(*) FROM t1 WHERE a < 10;"
rows := tk.MustQuery(sql).Rows()
c.Assert(len(rows), Equals, 5)
explain := fmt.Sprintf("%v", rows[0])
c.Assert(explain, Matches, ".*time:.*loops:.*partial_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*final_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*")
}
func (s *testSerialSuite1) TestIndexMergeRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("set @@tidb_enable_index_merge = 1")
tk.MustExec("create table t1(id int primary key, a int, b int, c int, d int)")
tk.MustExec("create index t1a on t1(a)")
tk.MustExec("create index t1b on t1(b)")
tk.MustExec("insert into t1 values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5)")
sql := "explain analyze select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4;"
rows := tk.MustQuery(sql).Rows()
c.Assert(len(rows), Equals, 4)
explain := fmt.Sprintf("%v", rows[0])
c.Assert(explain, Matches, ".*time:.*loops:.*index_task:{fetch_handle:.*, merge:.*}.*table_task:{num.*concurrency.*fetch_row.*wait_time.*}.*")
tableRangeExplain := fmt.Sprintf("%v", rows[1])
indexExplain := fmt.Sprintf("%v", rows[2])
tableExplain := fmt.Sprintf("%v", rows[3])
c.Assert(tableRangeExplain, Matches, ".*time:.*loops:.*cop_task:.*")
c.Assert(indexExplain, Matches, ".*time:.*loops:.*cop_task:.*")
c.Assert(tableExplain, Matches, ".*time:.*loops:.*cop_task:.*")
tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
sql = "select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4 order by a"
tk.MustQuery(sql).Check(testkit.Rows("1 1 1 1 1", "5 5 5 5 5"))
}
func (s *testSuite) TestCollectDMLRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int, unique index (a))")
testSQLs := []string{
"insert ignore into t1 values (5,5);",
"insert into t1 values (5,5) on duplicate key update a=a+1;",
"replace into t1 values (5,6),(6,7)",
"update t1 set a=a+1 where a=6;",
}
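// getRootStats returns the runtime statistics string of the root executor of the most recently executed statement.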
getRootStats := func() string {
info := tk.Se.ShowProcess()
c.Assert(info, NotNil)
p, ok := info.Plan.(plannercore.Plan)
c.Assert(ok, IsTrue)
stats := tk.Se.GetSessionVars().StmtCtx.RuntimeStatsColl.GetRootStats(p.ID())
return stats.String()
}
for _, sql := range testSQLs {
tk.MustExec(sql)
c.Assert(getRootStats(), Matches, "time.*loops.*Get.*num_rpc.*total_time.*")
}
// Test for lock keys stats.
tk.MustExec("begin pessimistic")
tk.MustExec("update t1 set b=b+1")
c.Assert(getRootStats(), Matches, "time.*lock_keys.*time.* region.* keys.* lock_rpc:.* rpc_count.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustQuery("select * from t1 for update").Check(testkit.Rows("5 6", "7 7"))
c.Assert(getRootStats(), Matches, "time.*lock_keys.*time.* region.* keys.* lock_rpc:.* rpc_count.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("insert ignore into t1 values (9,9)")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prepare:.*, check_insert: {total_time:.*, mem_insert_time:.*, prefetch:.*, rpc:{BatchGet:{num_rpc:.*, total_time:.*}}}.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("insert into t1 values (10,10) on duplicate key update a=a+1")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prepare:.*, check_insert: {total_time:.*, mem_insert_time:.*, prefetch:.*, rpc:{BatchGet:{num_rpc:.*, total_time:.*}.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("insert into t1 values (1,2)")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prepare:.*, insert:.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("insert ignore into t1 values(11,11) on duplicate key update `a`=`a`+1")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prepare:.*, check_insert: {total_time:.*, mem_insert_time:.*, prefetch:.*, rpc:.*}")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("replace into t1 values (1,4)")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prefetch:.*, rpc:.*")
tk.MustExec("rollback")
}
func (s *testSuite) TestIssue13758(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (pk int(11) primary key, a int(11) not null, b int(11), key idx_b(b), key idx_a(a))")
tk.MustExec("insert into `t1` values (1,1,0),(2,7,6),(3,2,null),(4,1,null),(5,4,5)")
tk.MustExec("create table t2 (a int)")
tk.MustExec("insert into t2 values (1),(null)")
tk.MustQuery("select (select a from t1 use index(idx_a) where b >= t2.a order by a limit 1) as field from t2").Check(testkit.Rows(
"4",
"<nil>",
))
}
func (s *testCoprCache) SetUpSuite(c *C) {
originConfig := config.GetGlobalConfig()
config.StoreGlobalConfig(config.NewConfig())
defer config.StoreGlobalConfig(originConfig)
cli := &regionProperityClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
var err error
s.store, err = mockstore.NewMockStore(
mockstore.WithClusterInspector(func(c cluster.Cluster) {
mockstore.BootstrapWithSingleStore(c)
s.cls = c
}),
mockstore.WithClientHijacker(hijackClient),
)
c.Assert(err, IsNil)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
}
func (s *testCoprCache) TearDownSuite(c *C) {
s.dom.Close()
s.store.Close()
}
func (s *testCoprCache) TestIntegrationCopCache(c *C) {
originConfig := config.GetGlobalConfig()
config.StoreGlobalConfig(config.NewConfig())
defer config.StoreGlobalConfig(originConfig)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key)")
tblInfo, err := s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tid := tblInfo.Meta().ID
tk.MustExec(`insert into t values(1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12)`)
tableStart := tablecodec.GenTableRecordPrefix(tid)
s.cls.SplitKeys(tableStart, tableStart.PrefixNext(), 6)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/cophandler/mockCopCacheInUnistore", `return(123)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/cophandler/mockCopCacheInUnistore"), IsNil)
}()
rows := tk.MustQuery("explain analyze select * from t where t.a < 10").Rows()
c.Assert(rows[0][2], Equals, "9")
c.Assert(strings.Contains(rows[0][5].(string), "cop_task: {num: 5"), Equals, true)
c.Assert(strings.Contains(rows[0][5].(string), "copr_cache_hit_ratio: 0.00"), Equals, true)
rows = tk.MustQuery("explain analyze select * from t").Rows()
c.Assert(rows[0][2], Equals, "12")
c.Assert(strings.Contains(rows[0][5].(string), "cop_task: {num: 6"), Equals, true)
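// Extract the copr_cache_hit_ratio value from the runtime stats; the second scan should report a positive hit ratio.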
hitRatioIdx := strings.Index(rows[0][5].(string), "copr_cache_hit_ratio:") + len("copr_cache_hit_ratio: ")
c.Assert(hitRatioIdx >= len("copr_cache_hit_ratio: "), Equals, true)
hitRatio, err := strconv.ParseFloat(rows[0][5].(string)[hitRatioIdx:hitRatioIdx+4], 64)
c.Assert(err, IsNil)
c.Assert(hitRatio > 0, Equals, true)
// Test for cop cache disabled.
cfg := config.NewConfig()
cfg.TiKVClient.CoprCache.CapacityMB = 0
config.StoreGlobalConfig(cfg)
rows = tk.MustQuery("explain analyze select * from t where t.a < 10").Rows()
c.Assert(rows[0][2], Equals, "9")
c.Assert(strings.Contains(rows[0][5].(string), "copr_cache: disabled"), Equals, true)
}
func (s *testSerialSuite) TestCoprocessorOOMTicase(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`set @@tidb_wait_split_region_finish=1`)
// create table for non keep-order case
tk.MustExec("drop table if exists t5")
tk.MustExec("create table t5(id int)")
tk.MustQuery(`split table t5 between (0) and (10000) regions 10`).Check(testkit.Rows("9 1"))
// create table for keep-order case
tk.MustExec("drop table if exists t6")
tk.MustExec("create table t6(id int, index(id))")
tk.MustQuery(`split table t6 between (0) and (10000) regions 10`).Check(testkit.Rows("10 1"))
tk.MustQuery("split table t6 INDEX id between (0) and (10000) regions 10;").Check(testkit.Rows("10 1"))
count := 10
for i := 0; i < count; i++ {
tk.MustExec(fmt.Sprintf("insert into t5 (id) values (%v)", i))
tk.MustExec(fmt.Sprintf("insert into t6 (id) values (%v)", i))
}
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionLog
})
testcases := []struct {
name string
sql string
}{
{
name: "keep Order",
sql: "select id from t6 order by id",
},
{
name: "non keep Order",
sql: "select id from t5",
},
}
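// f runs every case under a memory quota between one and two mock copResponse sizes and asserts that the max consumed
// memory exceeds the quota (i.e. the OOM action fired) while the query still returns all rows.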
f := func() {
for _, testcase := range testcases {
c.Log(testcase.name)
// larger than one copResponse, smaller than two copResponses
quota := 2*copr.MockResponseSizeForTest - 100
se, err := session.CreateSession4Test(s.store)
c.Check(err, IsNil)
tk.Se = se
tk.MustExec("use test")
tk.MustExec(fmt.Sprintf("set @@tidb_mem_quota_query=%v;", quota))
var expect []string
for i := 0; i < count; i++ {
expect = append(expect, fmt.Sprintf("%v", i))
}
tk.MustQuery(testcase.sql).Sort().Check(testkit.Rows(expect...))
// assert oom action worked by max consumed > memory quota
c.Assert(tk.Se.GetSessionVars().StmtCtx.MemTracker.MaxConsumed(), Greater, int64(quota))
se.Close()
}
}
// ticase-4169, trigger oom action twice after workers consuming all the data
err := failpoint.Enable("github.com/pingcap/tidb/store/copr/ticase-4169", `return(true)`)
c.Assert(err, IsNil)
f()
err = failpoint.Disable("github.com/pingcap/tidb/store/copr/ticase-4169")
c.Assert(err, IsNil)
// ticase-4170, trigger oom action twice after iterator receiving all the data.
err = failpoint.Enable("github.com/pingcap/tidb/store/copr/ticase-4170", `return(true)`)
c.Assert(err, IsNil)
f()
err = failpoint.Disable("github.com/pingcap/tidb/store/copr/ticase-4170")
c.Assert(err, IsNil)
// ticase-4171, trigger oom before reading or consuming any data
err = failpoint.Enable("github.com/pingcap/tidb/store/copr/ticase-4171", `return(true)`)
c.Assert(err, IsNil)
f()
err = failpoint.Disable("github.com/pingcap/tidb/store/copr/ticase-4171")
c.Assert(err, IsNil)
}
func (s *testSuite) TestIssue20237(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t, s")
tk.MustExec("create table t(a date, b float)")
tk.MustExec("create table s(b float)")
tk.MustExec(`insert into t values(NULL,-37), ("2011-11-04",105), ("2013-03-02",-22), ("2006-07-02",-56), (NULL,124), (NULL,111), ("2018-03-03",-5);`)
tk.MustExec(`insert into s values(-37),(105),(-22),(-56),(124),(105),(111),(-5);`)
tk.MustQuery(`select count(distinct t.a, t.b) from t join s on t.b= s.b;`).Check(testkit.Rows("4"))
}
func (s *testSerialSuite) TestIssue19148(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a decimal(16, 2));")
tk.MustExec("select * from t where a > any_value(a);")
ctx := tk.Se.(sessionctx.Context)
is := domain.GetDomain(ctx).InfoSchema()
tblInfo, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
c.Assert(int(tblInfo.Meta().Columns[0].Flag), Equals, 0)
}
func (s *testSuite) TestIssue19667(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a DATETIME)")
tk.MustExec("INSERT INTO t VALUES('1988-04-17 01:59:59')")
tk.MustQuery(`SELECT DATE_ADD(a, INTERVAL 1 SECOND) FROM t`).Check(testkit.Rows("1988-04-17 02:00:00"))
}
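// issue20975Prepare creates two test sessions on the same store; tk1 sets up table t1 and tk2 runs DDL concurrently in the TestIssue20975* cases.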
func issue20975Prepare(c *C, store kv.Storage) (*testkit.TestKit, *testkit.TestKit) {
tk1 := testkit.NewTestKit(c, store)
tk2 := testkit.NewTestKit(c, store)
tk1.MustExec("use test")
tk1.MustExec("drop table if exists t1, t2")
tk2.MustExec("use test")
tk1.MustExec("create table t1(id int primary key, c int)")
tk1.MustExec("insert into t1 values(1, 10), (2, 20)")
return tk1, tk2
}
func (s *testSuite) TestIssue20975UpdateNoChange(c *C) {
tk1, tk2 := issue20975Prepare(c, s.store)
tk1.MustExec("begin pessimistic")
tk1.MustExec("update t1 set c=c")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdate(c *C) {
tk1, tk2 := issue20975Prepare(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdatePointGet(c *C) {
tk1, tk2 := issue20975Prepare(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id=1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id=1 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdateBatchPointGet(c *C) {
tk1, tk2 := issue20975Prepare(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id in (1, 2) for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id in (1, 2) for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
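// issue20975PreparePartitionTable is the partitioned-table variant of issue20975Prepare.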
func issue20975PreparePartitionTable(c *C, store kv.Storage) (*testkit.TestKit, *testkit.TestKit) {
tk1 := testkit.NewTestKit(c, store)
tk2 := testkit.NewTestKit(c, store)
tk1.MustExec("use test")
tk1.MustExec("drop table if exists t1, t2")
tk2.MustExec("use test")
tk1.MustExec(`create table t1(id int primary key, c int) partition by range (id) (
partition p1 values less than (10),
partition p2 values less than (20)
)`)
tk1.MustExec("insert into t1 values(1, 10), (2, 20), (11, 30), (12, 40)")
return tk1, tk2
}
func (s *testSuite) TestIssue20975UpdateNoChangeWithPartitionTable(c *C) {
tk1, tk2 := issue20975PreparePartitionTable(c, s.store)
tk1.MustExec("begin pessimistic")
tk1.MustExec("update t1 set c=c")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdateWithPartitionTable(c *C) {
tk1, tk2 := issue20975PreparePartitionTable(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdatePointGetWithPartitionTable(c *C) {
tk1, tk2 := issue20975PreparePartitionTable(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id=1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id=12 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id=1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id=12 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdateBatchPointGetWithPartitionTable(c *C) {
tk1, tk2 := issue20975PreparePartitionTable(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id in (1, 2) for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id in (11, 12) for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id in (1, 11) for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id in (1, 2) for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id in (11, 12) for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id in (1, 11) for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20305(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t2 (a year(4))")
tk.MustExec("insert into t2 values(69)")
tk.MustQuery("select * from t2 where a <= 69").Check(testkit.Rows("2069"))
// the following test is a regression test that matches MySQL's behavior.
tk.MustExec("drop table if exists t3")
tk.MustExec("CREATE TABLE `t3` (`y` year DEFAULT NULL, `a` int DEFAULT NULL)")
tk.MustExec("INSERT INTO `t3` VALUES (2069, 70), (2010, 11), (2155, 2156), (2069, 69)")
tk.MustQuery("SELECT * FROM `t3` where y <= a").Check(testkit.Rows("2155 2156"))
}
func (s *testSuite) TestIssue22817(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t3")
tk.MustExec("create table t3 (a year)")
tk.MustExec("insert into t3 values (1991), (\"1992\"), (\"93\"), (94)")
tk.MustQuery("select * from t3 where a >= NULL").Check(testkit.Rows())
}
func (s *testSuite) TestIssue13953(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE `t` (`id` int(11) DEFAULT NULL, `tp_bigint` bigint(20) DEFAULT NULL )")
tk.MustExec("insert into t values(0,1),(1,9215570218099803537)")
tk.MustQuery("select A.tp_bigint,B.id from t A join t B on A.id < B.id * 16 where A.tp_bigint = B.id;").Check(
testkit.Rows("1 1"))
}
func (s *testSuite) TestZeroDateTimeCompatibility(c *C) {
SQLs := []string{
`select YEAR(0000-00-00), YEAR("0000-00-00")`,
`select MONTH(0000-00-00), MONTH("0000-00-00")`,
`select DAYOFWEEK(0000-00-00), DAYOFWEEK("0000-00-00")`,
`select DAYOFMONTH(0000-00-00), DAYOFMONTH("0000-00-00")`,
`select DAYOFYEAR(0000-00-00), DAYOFYEAR("0000-00-00")`,
`select QUARTER(0000-00-00), QUARTER("0000-00-00")`,
`select EXTRACT(DAY FROM 0000-00-00), EXTRACT(DAY FROM "0000-00-00")`,
`select EXTRACT(MONTH FROM 0000-00-00), EXTRACT(MONTH FROM "0000-00-00")`,
`select EXTRACT(YEAR FROM 0000-00-00), EXTRACT(YEAR FROM "0000-00-00")`,
`select EXTRACT(WEEK FROM 0000-00-00), EXTRACT(WEEK FROM "0000-00-00")`,
`select EXTRACT(QUARTER FROM 0000-00-00), EXTRACT(QUARTER FROM "0000-00-00")`,
}
tk := testkit.NewTestKit(c, s.store)
for _, t := range SQLs {
fmt.Println(t)
tk.MustQuery(t).Check(testkit.Rows("0 <nil>"))
c.Assert(tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1))
}
}
// https://github.com/pingcap/tidb/issues/24165.
func (s *testSuite) TestInvalidDateValueInCreateTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE';")
tk.MustGetErrCode("create table t (a datetime default '2999-00-00 00:00:00');", errno.ErrInvalidDefault)
tk.MustGetErrCode("create table t (a datetime default '2999-02-30 00:00:00');", errno.ErrInvalidDefault)
tk.MustExec("create table t (a datetime);")
tk.MustGetErrCode("alter table t modify column a datetime default '2999-00-00 00:00:00';", errno.ErrInvalidDefault)
tk.MustExec("drop table if exists t;")
tk.MustExec("set @@sql_mode = (select replace(@@sql_mode,'NO_ZERO_IN_DATE',''));")
tk.MustExec("set @@sql_mode = (select replace(@@sql_mode,'NO_ZERO_DATE',''));")
tk.MustExec("set @@sql_mode=(select concat(@@sql_mode, ',ALLOW_INVALID_DATES'));")
// Test create table with zero datetime as a default value.
tk.MustExec("create table t (a datetime default '2999-00-00 00:00:00');")
tk.MustExec("drop table if exists t;")
// Test create table with invalid datetime(02-30) as a default value.
tk.MustExec("create table t (a datetime default '2999-02-30 00:00:00');")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a datetime);")
tk.MustExec("alter table t modify column a datetime default '2999-00-00 00:00:00';")
tk.MustExec("drop table if exists t;")
}
func (s *testSuite) TestOOMActionPriority(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t0")
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("drop table if exists t3")
tk.MustExec("drop table if exists t4")
tk.MustExec("create table t0(a int)")
tk.MustExec("insert into t0 values(1)")
tk.MustExec("create table t1(a int)")
tk.MustExec("insert into t1 values(1)")
tk.MustExec("create table t2(a int)")
tk.MustExec("insert into t2 values(1)")
tk.MustExec("create table t3(a int)")
tk.MustExec("insert into t3 values(1)")
tk.MustExec("create table t4(a int)")
tk.MustExec("insert into t4 values(1)")
tk.MustQuery("select * from t0 join t1 join t2 join t3 join t4 order by t0.a").Check(testkit.Rows("1 1 1 1 1"))
action := tk.Se.GetSessionVars().StmtCtx.MemTracker.GetFallbackForTest()
// check that the first 5 actions are rate-limit actions.
for i := 0; i < 5; i++ {
c.Assert(action.GetPriority(), Equals, int64(memory.DefRateLimitPriority))
action = action.GetFallback()
}
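// The remaining fallback actions should all be spill actions, ending with the final log action.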
for action.GetFallback() != nil {
c.Assert(action.GetPriority(), Equals, int64(memory.DefSpillPriority))
action = action.GetFallback()
}
c.Assert(action.GetPriority(), Equals, int64(memory.DefLogPriority))
}
func (s *testSuite) Test17780(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t0")
tk.MustExec("create table t0 (c0 double)")
tk.MustExec("insert into t0 values (1e30)")
tk.MustExec("update t0 set c0=0 where t0.c0 like 0")
// the update should not affect c0
tk.MustQuery("select count(*) from t0 where c0 = 0").Check(testkit.Rows("0"))
}
func (s *testSuite) TestIssue9918(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a year)")
tk.MustExec("insert into t values(0)")
tk.MustQuery("select cast(a as char) from t").Check(testkit.Rows("0000"))
}
func (s *testSuite) Test13004(c *C) {
tk := testkit.NewTestKit(c, s.store)
// see https://dev.mysql.com/doc/refman/5.6/en/date-and-time-literals.html, timestamp here actually produces a datetime
tk.MustQuery("SELECT TIMESTAMP '9999-01-01 00:00:00'").Check(testkit.Rows("9999-01-01 00:00:00"))
}
func (s *testSuite) Test12178(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists ta")
tk.MustExec("create table ta(id decimal(60,2))")
tk.MustExec("insert into ta values (JSON_EXTRACT('{\"c\": \"1234567890123456789012345678901234567890123456789012345\"}', '$.c'))")
tk.MustQuery("select * from ta").Check(testkit.Rows("1234567890123456789012345678901234567890123456789012345.00"))
}
func (s *testSuite) Test11883(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (f1 json)")
tk.MustExec("insert into t1(f1) values ('\"asd\"'),('\"asdf\"'),('\"asasas\"')")
tk.MustQuery("select f1 from t1 where json_extract(f1,\"$\") in (\"asd\",\"asasas\",\"asdf\")").Check(testkit.Rows("\"asd\"", "\"asdf\"", "\"asasas\""))
tk.MustQuery("select f1 from t1 where json_extract(f1, '$') = 'asd'").Check(testkit.Rows("\"asd\""))
// MySQL produces an empty result for the following SQL; we suspect this is a MySQL bug.
tk.MustQuery("select f1 from t1 where case json_extract(f1,\"$\") when \"asd\" then 1 else 0 end").Check(testkit.Rows("\"asd\""))
tk.MustExec("delete from t1")
tk.MustExec("insert into t1 values ('{\"a\": 1}')")
// the first value in the tuple should be interpreted as string instead of JSON, so no row will be returned
tk.MustQuery("select f1 from t1 where f1 in ('{\"a\": 1}', 'asdf', 'asdf')").Check(testkit.Rows())
// and if we explicitly cast it into a JSON value, the check will pass
tk.MustQuery("select f1 from t1 where f1 in (cast('{\"a\": 1}' as JSON), 'asdf', 'asdf')").Check(testkit.Rows("{\"a\": 1}"))
tk.MustQuery("select json_extract('\"asd\"', '$') = 'asd'").Check(testkit.Rows("1"))
tk.MustQuery("select json_extract('\"asd\"', '$') <=> 'asd'").Check(testkit.Rows("1"))
tk.MustQuery("select json_extract('\"asd\"', '$') <> 'asd'").Check(testkit.Rows("0"))
tk.MustQuery("select json_extract('{\"f\": 1.0}', '$.f') = 1.0").Check(testkit.Rows("1"))
tk.MustQuery("select json_extract('{\"f\": 1.0}', '$.f') = '1.0'").Check(testkit.Rows("0"))
tk.MustQuery("select json_extract('{\"n\": 1}', '$') = '{\"n\": 1}'").Check(testkit.Rows("0"))
tk.MustQuery("select json_extract('{\"n\": 1}', '$') <> '{\"n\": 1}'").Check(testkit.Rows("1"))
}
func (s *testSuite) Test15492(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int)")
tk.MustExec("insert into t values (2, 20), (1, 10), (3, 30)")
tk.MustQuery("select a + 1 as field1, a as field2 from t order by field1, field2 limit 2").Check(testkit.Rows("2 1", "3 2"))
}
func (s testSuite) TestTrackAggMemoryUsage(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("set tidb_track_aggregate_memory_usage = off;")
rows := tk.MustQuery("explain analyze select /*+ HASH_AGG() */ sum(a) from t").Rows()
c.Assert(rows[0][7], Equals, "N/A")
rows = tk.MustQuery("explain analyze select /*+ STREAM_AGG() */ sum(a) from t").Rows()
c.Assert(rows[0][7], Equals, "N/A")
tk.MustExec("set tidb_track_aggregate_memory_usage = on;")
rows = tk.MustQuery("explain analyze select /*+ HASH_AGG() */ sum(a) from t").Rows()
c.Assert(rows[0][7], Not(Equals), "N/A")
rows = tk.MustQuery("explain analyze select /*+ STREAM_AGG() */ sum(a) from t").Rows()
c.Assert(rows[0][7], Not(Equals), "N/A")
}
func (s *testSuite) Test12201(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists e")
tk.MustExec("create table e (e enum('a', 'b'))")
tk.MustExec("insert into e values ('a'), ('b')")
tk.MustQuery("select * from e where case 1 when 0 then e end").Check(testkit.Rows())
tk.MustQuery("select * from e where case 1 when 1 then e end").Check(testkit.Rows("a", "b"))
tk.MustQuery("select * from e where case e when 1 then e end").Check(testkit.Rows("a"))
tk.MustQuery("select * from e where case 1 when e then e end").Check(testkit.Rows("a"))
}
func (s *testSuite) TestIssue21451(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (en enum('c', 'b', 'a'));")
tk.MustExec("insert into t values ('a'), ('b'), ('c');")
tk.MustQuery("select max(en) from t;").Check(testkit.Rows("c"))
tk.MustQuery("select min(en) from t;").Check(testkit.Rows("a"))
tk.MustQuery("select * from t order by en;").Check(testkit.Rows("c", "b", "a"))
tk.MustExec("drop table t")
tk.MustExec("create table t(s set('c', 'b', 'a'));")
tk.MustExec("insert into t values ('a'), ('b'), ('c');")
tk.MustQuery("select max(s) from t;").Check(testkit.Rows("c"))
tk.MustQuery("select min(s) from t;").Check(testkit.Rows("a"))
tk.MustExec("drop table t")
tk.MustExec("create table t(id int, en enum('c', 'b', 'a'))")
tk.MustExec("insert into t values (1, 'a'),(2, 'b'), (3, 'c'), (1, 'c');")
tk.MustQuery("select id, max(en) from t where id=1 group by id;").Check(testkit.Rows("1 c"))
tk.MustQuery("select id, min(en) from t where id=1 group by id;").Check(testkit.Rows("1 a"))
tk.MustExec("drop table t")
tk.MustExec("create table t(id int, s set('c', 'b', 'a'));")
tk.MustExec("insert into t values (1, 'a'),(2, 'b'), (3, 'c'), (1, 'c');")
tk.MustQuery("select id, max(s) from t where id=1 group by id;").Check(testkit.Rows("1 c"))
tk.MustQuery("select id, min(s) from t where id=1 group by id;").Check(testkit.Rows("1 a"))
tk.MustExec("drop table t")
tk.MustExec("create table t(e enum('e','d','c','b','a'))")
tk.MustExec("insert into t values ('e'),('d'),('c'),('b'),('a');")
tk.MustQuery("select * from t order by e limit 1;").Check(testkit.Rows("e"))
tk.MustExec("drop table t")
tk.MustExec("create table t(s set('e', 'd', 'c', 'b', 'a'))")
tk.MustExec("insert into t values ('e'),('d'),('c'),('b'),('a');")
tk.MustQuery("select * from t order by s limit 1;").Check(testkit.Rows("e"))
tk.MustExec("drop table t")
}
func (s *testSuite) TestIssue15563(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery("select distinct 0.7544678906163867 / 0.68234634;").Check(testkit.Rows("1.10569639842486251190"))
}
func (s *testSuite) TestIssue22231(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_issue_22231")
tk.MustExec("create table t_issue_22231(a datetime)")
tk.MustExec("insert into t_issue_22231 values('2020--05-20 01:22:12')")
tk.MustQuery("select * from t_issue_22231 where a >= '2020-05-13 00:00:00 00:00:00' and a <= '2020-05-28 23:59:59 00:00:00'").Check(testkit.Rows("2020-05-20 01:22:12"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect datetime value: '2020-05-13 00:00:00 00:00:00'", "Warning 1292 Truncated incorrect datetime value: '2020-05-28 23:59:59 00:00:00'"))
tk.MustQuery("select cast('2020-10-22 10:31-10:12' as datetime)").Check(testkit.Rows("2020-10-22 10:31:10"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect datetime value: '2020-10-22 10:31-10:12'"))
tk.MustQuery("select cast('2020-05-28 23:59:59 00:00:00' as datetime)").Check(testkit.Rows("2020-05-28 23:59:59"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect datetime value: '2020-05-28 23:59:59 00:00:00'"))
tk.MustExec("drop table if exists t_issue_22231")
}
func (s *testSuite) TestIssue22201(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustQuery("SELECT HEX(WEIGHT_STRING('ab' AS BINARY(1000000000000000000)));").Check(testkit.Rows("<nil>"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1301 Result of cast_as_binary() was larger than max_allowed_packet (67108864) - truncated"))
tk.MustQuery("SELECT HEX(WEIGHT_STRING('ab' AS char(1000000000000000000)));").Check(testkit.Rows("<nil>"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1301 Result of weight_string() was larger than max_allowed_packet (67108864) - truncated"))
}
func (s *testSuiteP1) TestIssue22941(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists m, mp")
tk.MustExec(`CREATE TABLE m (
mid varchar(50) NOT NULL,
ParentId varchar(50) DEFAULT NULL,
PRIMARY KEY (mid),
KEY ind_bm_parent (ParentId,mid)
)`)
// mp should have more columns than m
tk.MustExec(`CREATE TABLE mp (
mpid bigint(20) unsigned NOT NULL DEFAULT '0',
mid varchar(50) DEFAULT NULL COMMENT '模块主键',
sid int,
PRIMARY KEY (mpid)
);`)
tk.MustExec(`insert into mp values("1","1","0");`)
tk.MustExec(`insert into m values("0", "0");`)
rs := tk.MustQuery(`SELECT ( SELECT COUNT(1) FROM m WHERE ParentId = c.mid ) expand, bmp.mpid, bmp.mpid IS NULL,bmp.mpid IS NOT NULL, sid FROM m c LEFT JOIN mp bmp ON c.mid = bmp.mid WHERE c.ParentId = '0'`)
rs.Check(testkit.Rows("1 <nil> 1 0 <nil>"))
rs = tk.MustQuery(`SELECT bmp.mpid, bmp.mpid IS NULL,bmp.mpid IS NOT NULL FROM m c LEFT JOIN mp bmp ON c.mid = bmp.mid WHERE c.ParentId = '0'`)
rs.Check(testkit.Rows("<nil> 1 0"))
}
func (s *testSerialSuite) TestTxnWriteThroughputSLI(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int key, b int)")
c.Assert(failpoint.Enable("github.com/pingcap/tidb/util/sli/CheckTxnWriteThroughput", "return(true)"), IsNil)
defer func() {
err := failpoint.Disable("github.com/pingcap/tidb/util/sli/CheckTxnWriteThroughput")
c.Assert(err, IsNil)
}()
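// mustExec and errExec execute a statement (the latter expecting an error) and record it in the transaction write-throughput SLI with a fixed 1s write time.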
mustExec := func(sql string) {
tk.MustExec(sql)
tk.Se.GetTxnWriteThroughputSLI().FinishExecuteStmt(time.Second, tk.Se.AffectedRows(), tk.Se.GetSessionVars().InTxn())
}
errExec := func(sql string) {
_, err := tk.Exec(sql)
c.Assert(err, NotNil)
tk.Se.GetTxnWriteThroughputSLI().FinishExecuteStmt(time.Second, tk.Se.AffectedRows(), tk.Se.GetSessionVars().InTxn())
}
// Test insert in small txn
mustExec("insert into t values (1,3),(2,4)")
writeSLI := tk.Se.GetTxnWriteThroughputSLI()
c.Assert(writeSLI.IsInvalid(), Equals, false)
c.Assert(writeSLI.IsSmallTxn(), Equals, true)
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 2, writeSize: 58, readKeys: 0, writeKeys: 2, writeTime: 1s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test insert ... select ... from
mustExec("insert into t select b, a from t")
c.Assert(writeSLI.IsInvalid(), Equals, true)
c.Assert(writeSLI.IsSmallTxn(), Equals, true)
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: true, affectRow: 2, writeSize: 58, readKeys: 0, writeKeys: 2, writeTime: 1s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test for delete
mustExec("delete from t")
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 4, writeSize: 76, readKeys: 0, writeKeys: 4, writeTime: 1s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test insert not in small txn
mustExec("begin")
for i := 0; i < 20; i++ {
mustExec(fmt.Sprintf("insert into t values (%v,%v)", i, i))
c.Assert(writeSLI.IsSmallTxn(), Equals, true)
}
// Statements that affect 0 rows should not be recorded into the write time.
mustExec("select count(*) from t")
mustExec("select * from t")
mustExec("insert into t values (20,20)")
c.Assert(writeSLI.IsSmallTxn(), Equals, false)
mustExec("commit")
c.Assert(writeSLI.IsInvalid(), Equals, false)
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 21, writeSize: 609, readKeys: 0, writeKeys: 21, writeTime: 22s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// The SLI should be marked invalid when the transaction contains a replace ... select ... from ... statement.
mustExec("delete from t")
tk.Se.GetTxnWriteThroughputSLI().Reset()
mustExec("begin")
mustExec("insert into t values (1,3),(2,4)")
mustExec("replace into t select b, a from t")
mustExec("commit")
c.Assert(writeSLI.IsInvalid(), Equals, true)
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: true, affectRow: 4, writeSize: 116, readKeys: 0, writeKeys: 4, writeTime: 3s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test that the information of the last failed transaction is cleaned up.
err := failpoint.Disable("github.com/pingcap/tidb/util/sli/CheckTxnWriteThroughput")
c.Assert(err, IsNil)
mustExec("begin")
mustExec("insert into t values (1,3),(2,4)")
errExec("commit")
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 0, writeSize: 0, readKeys: 0, writeKeys: 0, writeTime: 0s")
c.Assert(failpoint.Enable("github.com/pingcap/tidb/util/sli/CheckTxnWriteThroughput", "return(true)"), IsNil)
mustExec("begin")
mustExec("insert into t values (5, 6)")
mustExec("commit")
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 1, writeSize: 29, readKeys: 0, writeKeys: 1, writeTime: 2s")
// Test for reset
tk.Se.GetTxnWriteThroughputSLI().Reset()
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 0, writeSize: 0, readKeys: 0, writeKeys: 0, writeTime: 0s")
}
func (s *testSuite) TestIssue23993(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
// Real cast to time should return NULL
tk.MustExec("drop table if exists t_issue_23993")
tk.MustExec("create table t_issue_23993(a double)")
tk.MustExec("insert into t_issue_23993 values(-790822912)")
tk.MustQuery("select cast(a as time) from t_issue_23993").Check(testkit.Rows("<nil>"))
tk.MustQuery("select a from t_issue_23993 where cast(a as time)").Check(testkit.Rows())
// Int cast to time should return NULL
tk.MustExec("drop table if exists t_issue_23993")
tk.MustExec("create table t_issue_23993(a int)")
tk.MustExec("insert into t_issue_23993 values(-790822912)")
tk.MustQuery("select cast(a as time) from t_issue_23993").Check(testkit.Rows("<nil>"))
tk.MustQuery("select a from t_issue_23993 where cast(a as time)").Check(testkit.Rows())
// Decimal cast to time should return NULL
tk.MustExec("drop table if exists t_issue_23993")
tk.MustExec("create table t_issue_23993(a decimal)")
tk.MustExec("insert into t_issue_23993 values(-790822912)")
tk.MustQuery("select cast(a as time) from t_issue_23993").Check(testkit.Rows("<nil>"))
tk.MustQuery("select a from t_issue_23993 where cast(a as time)").Check(testkit.Rows())
// String cast to time should not return NULL
tk.MustExec("drop table if exists t_issue_23993")
tk.MustExec("create table t_issue_23993(a varchar(255))")
tk.MustExec("insert into t_issue_23993 values('-790822912')")
tk.MustQuery("select cast(a as time) from t_issue_23993").Check(testkit.Rows("-838:59:59"))
tk.MustQuery("select a from t_issue_23993 where cast(a as time)").Check(testkit.Rows("-790822912"))
}
func (s *testSuiteP2) TestProjectionBitType(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) clustered);")
tk.MustExec("create table t1(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) nonclustered);")
tk.MustExec("insert into t(k1) select 1;")
tk.MustExec("insert into t1(k1) select 1;")
tk.MustExec("set @@tidb_enable_vectorized_expression = 0;")
// The following SQLs should return the same result.
tk.MustQuery("(select * from t where false) union(select * from t for update);").Check(testkit.Rows("1 \x01\xd5\xe4\xcf\u007f"))
tk.MustQuery("(select * from t1 where false) union(select * from t1 for update);").Check(testkit.Rows("1 \x01\xd5\xe4\xcf\u007f"))
tk.MustExec("set @@tidb_enable_vectorized_expression = 1;")
tk.MustQuery("(select * from t where false) union(select * from t for update);").Check(testkit.Rows("1 \x01\xd5\xe4\xcf\u007f"))
tk.MustQuery("(select * from t1 where false) union(select * from t1 for update);").Check(testkit.Rows("1 \x01\xd5\xe4\xcf\u007f"))
}
func (s *testSuite) TestIssue23609(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t1")
tk.MustExec("CREATE TABLE `t1` (\n `a` timestamp NULL DEFAULT NULL,\n `b` year(4) DEFAULT NULL,\n KEY `a` (`a`),\n KEY `b` (`b`)\n)")
tk.MustExec("insert into t1 values(\"2002-10-03 04:28:53\",2000), (\"2002-10-03 04:28:53\",2002), (NULL, 2002)")
tk.MustQuery("select /*+ inl_join (x,y) */ * from t1 x cross join t1 y on x.a=y.b").Check(testkit.Rows())
tk.MustQuery("select * from t1 x cross join t1 y on x.a>y.b order by x.a, x.b, y.a, y.b").Check(testkit.Rows("2002-10-03 04:28:53 2000 <nil> 2002", "2002-10-03 04:28:53 2000 2002-10-03 04:28:53 2000", "2002-10-03 04:28:53 2000 2002-10-03 04:28:53 2002", "2002-10-03 04:28:53 2002 <nil> 2002", "2002-10-03 04:28:53 2002 2002-10-03 04:28:53 2000", "2002-10-03 04:28:53 2002 2002-10-03 04:28:53 2002"))
tk.MustQuery("select * from t1 where a = b").Check(testkit.Rows())
tk.MustQuery("select * from t1 where a < b").Check(testkit.Rows())
c.Assert(tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(0))
}
func (s *testSuite1) TestIssue24091(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
defer tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int) partition by hash (a div 0) partitions 10;")
tk.MustExec("insert into t values (NULL);")
tk.MustQuery("select null div 0;").Check(testkit.Rows("<nil>"))
tk.MustQuery("select * from t;").Check(testkit.Rows("<nil>"))
}
func (s *testSerialSuite) TestIssue24210(c *C) {
tk := testkit.NewTestKit(c, s.store)
// for ProjectionExec
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockProjectionExecBaseExecutorOpenReturnedError", `return(true)`), IsNil)
_, err := tk.Exec("select a from (select 1 as a, 2 as b) t")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "mock ProjectionExec.baseExecutor.Open returned error")
err = failpoint.Disable("github.com/pingcap/tidb/executor/mockProjectionExecBaseExecutorOpenReturnedError")
c.Assert(err, IsNil)
// for HashAggExec
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockHashAggExecBaseExecutorOpenReturnedError", `return(true)`), IsNil)
_, err = tk.Exec("select sum(a) from (select 1 as a, 2 as b) t group by b")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "mock HashAggExec.baseExecutor.Open returned error")
err = failpoint.Disable("github.com/pingcap/tidb/executor/mockHashAggExecBaseExecutorOpenReturnedError")
c.Assert(err, IsNil)
// for StreamAggExec
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockStreamAggExecBaseExecutorOpenReturnedError", `return(true)`), IsNil)
_, err = tk.Exec("select sum(a) from (select 1 as a, 2 as b) t")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "mock StreamAggExec.baseExecutor.Open returned error")
err = failpoint.Disable("github.com/pingcap/tidb/executor/mockStreamAggExecBaseExecutorOpenReturnedError")
c.Assert(err, IsNil)
// for SelectionExec
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockSelectionExecBaseExecutorOpenReturnedError", `return(true)`), IsNil)
_, err = tk.Exec("select * from (select rand() as a) t where a > 0")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "mock SelectionExec.baseExecutor.Open returned error")
err = failpoint.Disable("github.com/pingcap/tidb/executor/mockSelectionExecBaseExecutorOpenReturnedError")
c.Assert(err, IsNil)
}
| [
"\"log_level\""
]
| []
| [
"log_level"
]
| [] | ["log_level"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gokiting.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
vendor/github.com/kubernetes-incubator/service-catalog/cmd/svcat/plugin/install.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"syscall"
"github.com/kubernetes-incubator/service-catalog/cmd/svcat/command"
"github.com/spf13/cobra"
"sigs.k8s.io/yaml"
)
type installCmd struct {
*command.Context
path string
svcatCmd *cobra.Command
}
// NewInstallCmd builds a "svcat install plugin" command
func NewInstallCmd(cxt *command.Context) *cobra.Command {
installCmd := &installCmd{Context: cxt}
cmd := &cobra.Command{
Use: "plugin",
Short: "Install svcat as a kubectl plugin",
Example: command.NormalizeExamples(`
svcat install plugin
svcat install plugin --plugins-path /tmp/kube/plugins
`),
RunE: func(cmd *cobra.Command, args []string) error {
return installCmd.run(cmd)
},
}
cmd.Flags().StringVarP(&installCmd.path, "plugins-path", "p", "",
"The installation path. Defaults to KUBECTL_PLUGINS_PATH, if defined, otherwise the plugins directory under the KUBECONFIG dir. In most cases, this is ~/.kube/plugins.")
cxt.Viper.BindEnv("plugins-path", EnvPluginPath)
return cmd
}
func (c *installCmd) run(cmd *cobra.Command) error {
c.svcatCmd = cmd.Root()
return c.install()
}
func (c *installCmd) install() error {
installPath := c.getInstallPath()
err := copyBinary(installPath)
if err != nil {
return err
}
manifest, err := c.generateManifest()
if err != nil {
return err
}
err = saveManifest(installPath, manifest)
if err != nil {
return err
}
fmt.Fprintf(c.Output, "Plugin has been installed to %s. Run kubectl plugin %s --help for help using the plugin.\n",
installPath, Name)
return nil
}
func (c *installCmd) getInstallPath() string {
pluginDir := c.getPluginsDir()
return filepath.Join(pluginDir, Name)
}
func (c *installCmd) getPluginsDir() string {
if c.path != "" {
return c.path
}
if kubeconfig := os.Getenv("KUBECONFIG"); kubeconfig != "" {
		kubeDir := filepath.Dir(kubeconfig)
return filepath.Join(kubeDir, "plugins")
}
home := getUserHomeDir()
return filepath.Join(home, ".kube", "plugins")
}
func (c *installCmd) generateManifest() ([]byte, error) {
m := &Manifest{}
m.Load(c.svcatCmd)
contents, err := yaml.Marshal(m)
if err != nil {
return nil, fmt.Errorf("could not marshall the generated manifest (%s)", err)
}
return contents, nil
}
func copyBinary(installPath string) error {
err := os.MkdirAll(installPath, 0755)
if err != nil {
return fmt.Errorf("could not create installation directory %s (%s)", installPath, err)
}
srcBin, err := os.Executable()
if err != nil {
return fmt.Errorf("could not retrieve the path to the currently running program (%s)", err)
}
binName := Name + getFileExt()
destBin := filepath.Join(installPath, binName)
err = copyFile(srcBin, destBin)
if err != nil {
return fmt.Errorf("could not copy %s to %s (%s)", srcBin, destBin, err)
}
return nil
}
func copyFile(src, dest string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.OpenFile(dest, syscall.O_CREAT|syscall.O_WRONLY, 0755)
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, in)
if err != nil {
return err
}
return out.Close()
}
func saveManifest(installPath string, manifest []byte) error {
manifestPath := filepath.Join(installPath, "plugin.yaml")
err := ioutil.WriteFile(manifestPath, []byte(manifest), 0644)
if err != nil {
return fmt.Errorf("could not write the plugin manifest to %s (%s)", manifestPath, err)
}
return nil
}
| [
"\"KUBECONFIG\""
]
| []
| [
"KUBECONFIG"
]
| [] | ["KUBECONFIG"] | go | 1 | 0 | |
src/send_slack_notification.py | import json
import shlex
import urllib2
import logging
import os
# Mapping CloudFormation status codes to colors for Slack message attachments
# Status codes from http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html
STATUS_COLORS = {
'CREATE_COMPLETE': 'good',
'CREATE_IN_PROGRESS': 'good',
'CREATE_FAILED': 'danger',
'DELETE_COMPLETE': 'good',
'DELETE_FAILED': 'danger',
'DELETE_IN_PROGRESS': 'good',
'ROLLBACK_COMPLETE': 'warning',
'ROLLBACK_FAILED': 'danger',
'ROLLBACK_IN_PROGRESS': 'warning',
'UPDATE_COMPLETE': 'good',
'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS': 'good',
'UPDATE_IN_PROGRESS': 'good',
'UPDATE_ROLLBACK_COMPLETE': 'warning',
'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS': 'warning',
'UPDATE_ROLLBACK_FAILED': 'danger',
'UPDATE_ROLLBACK_IN_PROGRESS': 'warning'
}
# List of properties from the SNS message that will be included in a Slack message
SNS_PROPERTIES_FOR_SLACK = [
'Timestamp',
'StackName'
]
logger = logging.getLogger()
logger.setLevel(logging.INFO)
SLACK_CHANNEL = os.environ['SLACK_CHANNEL']
SLACK_MSG_TEXT = os.environ['SLACK_MSG_TEXT']
SLACK_MSG_USER = os.environ['SLACK_MSG_USER']
SLACK_MSG_EMOJI = os.environ['SLACK_MSG_EMOJI']
SLACK_WEB_HOOK_URL = os.environ['SLACK_WEB_HOOK_URL']
def handler(event, context):
# records = event['Records']
# first_record = records[0]
# sns = first_record['Sns']
# sns_message = sns['Message']
sns_message = event['Records'][0]['Sns']['Message']
# using shlex to split the cfn message into a dictionary
cfn_msg_dict = dict(token.split('=', 1) for token in shlex.split(sns_message))
# ignore messages that do not pertain to the Stack as a whole
if not cfn_msg_dict['ResourceType'] == 'AWS::CloudFormation::Stack':
return
message_to_slack = get_message_for_slack(cfn_msg_dict)
data = json.dumps(message_to_slack)
req = urllib2.Request(SLACK_WEB_HOOK_URL, data, {'Content-Type': 'application/json'})
urllib2.urlopen(req)
return {'message': 'Notified'}
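# Illustrative only: CloudFormation stack-event SNS messages arrive as a single
# string of key='value' pairs (the sample below is hypothetical, not a captured
# event), which is why shlex.split plus token.split('=', 1) is enough to turn
# the message into a dictionary:
#
#   StackName='my-stack' ResourceType='AWS::CloudFormation::Stack'
#   ResourceStatus='CREATE_COMPLETE' Timestamp='2021-01-01T00:00:00.000Z'
#
# parses to {'StackName': 'my-stack', 'ResourceType': 'AWS::CloudFormation::Stack', ...}.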
def get_message_for_slack(cfn_msg_dict):
attachment = get_attachment(cfn_msg_dict)
message_to_slack = {
'icon_emoji': SLACK_MSG_EMOJI,
'username': SLACK_MSG_USER,
'text': SLACK_MSG_TEXT,
'attachments': attachment,
'channel': SLACK_CHANNEL
}
return message_to_slack
def get_attachment(cfn_msg_dict):
title = "Stack: {} has reached status {}".format(cfn_msg_dict['StackName'], cfn_msg_dict['ResourceStatus'])
color = STATUS_COLORS.get(cfn_msg_dict['ResourceStatus'], '#000000')
attachment = [{
'fallback': SLACK_MSG_TEXT,
'title': title,
'fields': get_fields_for_attachment(cfn_msg_dict),
'color': color,
}]
return attachment
def get_fields_for_attachment(cfn_msg_dict):
fields = []
for k, v in cfn_msg_dict.items():
if k in SNS_PROPERTIES_FOR_SLACK:
fields.append({"title": k, "value": v, "short": "true"})
return fields
| []
| []
| [
"SLACK_WEB_HOOK_URL",
"SLACK_MSG_EMOJI",
"SLACK_CHANNEL",
"SLACK_MSG_USER",
"SLACK_MSG_TEXT"
]
| [] | ["SLACK_WEB_HOOK_URL", "SLACK_MSG_EMOJI", "SLACK_CHANNEL", "SLACK_MSG_USER", "SLACK_MSG_TEXT"] | python | 5 | 0 | |
main.go | package main
import (
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
)
func main() {
PORT := os.Getenv("PORT")
uri := os.Getenv("BB_PROXY_URI")
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Println("err:", err)
return
}
query, err := url.ParseQuery(string(body))
if err != nil {
log.Println("err:", err)
return
}
urlStr := uri + "?" + query.Encode()
log.Println(urlStr)
_, err = http.Get(urlStr)
if err != nil {
log.Println(err)
return
}
})
if err := http.ListenAndServe(":"+PORT, nil); err != nil {
log.Fatal(err)
}
}
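// Illustrative usage (hypothetical values): with PORT=8080 and
// BB_PROXY_URI=https://example.invalid/hook set, a request such as
//
//	curl -d 'user=alice&event=ping' http://localhost:8080/
//
// is re-issued by this proxy as GET https://example.invalid/hook?event=ping&user=alice,
// i.e. the form-encoded POST body is forwarded upstream as the query string.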
| [
"\"PORT\"",
"\"BB_PROXY_URI\""
]
| []
| [
"PORT",
"BB_PROXY_URI"
]
| [] | ["PORT", "BB_PROXY_URI"] | go | 2 | 0 | |
device/robot/main.go | package main
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"strconv"
"time"
"gobot.io/x/gobot"
"gobot.io/x/gobot/drivers/aio"
"gobot.io/x/gobot/platforms/firmata"
)
func main() {
// Validating parameters (environment variables).
key := []byte(os.Getenv("ENCRYPTION_KEY"))
if len(key) != 32 {
log.Fatalf("ENCRYPTION_KEY must be 32-bytes long. Current key is \"%s\" which is %d bytes long.", key, len(key))
}
serverURL := os.Getenv("SERVER_URL")
if serverURL == "" {
log.Fatalf("SERVER_URL can not be empty.")
}
frequency, err := time.ParseDuration(os.Getenv("FREQUENCY"))
if err != nil {
log.Fatalf("Invalid FREQUENCY (\"%s\"):%q", os.Getenv("FREQUENCY"), err)
}
// Starting robot.
firmataAdaptor := firmata.NewTCPAdaptor(os.Args[1])
tempSensor := aio.NewGroveTemperatureSensorDriver(firmataAdaptor, "3")
robot := gobot.NewRobot("temperatureRobot",
[]gobot.Connection{firmataAdaptor},
[]gobot.Device{tempSensor},
func() {
gobot.Every(frequency, func() {
if err := send(serverURL, tempSensor.Temperature(), key); err != nil {
log.Println(err)
}
fmt.Println("Successfully sent: %f", tempSensor.Temperature())
})
},
)
robot.Start()
}
var client = &http.Client{
Timeout: time.Second * 50,
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 50 * time.Second,
}).Dial,
TLSHandshakeTimeout: 50 * time.Second,
},
}
func send(u string, temp float64, key []byte) error {
e, err := encrypt([]byte(strconv.FormatFloat(temp, 'f', -1, 64)), key)
if err != nil {
return fmt.Errorf("Error encrypting temperature: %q", err)
}
resp, err := client.Post(u, "application/octet-stream", bytes.NewReader([]byte(e)))
if err != nil {
return fmt.Errorf("Error trying to send POST request: %q. URL:%s", err, u)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
b, _ := ioutil.ReadAll(resp.Body)
return fmt.Errorf("Invalid status code in POST request: %d. Message: %s", resp.StatusCode, string(b))
}
return nil
}
func encrypt(plaintext []byte, key []byte) ([]byte, error) {
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
gcm, err := cipher.NewGCM(c)
if err != nil {
return nil, err
}
nonce := make([]byte, gcm.NonceSize())
if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
return nil, err
}
return gcm.Seal(nonce, nonce, plaintext, nil), nil
}
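// decrypt is an illustrative counterpart to encrypt, sketching how a receiving
// server could recover the reading; it is not used by this program and only
// assumes the nonce-prefixed layout produced by gcm.Seal above.
func decrypt(ciphertext []byte, key []byte) ([]byte, error) {
	c, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(c)
	if err != nil {
		return nil, err
	}
	if len(ciphertext) < gcm.NonceSize() {
		return nil, fmt.Errorf("ciphertext shorter than GCM nonce")
	}
	nonce, sealed := ciphertext[:gcm.NonceSize()], ciphertext[gcm.NonceSize():]
	// Open verifies the GCM tag and returns the plaintext bytes.
	return gcm.Open(nil, nonce, sealed, nil)
}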
| [
"\"ENCRYPTION_KEY\"",
"\"SERVER_URL\"",
"\"FREQUENCY\"",
"\"FREQUENCY\""
]
| []
| [
"FREQUENCY",
"SERVER_URL",
"ENCRYPTION_KEY"
]
| [] | ["FREQUENCY", "SERVER_URL", "ENCRYPTION_KEY"] | go | 3 | 0 | |
build/params_mainnet.go | // +build !debug
// +build !2k
// +build !testground
// +build !calibnet
// +build !nerpanet
// +build !butterflynet
// +build !interopnet
package build
import (
"math"
"os"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandIncentinet,
UpgradeSmokeHeight: DrandMainnet,
}
const BootstrappersFile = "mainnet.pi"
const GenesisFile = "mainnet.car"
const UpgradeBreezeHeight = 41280
const BreezeGasTampingDuration = 120
const UpgradeSmokeHeight = 51000
const UpgradeIgnitionHeight = 94000
const UpgradeRefuelHeight = 130800
const UpgradeAssemblyHeight = 138720
const UpgradeTapeHeight = 140760
// This signals our tentative epoch for mainnet launch. Can make it later, but not earlier.
// Miners, clients, developers, custodians all need time to prepare.
// We still have upgrades and state changes to do, but can happen after signaling timing here.
const UpgradeLiftoffHeight = 148888
const UpgradeKumquatHeight = 170000
const UpgradeCalicoHeight = 265200
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)
const UpgradeOrangeHeight = 336458
// 2020-12-22T02:00:00Z
var UpgradeClausHeight = abi.ChainEpoch(343200)
// 2021-03-04T00:00:30Z
const UpgradeTrustHeight = 550321
// 2021-04-12T22:00:00Z
const UpgradeNorwegianHeight = 665280
// 2021-04-29T06:00:00Z
const UpgradeTurboHeight = 712320
// 2021-06-30T22:00:00Z
var UpgradeHyperdriveHeight = abi.ChainEpoch(892800)
func init() {
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
SetAddressNetwork(address.Mainnet)
}
if os.Getenv("LOTUS_DISABLE_HYPERDRIVE") == "1" {
UpgradeHyperdriveHeight = math.MaxInt64
}
Devnet = false
BuildType = BuildMainnet
}
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)
// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 4
// we skip checks on message validity in this block to sidestep the zero-bls signature
var WhitelistedBlock = MustParseCid("bafy2bzaceapyg2uyzk7vueh3xccxkuwbz3nxewjyguoxvhx77malc2lzn2ybi")
| [
"\"LOTUS_USE_TEST_ADDRESSES\"",
"\"LOTUS_DISABLE_HYPERDRIVE\""
]
| []
| [
"LOTUS_DISABLE_HYPERDRIVE",
"LOTUS_USE_TEST_ADDRESSES"
]
| [] | ["LOTUS_DISABLE_HYPERDRIVE", "LOTUS_USE_TEST_ADDRESSES"] | go | 2 | 0 | |
classification/eval_main.py | # Copyright 2021 Fagner Cunha
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Tool to evaluate classifiers.
Set the environment variable PYTHONHASHSEED to a reproducible value
before you start the python process to ensure that the model trains
or infers with reproducibility
"""
import json
import os
import random
from absl import app
from absl import flags
import numpy as np
from sklearn.metrics import accuracy_score
import tensorflow as tf
from iwildcamlib import CategoryMap
import bags
import dataloader
import geoprior
import model_builder
os.environ['TF_DETERMINISTIC_OPS'] = '1'
FLAGS = flags.FLAGS
flags.DEFINE_string(
'model_name', default='efficientnet-b0',
help=('Model name of the archtecture'))
flags.DEFINE_integer(
'input_size', default=224,
help=('Input size of the model'))
flags.DEFINE_bool(
'use_bags', default=False,
help=('Use Balanced Group Softmax to train model'))
flags.DEFINE_integer(
'empty_class_id', default=0,
help=('Empty class id for balanced group softmax'))
flags.DEFINE_bool(
'use_full_image', default=False,
help=('Ignore bounding boxes and use full image'))
flags.DEFINE_integer(
'batch_size', default=32,
    help=('Batch size used during evaluation.'))
flags.DEFINE_string(
'ckpt_dir', default=None,
help=('Location of the model checkpoint files'))
flags.DEFINE_string(
'annotations_json', default=None,
help=('Path to json file containing the training annotations json for'
' the iWildCam2021 competition'))
flags.DEFINE_string(
'train_dataset_split', default=None,
help=('Path to json file containing the train/validation split based on'
' locations.'))
flags.DEFINE_string(
'test_info_json', default=None,
help=('Path to json file containing the test information json for'
' the iWildCam2021 competition'))
flags.DEFINE_string(
'dataset_dir', default=None,
help=('Path to directory containing test images.'))
flags.DEFINE_string(
'megadetector_results_json', default=None,
help=('Path to json file containing megadetector results.'))
flags.DEFINE_integer(
'log_frequence', default=500,
help=('Log prediction every n steps'))
flags.DEFINE_string(
'geo_prior_ckpt_dir', default=None,
help=('Location of the checkpoint files for the geo prior model'))
flags.DEFINE_integer(
'geo_prior_input_size', default=6,
help=('Input size for the geo prior model'))
flags.DEFINE_bool(
'use_bn_geo_prior', default=False,
help=('Include Batch Normalization to the geo prior model'))
flags.DEFINE_integer(
'embed_dim', default=256,
help=('Embedding dimension for geo prior model'))
if 'random_seed' not in list(FLAGS):
flags.DEFINE_integer(
'random_seed', default=42,
help=('Random seed for reproductible experiments'))
flags.mark_flag_as_required('ckpt_dir')
flags.mark_flag_as_required('annotations_json')
flags.mark_flag_as_required('test_info_json')
flags.mark_flag_as_required('dataset_dir')
flags.mark_flag_as_required('megadetector_results_json')
def load_train_validation_split():
if FLAGS.train_dataset_split is None:
return None, None
with tf.io.gfile.GFile(FLAGS.train_dataset_split, 'r') as json_file:
json_data = json.load(json_file)
return json_data['train'], json_data['validation']
def _load_model(num_classes, bal_group_softmax=None):
model = model_builder.create(model_name=FLAGS.model_name,
num_classes=num_classes,
input_size=FLAGS.input_size,
unfreeze_layers=0,
bags=bal_group_softmax)
checkpoint_path = os.path.join(FLAGS.ckpt_dir, "ckp")
model.load_weights(checkpoint_path)
if bal_group_softmax is not None:
model = bal_group_softmax.create_prediction_model(model)
return model
def _load_geo_prior_model(num_classes):
if FLAGS.geo_prior_ckpt_dir is not None:
rand_sample_generator = dataloader.RandSpatioTemporalGenerator()
geo_prior_model = geoprior.FCNet(
num_inputs=FLAGS.geo_prior_input_size,
embed_dim=FLAGS.embed_dim,
num_classes=num_classes,
use_bn=FLAGS.use_bn_geo_prior,
rand_sample_generator=rand_sample_generator)
checkpoint_path = os.path.join(FLAGS.geo_prior_ckpt_dir, "ckp")
geo_prior_model.load_weights(checkpoint_path)
return geo_prior_model
else:
return None
def _build_input_data(category_map):
include_geo_data = FLAGS.geo_prior_ckpt_dir is not None
input_data = dataloader.JsonWBBoxInputProcessor(
dataset_json=FLAGS.test_info_json,
dataset_dir=FLAGS.dataset_dir,
megadetector_results_json=FLAGS.megadetector_results_json,
batch_size=FLAGS.batch_size,
batch_drop_remainder=False,
category_map=category_map,
is_training=False,
output_size=FLAGS.input_size,
crop_mode='full' if FLAGS.use_full_image else 'bbox',
provide_validity_info_output=include_geo_data,
provide_coord_date_encoded_input=include_geo_data,
provide_instance_id=True,
seed=FLAGS.random_seed)
return input_data.make_source_dataset()
def mix_predictions(cnn_preds, prior_preds, valid):
valid = tf.expand_dims(valid, axis=-1)
return cnn_preds*prior_preds*valid + (1 - valid)*cnn_preds
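# Reading of the blend above (an interpretation of the formula, not taken from
# the original comments): for samples with valid location/date metadata
# (valid == 1) the CNN scores are re-weighted by the geo prior (cnn * prior);
# when metadata is missing (valid == 0) the CNN scores pass through unchanged.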
def predict_w_geo_prior(batch, metadata, model, geo_prior_model):
cnn_input = batch[:-1]
prior_input = batch[-1]
label, valid, _ = metadata
cnn_preds = model(cnn_input, training=False)
prior_preds = geo_prior_model(prior_input, training=False)
preds = mix_predictions(cnn_preds, prior_preds, valid)
return label, preds
def _decode_one_hot(one_hot_tensor):
return tf.argmax(one_hot_tensor, axis=1).numpy()
def predict_classifier(model, geo_prior_model, dataset):
labels = []
predictions = []
count = 0
for batch, metadata in dataset:
if geo_prior_model is not None:
label, preds = predict_w_geo_prior(batch,
metadata,
model,
geo_prior_model)
else:
preds = model(batch, training=False)
label, _ = metadata
labels += list(_decode_one_hot(label))
predictions += list(_decode_one_hot(preds))
if count % FLAGS.log_frequence == 0:
tf.compat.v1.logging.info('Finished eval step %d' % count)
count += 1
return labels, predictions
def set_random_seeds():
random.seed(FLAGS.random_seed)
np.random.seed(FLAGS.random_seed)
tf.random.set_seed(FLAGS.random_seed)
def main(_):
set_random_seeds()
category_map = CategoryMap(FLAGS.annotations_json)
train_loc, _ = load_train_validation_split()
bal_group_softmax = bags.BalancedGroupSoftmax(
FLAGS.annotations_json,
category_map,
FLAGS.empty_class_id,
selected_locations=train_loc) if FLAGS.use_bags else None
dataset, _ = _build_input_data(category_map)
num_classes = category_map.get_num_classes()
model = _load_model(num_classes, bal_group_softmax)
geo_prior_model = _load_geo_prior_model(num_classes)
labels, predictions = predict_classifier(model, geo_prior_model, dataset)
accuracy = accuracy_score(labels, predictions)
print("Accuracy: %s" % accuracy)
if __name__ == '__main__':
app.run(main)
| []
| []
| [
"TF_DETERMINISTIC_OPS"
]
| [] | ["TF_DETERMINISTIC_OPS"] | python | 1 | 0 | |
acceptance/install/scenario2_test.go | package install_test
import (
"encoding/json"
"os"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/epinio/epinio/acceptance/helpers/catalog"
"github.com/epinio/epinio/acceptance/helpers/epinio"
"github.com/epinio/epinio/acceptance/helpers/proc"
"github.com/epinio/epinio/acceptance/helpers/route53"
"github.com/epinio/epinio/acceptance/testenv"
)
// This test uses AWS route53 to update the system domain's records
var _ = Describe("<Scenario2> GKE, Letsencrypt-staging, Zero instance", func() {
var (
flags []string
epinioHelper epinio.Epinio
appName = catalog.NewAppName()
loadbalancer string
domain string
zoneID string
instancesNum string
)
BeforeEach(func() {
epinioHelper = epinio.NewEpinioHelper(testenv.EpinioBinaryPath())
domain = os.Getenv("GKE_DOMAIN")
Expect(domain).ToNot(BeEmpty())
zoneID = os.Getenv("AWS_ZONE_ID")
Expect(zoneID).ToNot(BeEmpty())
instancesNum = "0"
flags = []string{
"--set", "domain=" + domain,
"--set", "tlsIssuer=letsencrypt-staging",
"--set", "skipCertManager=true",
}
})
AfterEach(func() {
out, err := epinioHelper.Uninstall()
Expect(err).NotTo(HaveOccurred(), out)
})
It("installs with letsencrypt-staging cert, custom domain and pushes an app with 0 instances", func() {
By("Installing CertManager", func() {
out, err := proc.RunW("helm", "repo", "add", "jetstack", "https://charts.jetstack.io")
Expect(err).NotTo(HaveOccurred(), out)
out, err = proc.RunW("helm", "repo", "update")
Expect(err).NotTo(HaveOccurred(), out)
out, err = proc.RunW("helm", "upgrade", "--install", "cert-manager", "jetstack/cert-manager",
"-n", "cert-manager",
"--create-namespace",
"--set", "installCRDs=true",
"--set", "extraArgs[0]=--enable-certificate-owner-ref=true",
)
Expect(err).NotTo(HaveOccurred(), out)
// Create certificate secret and cluster_issuer
out, err = proc.RunW("kubectl", "apply", "-f", testenv.TestAssetPath("letsencrypt-staging.yaml"))
Expect(err).NotTo(HaveOccurred(), out)
})
By("Installing Epinio", func() {
out, err := epinioHelper.Install(flags...)
Expect(err).NotTo(HaveOccurred(), out)
Expect(out).To(ContainSubstring("STATUS: deployed"))
out, err = testenv.PatchEpinio()
Expect(err).ToNot(HaveOccurred(), out)
})
By("Extracting Loadbalancer IP", func() {
out, err := proc.RunW("kubectl", "get", "service", "-n", "traefik", "traefik", "-o", "json")
Expect(err).NotTo(HaveOccurred(), out)
status := &testenv.LoadBalancerHostname{}
err = json.Unmarshal([]byte(out), status)
Expect(err).NotTo(HaveOccurred())
Expect(status.Status.LoadBalancer.Ingress).To(HaveLen(1))
loadbalancer = status.Status.LoadBalancer.Ingress[0].IP
Expect(loadbalancer).ToNot(BeEmpty())
})
By("Updating DNS Entries", func() {
change := route53.A(domain, loadbalancer, "UPSERT")
out, err := route53.Update(zoneID, change, nodeTmpDir)
Expect(err).NotTo(HaveOccurred(), out)
change = route53.A("*."+domain, loadbalancer, "UPSERT")
out, err = route53.Update(zoneID, change, nodeTmpDir)
Expect(err).NotTo(HaveOccurred(), out)
})
// Check that DNS entry is correctly propagated
By("Checking that DNS entry is correctly propagated", func() {
Eventually(func() string {
out, err := route53.TestDnsAnswer(zoneID, domain, "A")
Expect(err).NotTo(HaveOccurred(), out)
answer := &route53.DNSAnswer{}
err = json.Unmarshal([]byte(out), answer)
Expect(err).NotTo(HaveOccurred())
if len(answer.RecordData) == 0 {
return ""
}
return answer.RecordData[0]
}, "5m", "2s").Should(Equal(loadbalancer))
})
// Workaround to (try to!) ensure that the DNS is really propagated!
time.Sleep(3 * time.Minute)
By("Updating Epinio config", func() {
out, err := epinioHelper.Run("config", "update")
Expect(err).NotTo(HaveOccurred(), out)
Expect(out).To(ContainSubstring("Ok"))
})
By("Checking Epinio info command", func() {
Eventually(func() string {
out, _ := epinioHelper.Run("info")
return out
}, "2m", "2s").Should(ContainSubstring("Epinio Version:"))
})
By("Pushing an app with zero instances", func() {
out, err := epinioHelper.Run("push",
"--name", appName,
"--path", testenv.AssetPath("sample-app"),
"--instances", instancesNum)
Expect(err).ToNot(HaveOccurred(), out)
Eventually(func() string {
out, err := proc.RunW("kubectl", "get", "deployment", "--namespace", testenv.DefaultWorkspace, appName, "-o", "jsonpath={.spec.replicas}")
Expect(err).ToNot(HaveOccurred(), out)
return out
}, "30s", "1s").Should(Equal("0"))
// Verify cluster_issuer is used
out, err = proc.RunW("kubectl", "get", "certificate",
"-n", testenv.DefaultWorkspace,
"--selector", "app.kubernetes.io/name="+appName,
"-o", "jsonpath='{.items[*].spec.issuerRef.name}'")
Expect(err).NotTo(HaveOccurred(), out)
Expect(out).To(Equal("'letsencrypt-staging'"))
})
By("Cleaning DNS Entries", func() {
change := route53.A(domain, loadbalancer, "DELETE")
out, err := route53.Update(zoneID, change, nodeTmpDir)
Expect(err).NotTo(HaveOccurred(), out)
change = route53.A("*."+domain, loadbalancer, "DELETE")
out, err = route53.Update(zoneID, change, nodeTmpDir)
Expect(err).NotTo(HaveOccurred(), out)
})
})
})
| [
"\"GKE_DOMAIN\"",
"\"AWS_ZONE_ID\""
]
| []
| [
"AWS_ZONE_ID",
"GKE_DOMAIN"
]
| [] | ["AWS_ZONE_ID", "GKE_DOMAIN"] | go | 2 | 0 | |
cmd/tusd/cli/composer.go | package cli
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/tus/tusd/pkg/azurestore"
"github.com/tus/tusd/pkg/filelocker"
"github.com/tus/tusd/pkg/filestore"
"github.com/tus/tusd/pkg/gcsstore"
"github.com/tus/tusd/pkg/handler"
"github.com/tus/tusd/pkg/memorylocker"
"github.com/tus/tusd/pkg/s3store"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
var Composer *handler.StoreComposer
func CreateComposer() {
// Attempt to use S3 as a backend if the -s3-bucket option has been supplied.
// If not, we default to storing them locally on disk.
Composer = handler.NewStoreComposer()
if Flags.S3Bucket != "" {
s3Config := aws.NewConfig()
if Flags.S3TransferAcceleration {
s3Config = s3Config.WithS3UseAccelerate(true)
}
if Flags.S3DisableContentHashes {
// Prevent the S3 service client from automatically
// adding the Content-MD5 header to S3 Object Put and Upload API calls.
s3Config = s3Config.WithS3DisableContentMD5Validation(true)
}
if Flags.S3DisableSSL {
// Disable HTTPS and only use HTTP (helpful for debugging requests).
s3Config = s3Config.WithDisableSSL(true)
}
if Flags.S3Endpoint == "" {
if Flags.S3TransferAcceleration {
stdout.Printf("Using 's3://%s' as S3 bucket for storage with AWS S3 Transfer Acceleration enabled.\n", Flags.S3Bucket)
} else {
stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", Flags.S3Bucket)
}
} else {
stdout.Printf("Using '%s/%s' as S3 endpoint and bucket for storage.\n", Flags.S3Endpoint, Flags.S3Bucket)
s3Config = s3Config.WithEndpoint(Flags.S3Endpoint).WithS3ForcePathStyle(true)
}
// Derive credentials from default credential chain (env, shared, ec2 instance role)
// as per https://github.com/aws/aws-sdk-go#configuring-credentials
store := s3store.New(Flags.S3Bucket, s3.New(session.Must(session.NewSession()), s3Config))
store.ObjectPrefix = Flags.S3ObjectPrefix
store.PreferredPartSize = Flags.S3PartSize
store.DisableContentHashes = Flags.S3DisableContentHashes
store.UseIn(Composer)
locker := memorylocker.New()
locker.UseIn(Composer)
} else if Flags.GCSBucket != "" {
if Flags.GCSObjectPrefix != "" && strings.Contains(Flags.GCSObjectPrefix, "_") {
stderr.Fatalf("gcs-object-prefix value (%s) can't contain underscore. "+
"Please remove underscore from the value", Flags.GCSObjectPrefix)
}
		// Derive credentials from the service account file path passed in the
// GCS_SERVICE_ACCOUNT_FILE environment variable.
gcsSAF := os.Getenv("GCS_SERVICE_ACCOUNT_FILE")
if gcsSAF == "" {
stderr.Fatalf("No service account file provided for Google Cloud Storage using the GCS_SERVICE_ACCOUNT_FILE environment variable.\n")
}
service, err := gcsstore.NewGCSService(gcsSAF)
if err != nil {
stderr.Fatalf("Unable to create Google Cloud Storage service: %s\n", err)
}
stdout.Printf("Using 'gcs://%s' as GCS bucket for storage.\n", Flags.GCSBucket)
store := gcsstore.New(Flags.GCSBucket, service)
store.ObjectPrefix = Flags.GCSObjectPrefix
store.UseIn(Composer)
locker := memorylocker.New()
locker.UseIn(Composer)
} else if Flags.AzStorage != "" {
accountName := os.Getenv("AZURE_STORAGE_ACCOUNT")
if accountName == "" {
stderr.Fatalf("No service account name for Azure BlockBlob Storage using the AZURE_STORAGE_ACCOUNT environment variable.\n")
}
accountKey := os.Getenv("AZURE_STORAGE_KEY")
if accountKey == "" {
stderr.Fatalf("No service account key for Azure BlockBlob Storage using the AZURE_STORAGE_KEY environment variable.\n")
}
azureEndpoint := Flags.AzEndpoint
// Enables support for using Azurite as a storage emulator without messing with proxies and stuff
// e.g. http://127.0.0.1:10000/devstoreaccount1
if azureEndpoint == "" {
azureEndpoint = fmt.Sprintf("https://%s.blob.core.windows.net", accountName)
stdout.Printf("Custom Azure Endpoint not specified in flag variable azure-endpoint.\n"+
"Using endpoint %s\n", azureEndpoint)
} else {
stdout.Printf("Using Azure endpoint %s\n", azureEndpoint)
}
azConfig := &azurestore.AzConfig{
AccountName: accountName,
AccountKey: accountKey,
ContainerName: Flags.AzStorage,
ContainerAccessType: Flags.AzContainerAccessType,
BlobAccessTier: Flags.AzBlobAccessTier,
Endpoint: azureEndpoint,
}
azService, err := azurestore.NewAzureService(azConfig)
if err != nil {
stderr.Fatalf(err.Error())
}
store := azurestore.New(azService)
store.ObjectPrefix = Flags.AzObjectPrefix
store.Container = Flags.AzStorage
store.UseIn(Composer)
} else {
dir, err := filepath.Abs(Flags.UploadDir)
if err != nil {
stderr.Fatalf("Unable to make absolute path: %s", err)
}
stdout.Printf("Using '%s' as directory storage.\n", dir)
if err := os.MkdirAll(dir, os.FileMode(0774)); err != nil {
stderr.Fatalf("Unable to ensure directory exists: %s", err)
}
store := filestore.New(dir)
store.UseIn(Composer)
locker := filelocker.New(dir)
locker.UseIn(Composer)
}
stdout.Printf("Using %.2fMB as maximum size.\n", float64(Flags.MaxSize)/1024/1024)
}
| [
"\"GCS_SERVICE_ACCOUNT_FILE\"",
"\"AZURE_STORAGE_ACCOUNT\"",
"\"AZURE_STORAGE_KEY\""
]
| []
| [
"GCS_SERVICE_ACCOUNT_FILE",
"AZURE_STORAGE_KEY",
"AZURE_STORAGE_ACCOUNT"
]
| [] | ["GCS_SERVICE_ACCOUNT_FILE", "AZURE_STORAGE_KEY", "AZURE_STORAGE_ACCOUNT"] | go | 3 | 0 | |
test/get_previous_releases.py | #!/usr/bin/env python3
#
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Download or build previous releases.
# Needs curl and tar to download a release, or the build dependencies when
# building a release.
import argparse
import contextlib
from fnmatch import fnmatch
import os
from pathlib import Path
import re
import shutil
import subprocess
import sys
import hashlib
SHA256_SUMS = {
"a605473e985a0cc372c96cc7bc9f0b8c76bbdaa9c37cc169706c594c7abc62b3": "umkoin-0.19.1-aarch64-linux-gnu.tar.gz",
"908ea674e22400b0119e029680df8f5f57cbe07d18900501dd349ca7554077f8": "umkoin-0.19.1-arm-linux-gnueabihf.tar.gz",
"a82668eeb603c182377affbc8c29c0b352f73426ce675471b99ed6959bbd91d6": "umkoin-0.19.1-i686-pc-linux-gnu.tar.gz",
"20005082dd17a717f0d79ee234f7fd5bdff489e55895b7084b357eec4361bd26": "umkoin-0.19.1-osx64.tar.gz",
"ee3dddecfa5c858856f8006342c484b668f34d5d41f8f1d3237fbf3626a4f075": "umkoin-0.19.1-riscv64-linux-gnu.tar.gz",
"c8b1d803c03e52538d62759caed010e52924c5c0a4f4cf840199c200399dc628": "umkoin-0.19.1-x86_64-linux-gnu.tar.gz",
"71603c2015becc90c0efd91d6b4f91e1c2ae9a2344bd672e9beee01bce83b7b2": "umkoin-0.20.0-aarch64-linux-gnu.tar.gz",
"78afc01e0e8ccf8ca90b139b9891c3a5adbfecbf1111cca82e613462e3d841d6": "umkoin-0.20.0-arm-linux-gnueabihf.tar.gz",
"218bcea3eb9e42ce57ccfa26711827600a1b7164fbd02469513f11f023b29090": "umkoin-0.20.0-osx64.tar.gz",
"5df8ef0cc548c3a69b9e475403ed5bbb5cadba964c4c3d873f1fda96afcd15b1": "umkoin-0.20.0-riscv64-linux-gnu.tar.gz",
"c4d8896d53160ab039ab250d7e70c2822bb02aaaeac2f8f07fb73eff4635a87e": "umkoin-0.20.0-x86_64-linux-gnu.tar.gz",
"ad356f577f3fffe646ffbe73bd3655612f794e9cc57995984f9f88581ea6fbb3": "umkoin-0.20.1-aarch64-linux-gnu.tar.gz",
"2ef0bf4045ecdbd4fc34c1c818c0d4b5f52ca37709d71e8e0388f5272fb17214": "umkoin-0.20.1-arm-linux-gnueabihf.tar.gz",
"6f6411c8409e91b070f54edf76544cdc85cfd2b9ffe0dba200fb68cddb1e3010": "umkoin-0.20.1-osx64.tar.gz",
"e55c32e91800156032dcc2ff9bc654df2068b46bab24e63328755a1c2fd588e2": "umkoin-0.20.1-riscv64-linux-gnu.tar.gz",
"2430e4c813ea0de28ef939170e05a36eaa1d2031589abe3b32491d5835f7b70e": "umkoin-0.20.1-x86_64-linux-gnu.tar.gz"
}
@contextlib.contextmanager
def pushd(new_dir) -> None:
previous_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(previous_dir)
def download_binary(tag, args) -> int:
if Path(tag).is_dir():
if not args.remove_dir:
print('Using cached {}'.format(tag))
return 0
shutil.rmtree(tag)
Path(tag).mkdir()
bin_path = 'bin/umkoin-core-{}'.format(tag[1:])
match = re.compile('v(.*)(rc[0-9]+)$').search(tag)
if match:
bin_path = 'bin/umkoin-core-{}/test.{}'.format(
match.group(1), match.group(2))
tarball = 'umkoin-{tag}-{platform}.tar.gz'.format(
tag=tag[1:], platform=args.platform)
tarballUrl = 'http://www.umkoin.org/{bin_path}/{tarball}'.format(
bin_path=bin_path, tarball=tarball)
print('Fetching: {tarballUrl}'.format(tarballUrl=tarballUrl))
header, status = subprocess.Popen(
['curl', '--head', tarballUrl], stdout=subprocess.PIPE).communicate()
if re.search("404 Not Found", header.decode("utf-8")):
print("Binary tag was not found")
return 1
curlCmds = [
['curl', '--remote-name', tarballUrl]
]
for cmd in curlCmds:
ret = subprocess.run(cmd).returncode
if ret:
return ret
hasher = hashlib.sha256()
with open(tarball, "rb") as afile:
hasher.update(afile.read())
tarballHash = hasher.hexdigest()
if tarballHash not in SHA256_SUMS or SHA256_SUMS[tarballHash] != tarball:
if tarball in SHA256_SUMS.values():
print("Checksum did not match")
return 1
print("Checksum for given version doesn't exist")
return 1
print("Checksum matched")
# Extract tarball
ret = subprocess.run(['tar', '-zxf', tarball, '-C', tag,
'--strip-components=1',
'umkoin-{tag}'.format(tag=tag[1:])]).returncode
if ret:
return ret
Path(tarball).unlink()
return 0
def build_release(tag, args) -> int:
githubUrl = "https://github.com/umkoin/umkoin"
if args.remove_dir:
if Path(tag).is_dir():
shutil.rmtree(tag)
if not Path(tag).is_dir():
# fetch new tags
subprocess.run(
["git", "fetch", githubUrl, "--tags"])
output = subprocess.check_output(['git', 'tag', '-l', tag])
if not output:
print('Tag {} not found'.format(tag))
return 1
ret = subprocess.run([
'git', 'clone', githubUrl, tag
]).returncode
if ret:
return ret
with pushd(tag):
ret = subprocess.run(['git', 'checkout', tag]).returncode
if ret:
return ret
host = args.host
if args.depends:
with pushd('depends'):
ret = subprocess.run(['make', 'NO_QT=1']).returncode
if ret:
return ret
host = os.environ.get(
'HOST', subprocess.check_output(['./config.guess']))
config_flags = '--prefix={pwd}/depends/{host} '.format(
pwd=os.getcwd(),
host=host) + args.config_flags
cmds = [
'./autogen.sh',
'./configure {}'.format(config_flags),
'make',
]
for cmd in cmds:
ret = subprocess.run(cmd.split()).returncode
if ret:
return ret
# Move binaries, so they're in the same place as in the
# release download
Path('bin').mkdir(exist_ok=True)
files = ['umkoind', 'umkoin-cli', 'umkoin-tx']
for f in files:
Path('src/'+f).rename('bin/'+f)
return 0
def check_host(args) -> int:
args.host = os.environ.get('HOST', subprocess.check_output(
'./depends/config.guess').decode())
if args.download_binary:
platforms = {
'aarch64-*-linux*': 'aarch64-linux-gnu',
'x86_64-*-linux*': 'x86_64-linux-gnu',
'x86_64-apple-darwin*': 'osx64',
'aarch64-apple-darwin*': 'osx64',
}
args.platform = ''
for pattern, target in platforms.items():
if fnmatch(args.host, pattern):
args.platform = target
if not args.platform:
print('Not sure which binary to download for {}'.format(args.host))
return 1
return 0
def main(args) -> int:
Path(args.target_dir).mkdir(exist_ok=True, parents=True)
print("Releases directory: {}".format(args.target_dir))
ret = check_host(args)
if ret:
return ret
if args.download_binary:
with pushd(args.target_dir):
for tag in args.tags:
ret = download_binary(tag, args)
if ret:
return ret
return 0
args.config_flags = os.environ.get('CONFIG_FLAGS', '')
args.config_flags += ' --without-gui --disable-tests --disable-bench'
with pushd(args.target_dir):
for tag in args.tags:
ret = build_release(tag, args)
if ret:
return ret
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-r', '--remove-dir', action='store_true',
help='remove existing directory.')
parser.add_argument('-d', '--depends', action='store_true',
help='use depends.')
parser.add_argument('-b', '--download-binary', action='store_true',
help='download release binary.')
parser.add_argument('-t', '--target-dir', action='store',
help='target directory.', default='releases')
parser.add_argument('tags', nargs='+',
help="release tags. e.g.: v0.18.1 v0.20.0rc2")
args = parser.parse_args()
sys.exit(main(args))
| []
| []
| [
"HOST",
"CONFIG_FLAGS"
]
| [] | ["HOST", "CONFIG_FLAGS"] | python | 2 | 0 | |
code/train.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import os
import argparse
import json
import pandas as pd
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, Callback, TensorBoard
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import recall_score, precision_score, classification_report, accuracy_score, confusion_matrix, f1_score
import source.custom_layer as custlay
import source.bert_preprocessing as berpre
import source.postprocessing as postpro
import source.sentence_preprocessing as senpre
def main(args):
    # To-do:
    # - Fix logging bug and switch all prints to loggers
print("Container structure:")
model_dir = args.container_model_dir
print("internal docker model_dir:", model_dir)
print("epochs: ", args.epochs)
print("batch size: ", args.batch_size)
MAX_SEQUENCE_LENGTH = args.max_sequence_length
print("saving parameters necessary for inference")
f = open(os.path.join(model_dir, "max_sequence_length.txt"),"w")
f.write(str(MAX_SEQUENCE_LENGTH))
f.close()
f = open(os.path.join(model_dir, "bert_path.txt"),"w")
f.write(str(args.bert_path))
f.close()
print("getting data")
train_data = pd.read_csv(os.path.join(args.train, 'train.csv'), engine='python')
val_data = pd.read_csv(os.path.join(args.validation, 'val.csv'), engine='python')
test_data = pd.read_csv(os.path.join(args.eval, 'test.csv'), engine='python')
print("preprocessing data")
train_sentences = senpre.create_sentences_out_of_dataframe(train_data)
val_sentences = senpre.create_sentences_out_of_dataframe(val_data)
test_sentences = senpre.create_sentences_out_of_dataframe(test_data)
train_sentences = senpre.from_iob_to_io(train_sentences)
val_sentences = senpre.from_iob_to_io(val_sentences)
test_sentences = senpre.from_iob_to_io(test_sentences)
tags = set([item for sublist in train_sentences+test_sentences+val_sentences for _, item in sublist])
print("number of tags after IO conversion:", str(len(tags)))
tag2int = {}
int2tag = {}
for i, tag in enumerate(sorted(tags)):
tag2int[tag] = i+1
int2tag[i+1] = tag
# Special character for the tags
tag2int['-PAD-'] = 0
int2tag[0] = '-PAD-'
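    # Illustrative example (hypothetical tags, not from this dataset): with
    # tags == {'LOC', 'O', 'PER'} the loop above yields
    # tag2int == {'LOC': 1, 'O': 2, 'PER': 3, '-PAD-': 0} and int2tag as its inverse.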
n_tags = len(tag2int)
print("saving tag2int and int2tag to directory")
j = json.dumps(tag2int)
f = open(os.path.join(model_dir, "tag2int.json"), "w")
f.write(j)
f.close()
j = json.dumps(int2tag)
f = open(os.path.join(model_dir, "int2tag.json"), "w")
f.write(j)
f.close()
print("splitting sentences")
train_sentences = senpre.split(train_sentences, MAX_SEQUENCE_LENGTH)
val_sentences = senpre.split(val_sentences, MAX_SEQUENCE_LENGTH)
test_sentences = senpre.split(test_sentences, MAX_SEQUENCE_LENGTH)
train_text = senpre.text_sequence(train_sentences)
test_text = senpre.text_sequence(test_sentences)
val_text = senpre.text_sequence(val_sentences)
train_label = senpre.tag_sequence(train_sentences)
test_label = senpre.tag_sequence(test_sentences)
val_label = senpre.tag_sequence(val_sentences)
# Instantiate tokenizer
print("instantiate bert tokenizer")
tokenizer = berpre.create_tokenizer_from_hub_module(args.bert_path)
# Convert data to InputExample format
print("convert data to bert examples")
train_examples = berpre.convert_text_to_examples(train_text, train_label)
test_examples = berpre.convert_text_to_examples(test_text, test_label)
val_examples = berpre.convert_text_to_examples(val_text, val_label)
# Convert to features
print("convert to bert features")
(train_input_ids, train_input_masks, train_segment_ids, train_labels_ids
) = berpre.convert_examples_to_features(tokenizer, train_examples, tag2int, max_seq_length=MAX_SEQUENCE_LENGTH+2)
(test_input_ids, test_input_masks, test_segment_ids, test_labels_ids
) = berpre.convert_examples_to_features(tokenizer, test_examples, tag2int, max_seq_length=MAX_SEQUENCE_LENGTH+2)
(val_input_ids, val_input_masks, val_segment_ids, val_labels_ids
) = berpre.convert_examples_to_features(tokenizer, val_examples, tag2int, max_seq_length=MAX_SEQUENCE_LENGTH+2)
# One-hot encode labels
print("convert labels to categorical")
train_labels = to_categorical(train_labels_ids, num_classes=n_tags)
test_labels = to_categorical(test_labels_ids, num_classes=n_tags)
val_labels = to_categorical(val_labels_ids, num_classes=n_tags)
print('bert tokenization over')
print("configuring model")
model = custlay.build_model(max_seq_length = MAX_SEQUENCE_LENGTH+2,
n_tags=n_tags,
lr=args.learning_rate,
drop_out=args.drop_out,
bert_path=args.bert_path
)
print("start training")
print("temporary weights will be saved to:", (os.path.join(model_dir, 'ner_model.h5')))
cp = ModelCheckpoint(filepath=os.path.join(model_dir, 'ner_model.h5'),
monitor='val_accuracy',
save_best_only=True,
save_weights_only=True,
verbose=1)
early_stopping = EarlyStopping(monitor = 'val_accuracy', patience = 5)
history = model.fit([train_input_ids, train_input_masks, train_segment_ids],
train_labels,
validation_data=([val_input_ids, val_input_masks, val_segment_ids], val_labels),
epochs=args.epochs,
batch_size=args.batch_size,
shuffle=True,
verbose=1,
callbacks=[cp, early_stopping]
)
print("training over")
print("loading best h5 weights")
# Reload best saved checkpoint:
model.load_weights(os.path.join(model_dir, 'ner_model.h5'))
print("content of model_dir:", (os.path.join(model_dir)))
os.system(f'ls {model_dir}')
print("save best model to ProtoBuff and right directory for TensorFlow Serving")
# Note: This directory structure will need to be followed - see notes for the next section
model_version = '1'
export_dir = os.path.join(model_dir, 'model/', model_version)
model.save(export_dir)
print("saving done")
# Reporting test set performance
print("predicting on test set")
y_true = test_labels.argmax(-1)
y_pred = model.predict([test_input_ids, test_input_masks, test_segment_ids]).argmax(-1)
print("creating classification report")
out_true, out_pred = postpro.y2label_for_report(y_true, y_pred, int2tag, mask=0)
report = classification_report(out_true, out_pred, digits=4, output_dict=True)
report_df = pd.DataFrame(report).transpose()
print(report_df)
print("saving classification report to model directory")
report_df.to_csv(os.path.join(model_dir, "classification_report.csv"))
print('Removing h5 file as it is not used for Serving')
os.system(f'rm {model_dir}/ner_model.h5')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train',type=str,required=False,default=os.environ.get('SM_CHANNEL_TRAIN'))
parser.add_argument('--validation',type=str,required=False,default=os.environ.get('SM_CHANNEL_VALIDATION'))
parser.add_argument('--eval',type=str,required=False,default=os.environ.get('SM_CHANNEL_EVAL'))
parser.add_argument('--container_model_dir',type=str,default=os.environ.get('SM_MODEL_DIR'), help='The directory where the model will be stored inside the docker. This folder is then compressed into a model.tar.gz sent to the s3 location associated with the training job')
parser.add_argument('--max_sequence_length',type=int, default=70)
parser.add_argument('--learning_rate',type=float,default=0.00004, help='Initial learning rate.')
parser.add_argument('--epochs',type=int, default=50)
parser.add_argument('--batch_size',type=int, default=16)
parser.add_argument('--drop_out',type=float, default=0.0)
parser.add_argument('--bert_path',type=str, default='https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3')
args, _ = parser.parse_known_args()
main(args)
| []
| []
| [
"SM_CHANNEL_TRAIN",
"SM_CHANNEL_VALIDATION",
"SM_MODEL_DIR",
"SM_CHANNEL_EVAL"
]
| [] | ["SM_CHANNEL_TRAIN", "SM_CHANNEL_VALIDATION", "SM_MODEL_DIR", "SM_CHANNEL_EVAL"] | python | 4 | 0 | |
core/commands/config.go | package commands
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
cmds "github.com/djbarber/ipfs-hack/commands"
repo "github.com/djbarber/ipfs-hack/repo"
config "github.com/djbarber/ipfs-hack/repo/config"
fsrepo "github.com/djbarber/ipfs-hack/repo/fsrepo"
u "github.com/djbarber/ipfs-hack/util"
)
type ConfigField struct {
Key string
Value interface{}
}
var ConfigCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "get and set IPFS config values",
Synopsis: `
ipfs config <key> - Get value of <key>
ipfs config <key> <value> - Set value of <key> to <value>
ipfs config show - Show config file
ipfs config edit - Edit config file in $EDITOR
ipfs config replace <file> - Replaces the config file with <file>
`,
ShortDescription: `
ipfs config controls configuration variables. It works like 'git config'.
The configuration values are stored in a config file inside your IPFS
repository.`,
LongDescription: `
ipfs config controls configuration variables. It works
much like 'git config'. The configuration values are stored in a config
file inside your IPFS repository.
EXAMPLES:
Get the value of the 'datastore.path' key:
ipfs config datastore.path
Set the value of the 'datastore.path' key:
ipfs config datastore.path ~/.ipfs/datastore
`,
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, false, "The key of the config entry (e.g. \"Addresses.API\")"),
cmds.StringArg("value", false, false, "The value to set the config entry to"),
},
Options: []cmds.Option{
cmds.BoolOption("bool", "Set a boolean value"),
cmds.BoolOption("json", "Parse stringified JSON"),
},
Run: func(req cmds.Request, res cmds.Response) {
args := req.Arguments()
key := args[0]
r, err := fsrepo.Open(req.InvocContext().ConfigRoot)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
defer r.Close()
var output *ConfigField
if len(args) == 2 {
value := args[1]
if parseJson, _, _ := req.Option("json").Bool(); parseJson {
var jsonVal interface{}
if err := json.Unmarshal([]byte(value), &jsonVal); err != nil {
err = fmt.Errorf("failed to unmarshal json. %s", err)
res.SetError(err, cmds.ErrNormal)
return
}
output, err = setConfig(r, key, jsonVal)
} else if isbool, _, _ := req.Option("bool").Bool(); isbool {
output, err = setConfig(r, key, value == "true")
} else {
output, err = setConfig(r, key, value)
}
} else {
output, err = getConfig(r, key)
}
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
res.SetOutput(output)
},
Marshalers: cmds.MarshalerMap{
cmds.Text: func(res cmds.Response) (io.Reader, error) {
if len(res.Request().Arguments()) == 2 {
				return nil, nil // don't output anything
}
v := res.Output()
if v == nil {
k := res.Request().Arguments()[0]
return nil, fmt.Errorf("config does not contain key: %s", k)
}
vf, ok := v.(*ConfigField)
if !ok {
return nil, u.ErrCast()
}
buf, err := config.HumanOutput(vf.Value)
if err != nil {
return nil, err
}
buf = append(buf, byte('\n'))
return bytes.NewReader(buf), nil
},
},
Type: ConfigField{},
Subcommands: map[string]*cmds.Command{
"show": configShowCmd,
"edit": configEditCmd,
"replace": configReplaceCmd,
},
}
var configShowCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Outputs the content of the config file",
ShortDescription: `
WARNING: Your private key is stored in the config file, and it will be
included in the output of this command.
`,
},
Run: func(req cmds.Request, res cmds.Response) {
filename, err := config.Filename(req.InvocContext().ConfigRoot)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
output, err := showConfig(filename)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
res.SetOutput(output)
},
}
var configEditCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Opens the config file for editing in $EDITOR",
ShortDescription: `
To use 'ipfs config edit', you must have the $EDITOR environment
variable set to your preferred text editor.
`,
},
Run: func(req cmds.Request, res cmds.Response) {
filename, err := config.Filename(req.InvocContext().ConfigRoot)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
err = editConfig(filename)
if err != nil {
res.SetError(err, cmds.ErrNormal)
}
},
}
var configReplaceCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Replaces the config with <file>",
ShortDescription: `
Make sure to back up the config file first if necessary; this operation
can't be undone.
`,
},
Arguments: []cmds.Argument{
cmds.FileArg("file", true, false, "The file to use as the new config"),
},
Run: func(req cmds.Request, res cmds.Response) {
r, err := fsrepo.Open(req.InvocContext().ConfigRoot)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
defer r.Close()
file, err := req.Files().NextFile()
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
defer file.Close()
err = replaceConfig(r, file)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
},
}
func getConfig(r repo.Repo, key string) (*ConfigField, error) {
value, err := r.GetConfigKey(key)
if err != nil {
return nil, fmt.Errorf("Failed to get config value: %s", err)
}
return &ConfigField{
Key: key,
Value: value,
}, nil
}
func setConfig(r repo.Repo, key string, value interface{}) (*ConfigField, error) {
err := r.SetConfigKey(key, value)
if err != nil {
return nil, fmt.Errorf("Failed to set config value: %s (maybe use --json?)", err)
}
return getConfig(r, key)
}
func showConfig(filename string) (io.Reader, error) {
// TODO maybe we should omit privkey so we don't accidentally leak it?
data, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
return bytes.NewReader(data), nil
}
func editConfig(filename string) error {
editor := os.Getenv("EDITOR")
if editor == "" {
return errors.New("ENV variable $EDITOR not set")
}
cmd := exec.Command("sh", "-c", editor+" "+filename)
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
return cmd.Run()
}
func replaceConfig(r repo.Repo, file io.Reader) error {
var cfg config.Config
if err := json.NewDecoder(file).Decode(&cfg); err != nil {
return errors.New("Failed to decode file as config")
}
return r.SetConfig(&cfg)
}
| [
"\"EDITOR\""
]
| []
| [
"EDITOR"
]
| [] | ["EDITOR"] | go | 1 | 0 | |
tools/azure-sdk-tools/devtools_testutils/azure_testcase.py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import functools
import inspect
import logging
import os.path
import sys
import zlib
try:
from inspect import getfullargspec as get_arg_spec
except ImportError:
from inspect import getargspec as get_arg_spec
import pytest
from dotenv import load_dotenv, find_dotenv
from azure_devtools.scenario_tests import (
ReplayableTest,
AzureTestError,
GeneralNameReplacer,
RequestUrlNormalizer,
AuthenticationMetadataFilter,
OAuthRequestResponsesFilter,
)
from azure_devtools.scenario_tests.config import TestConfig
from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function
from .config import TEST_SETTING_FILENAME
from . import mgmt_settings_fake as fake_settings
try:
# Try to import the AsyncFakeCredential, if we cannot assume it is Python 2
from .fake_async_credential import AsyncFakeCredential
except SyntaxError:
pass
class HttpStatusCode(object):
OK = 200
Created = 201
Accepted = 202
NoContent = 204
NotFound = 404
def get_resource_name(name_prefix, identifier):
# Append a suffix to the name, based on the fully qualified test name
# We use a checksum of the test name so that each test gets different
# resource names, but each test will get the same name on repeat runs,
# which is needed for playback.
    # Most resource names have a length limit, so we use a short adler32 checksum
checksum = zlib.adler32(identifier) & 0xFFFFFFFF
name = "{}{}".format(name_prefix, hex(checksum)[2:]).rstrip("L")
if name.endswith("L"):
name = name[:-1]
return name
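# Illustrative only (hypothetical prefix/identifier):
# get_resource_name("rg", b"test_mgmt_network.test_public_ip_addresses") returns
# "rg" followed by a short hex suffix derived from the adler32 checksum, so the
# name is unique per test but stable across repeated runs of the same test.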
def get_qualified_method_name(obj, method_name):
# example of qualified test name:
# test_mgmt_network.test_public_ip_addresses
_, filename = os.path.split(inspect.getsourcefile(type(obj)))
module_name, _ = os.path.splitext(filename)
return "{0}.{1}".format(module_name, method_name)
def is_live():
"""A module version of is_live, that could be used in pytest marker."""
if not hasattr(is_live, "_cache"):
config_file = os.path.join(os.path.dirname(__file__), TEST_SETTING_FILENAME)
if not os.path.exists(config_file):
config_file = None
is_live._cache = TestConfig(config_file=config_file).record_mode
return is_live._cache
def get_region_override(default="westus"):
region = os.environ.get("RESOURCE_REGION", None) or default
if not region:
raise ValueError(
"Region should not be None; set a non-empty-string region to either the RESOURCE_REGION environment variable or the default parameter to this function."
)
return region
def _is_autorest_v3(client_class):
"""IS this client a autorestv3/track2 one?.
Could be refined later if necessary.
"""
args = get_arg_spec(client_class.__init__).args
return "credential" in args
class AzureTestCase(ReplayableTest):
def __init__(
self,
method_name,
config_file=None,
recording_dir=None,
recording_name=None,
recording_processors=None,
replay_processors=None,
recording_patches=None,
replay_patches=None,
**kwargs
):
self.working_folder = os.path.dirname(__file__)
self.qualified_test_name = get_qualified_method_name(self, method_name)
self._fake_settings, self._real_settings = self._load_settings()
self.scrubber = GeneralNameReplacer()
config_file = config_file or os.path.join(
self.working_folder, TEST_SETTING_FILENAME
)
if not os.path.exists(config_file):
config_file = None
load_dotenv(find_dotenv())
super(AzureTestCase, self).__init__(
method_name,
config_file=config_file,
recording_dir=recording_dir,
recording_name=recording_name or self.qualified_test_name,
recording_processors=recording_processors
or self._get_recording_processors(),
replay_processors=replay_processors or self._get_replay_processors(),
recording_patches=recording_patches,
replay_patches=replay_patches,
**kwargs
)
@property
def settings(self):
if self.is_live:
if self._real_settings:
return self._real_settings
else:
raise AzureTestError(
"Need a mgmt_settings_real.py file to run tests live."
)
else:
return self._fake_settings
def _load_settings(self):
try:
from . import mgmt_settings_real as real_settings
return fake_settings, real_settings
except ImportError:
return fake_settings, None
def _get_recording_processors(self):
return [
self.scrubber,
AuthenticationMetadataFilter(),
OAuthRequestResponsesFilter(),
RequestUrlNormalizer(),
]
def _get_replay_processors(self):
return [RequestUrlNormalizer()]
def is_playback(self):
return not self.is_live
def get_settings_value(self, key):
key_value = os.environ.get("AZURE_" + key, None)
if (
key_value
and self._real_settings
and getattr(self._real_settings, key) != key_value
):
raise ValueError(
"The AZURE_{key} environment variable and mgmt_settings_real.py set {key} to different values".format(
key=key
)
)
if not key_value:
try:
key_value = getattr(self.settings, key)
except Exception:
print("Could not get {}".format(key))
raise
return key_value
def set_value_to_scrub(self, key, default_value):
if self.is_live:
value = self.get_settings_value(key)
self.scrubber.register_name_pair(value, default_value)
return value
else:
return default_value
def setUp(self):
# Every test uses a different resource group name calculated from its
# qualified test name.
#
# When running all tests serially, this allows us to delete
# the resource group in teardown without waiting for the delete to
# complete. The next test in line will use a different resource group,
# so it won't have any trouble creating its resource group even if the
# previous test resource group hasn't finished deleting.
#
# When running tests individually, if you try to run the same test
# multiple times in a row, it's possible that the delete in the previous
# teardown hasn't completed yet (because we don't wait), and that
# would make resource group creation fail.
# To avoid that, we also delete the resource group in the
# setup, and we wait for that delete to complete.
super(AzureTestCase, self).setUp()
def tearDown(self):
return super(AzureTestCase, self).tearDown()
def get_credential(self, client_class, **kwargs):
tenant_id = os.environ.get(
"AZURE_TENANT_ID", getattr(self._real_settings, "TENANT_ID", None)
)
client_id = os.environ.get(
"AZURE_CLIENT_ID", getattr(self._real_settings, "CLIENT_ID", None)
)
secret = os.environ.get(
"AZURE_CLIENT_SECRET", getattr(self._real_settings, "CLIENT_SECRET", None)
)
is_async = kwargs.pop("is_async", False)
if tenant_id and client_id and secret and self.is_live:
if _is_autorest_v3(client_class):
# Create azure-identity class
from azure.identity import ClientSecretCredential
if is_async:
from azure.identity.aio import ClientSecretCredential
return ClientSecretCredential(
tenant_id=tenant_id, client_id=client_id, client_secret=secret
)
else:
# Create msrestazure class
from msrestazure.azure_active_directory import (
ServicePrincipalCredentials,
)
return ServicePrincipalCredentials(
tenant=tenant_id, client_id=client_id, secret=secret
)
else:
if _is_autorest_v3(client_class):
if is_async:
if self.is_live:
raise ValueError(
"Async live doesn't support mgmt_setting_real, please set AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET"
)
return AsyncFakeCredential()
else:
return self.settings.get_azure_core_credentials()
else:
return self.settings.get_credentials()
def create_client_from_credential(self, client_class, credential, **kwargs):
# Real client creation
# TODO decide what is the final argument for that
# if self.is_playback():
# kwargs.setdefault("polling_interval", 0)
if _is_autorest_v3(client_class):
kwargs.setdefault("logging_enable", True)
client = client_class(credential=credential, **kwargs)
else:
client = client_class(credentials=credential, **kwargs)
if self.is_playback():
try:
client._config.polling_interval = (
0 # FIXME in azure-mgmt-core, make this a kwargs
)
except AttributeError:
pass
if hasattr(client, "config"): # Autorest v2
if self.is_playback():
client.config.long_running_operation_timeout = 0
client.config.enable_http_logger = True
return client
def create_basic_client(self, client_class, **kwargs):
""" DO NOT USE ME ANYMORE."""
logger = logging.getLogger()
logger.warning(
"'create_basic_client' will be deprecated in the future. It is recommended that you use \
'get_credential' and 'create_client_from_credential' to create your client."
)
credentials = self.get_credential(client_class)
return self.create_client_from_credential(client_class, credentials, **kwargs)
def create_random_name(self, name):
return get_resource_name(name, self.qualified_test_name.encode())
def get_resource_name(self, name):
"""Alias to create_random_name for back compatibility."""
return self.create_random_name(name)
def get_replayable_random_resource_name(self, name):
"""In a replay scenario, (is not live) gives the static moniker. In the random scenario, gives generated name."""
if self.is_live:
created_name = self.create_random_name(name)
self.scrubber.register_name_pair(created_name, name)
return name
def get_preparer_resource_name(self, prefix):
"""Random name generation for use by preparers.
If prefix is a blank string, use the fully qualified test name instead.
This is what legacy tests do for resource groups."""
return self.get_resource_name(
prefix or self.qualified_test_name.replace(".", "_")
)
@staticmethod
def await_prepared_test(test_fn):
"""Synchronous wrapper for async test methods. Used to avoid making changes
upstream to AbstractPreparer, which only awaits async tests that use preparers.
(Add @AzureTestCase.await_prepared_test decorator to async tests without preparers)
# Note: this will only be needed so long as we maintain unittest.TestCase in our
test-class inheritance chain.
"""
if sys.version_info < (3, 5):
raise ImportError("Async wrapper is not needed for Python 2.7 code.")
import asyncio
@functools.wraps(test_fn)
def run(test_class_instance, *args, **kwargs):
trim_kwargs_from_test_function(test_fn, kwargs)
loop = asyncio.get_event_loop()
return loop.run_until_complete(test_fn(test_class_instance, **kwargs))
return run
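# Hedged usage sketch (MyServiceClient and the test body are hypothetical, not
# part of this module):
#
#   class MyServiceTest(AzureTestCase):
#       @AzureTestCase.await_prepared_test
#       async def test_basic_call(self):
#           credential = self.get_credential(MyServiceClient, is_async=True)
#           client = self.create_client_from_credential(MyServiceClient, credential)
#           await client.close()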
| []
| []
| [
"AZURE_CLIENT_ID",
"AZURE_TENANT_ID",
"AZURE_CLIENT_SECRET",
"RESOURCE_REGION",
"AZURE_\" + ke"
]
| [] | ["AZURE_CLIENT_ID", "AZURE_TENANT_ID", "AZURE_CLIENT_SECRET", "RESOURCE_REGION", "AZURE_\" + ke"] | python | 5 | 0 | |
e2e-test/src/test/java/io/elastest/eim/test/e2e/PacketLossTests75.java | package io.elastest.eim.test.e2e;
import java.io.IOException;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.client.RestTemplate;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
public class PacketLossTests75 {
private String sut_address = System.getenv("ET_SUT_HOST");
private String logstash_ip = System.getenv("ET_MON_LSBEATS_HOST");
private String logstash_port = System.getenv("ET_MON_LSBEATS_PORT");
private String URL_API = "http://"+sut_address+":"+"5000";
private String server = "http://nightly.elastest.io:37004/eim/api/agent/";
private Double latency = 150.0;
public RestTemplate restTemplate = new RestTemplate();
public HttpHeaders headers = new HttpHeaders();
static String agentId;
// TODO - registerAgent_then200OK()
@Test
public void a_Test() throws InterruptedException, IOException {
System.out.println("############ Running Test1 - Register Agent: ############");
String privateKey = "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAp299wI94HsLE5TwFG84RvjtUHHLD1U5F2vTWc4CfOAj0/9kT\ny0xBUi1JCb7D8xK7gM8XMhSfAv305We85/G44pyNLX5btTZok3tagHrk11qTcxdI\ng+xOoUOQjEL5oFobFcmEOd41qyinxFJyjEoH3FdE7mMD7FyO79pUyn7GWjhWzeF2\nkZXbVj7CTVInBfIx3f0cja5tTMCDA7pnUC47OaHbts9YVhTGNH3lFOl54JsAbXl3\nE+5tTyc4kQp1LfZ7aH4hDLqLUXrzoyjs0cTVuMqJx05WYiPu31R62kxYY4ZOtw32\nUJ+aUb3tLAQrgHpC2uPExUxzjR86z5P+HZFtjQIDAQABAoIBACWLC1BtGwsSsyGP\ndrnIWBQmq3KBjUW7+k/hTGCzu3/OCll/7D9OhusNOm5T9w3+6ko0pUfWdd0u4oW5\n4BLGEaXGYqWLyrZ0T7iaFS3v4HYlWiCZXOovx2XDh5rbvatl6OLWv65WFASf5hZQ\nQl0QkHionM0zKIMMMgS4GQEashEyaeoketEORrfgM5A5yQbA/HpfDEyWO3P379SW\nbpAXWig0+JxTe31My/X6kNmY6TI0yAl1WSvTRRlA2e0OZLNU6V1sITP/2xdQdw7x\niVUrj1c9jUi17DhsbnoFjuWkBlxfczL0bYAs7r0EKAmhAF21Ohk5sTOd5dXFfQQV\nyLIy+MECgYEA1QKXXlh5P2yiRQNMzBZNYz7+xm0WKvq81UyQonGgnIIIs1Mcc4dS\nUIGmWUCbiddE2XuTjgvB9MPkjyfpEevDlY3m/X/hkpvExXtHzjgqqBSz6+A3ZOpJ\n5xBKXgjlFhlooxZPThK/0sVqPPtRP636zCWIRx914k61FSzNVjPvQR0CgYEAyTo8\neOHdxpvGsUzslAzA/XWpd7Qoao8jMkv3xLiK1AzamARwk15oRGs3LCajxiMergW3\nYrJH8ef+H/YhKl3jKvB2YG8XdWxKUjFGuxRegLmElfRGDO9UiTQHHhqJoV2l5E71\nzcXw8MjHe+V+A5bkBT0ABhC/mUfVbbaoHvMrIzECgYAh61u3RldoZiAg5Tmhdhu0\nph9j8ZMKHQtc2+hcNcPhqENCawWoz++nqj2XENazyHfKOgdxIyYrl9YZhb1zgRuQ\nARy7WVXORse6urrgd8kzIrjT4sxvYW+LP+jXuIriTgF/ltniENJC+fTE6TAy971s\nLL3atYPMGcR0LsIz6+k5cQKBgQCEowxFKawDQ56+M1QlahqzdqETs/6H7n1mo8hX\nNMTdbPORDCwgFzRnFLyzL2z4JyIL1tzAA3+EpkRNUPEfee9I2GNOwSsXTR/X+X8D\nxTNdaetI5FBgKkjwfwjKAPgDEzVLvfgrgHOGYvGKawSa3RTDlyey18tS/5Rg0usS\nK3qdoQKBgQCWsWuwmemJOz607WWgzxggZxpjUn2wByMu9tXThxRqne4O0NG32rb+\n1C4qDt+NqlUZqpMh6xRfZT62G+oTk3pHHNfIWzvSNzHS3V1Ej5RdidByP9KqSul+\nxzYcQ3VJpgNDovReXWWunpXahgcyPX2Yz7XRUyb1WMd3b7rQcrwDuw==\n-----END RSA PRIVATE KEY-----";
System.out.println("SUT Address: "+sut_address);
//System.out.println("Private key: "+privateKey);
JsonObject obj = new JsonObject();
obj.addProperty("address", new String(sut_address));
obj.addProperty("user", "root");
obj.addProperty("private_key", new String(privateKey));
obj.addProperty("logstash_ip", logstash_ip);
obj.addProperty("logstash_port", logstash_port);
obj.addProperty("password", "elastest");
System.out.println("Payload: "+obj.toString());
String URL = server;
String body = "";
headers.setContentType(MediaType.APPLICATION_JSON);
headers.setAccept(Collections.singletonList(MediaType.APPLICATION_JSON));
int responseCode = -1;
try {
HttpEntity<String> request = new HttpEntity<String>(obj.toString(), headers);
ResponseEntity<String> response = restTemplate.exchange(URL, HttpMethod.POST, request, String.class);
body = response.getBody();
JsonParser parser = new JsonParser();
JsonObject json = (JsonObject) parser.parse(body);
agentId = json.get("agentId").getAsString();
System.out.println("############ Response for Test1: ############");
System.out.println(response);
responseCode= response.getStatusCode().value();
}catch (Exception e) {
// TODO: handle exception
System.out.println(e.getMessage());
System.out.println(e.getCause());
}
Assertions.assertEquals(200, responseCode);
}
@Test
public void b_Test() throws InterruptedException, IOException{
System.out.println("############ Running Test2: No injection cpu commands : ############");
long start = System.nanoTime();
headers.setContentType(MediaType.APPLICATION_JSON);
headers.setAccept(Collections.singletonList(MediaType.APPLICATION_JSON));
int responseCode = -1;
double elapesedTimeInMiliSeconds = 0;
try {
HttpEntity<String> request = new HttpEntity<String>("", headers);
ResponseEntity<String> response = restTemplate.exchange(URL_API, HttpMethod.GET, request, String.class);
System.out.println(response);
long elapsedTime = System.nanoTime() - start ;
elapesedTimeInMiliSeconds = TimeUnit.MILLISECONDS.convert(elapsedTime, TimeUnit.NANOSECONDS);
System.out.println("SLO latency is <= "+latency+". Actual latency is: "+elapesedTimeInMiliSeconds+" ms");
responseCode= response.getStatusCode().value();
}catch (Exception e) {
// TODO: handle exception
System.out.println(e.getMessage());
System.out.println(e.getCause());
}
Assertions.assertEquals(200, responseCode);
}
@Test
public void c_Test() throws InterruptedException {
System.out.println("############ Running Test3: Dropping 0.75% of packets: ############");
String uri_packetloss_action = "controllability/"+agentId+"/packetloss";
String URL = server + uri_packetloss_action;
JsonObject obj = new JsonObject();
obj.addProperty("exec", "EXECBEAT");
obj.addProperty("component", "EIM");
obj.addProperty("packetLoss", "0.75");
obj.addProperty("stressNg", "");
obj.addProperty("dockerized", "yes");
obj.addProperty("cronExpression", "@every 60s");
System.out.println("Payload: "+obj.toString());
headers.setContentType(MediaType.APPLICATION_JSON);
headers.setAccept(Collections.singletonList(MediaType.APPLICATION_JSON));
int responseCode = -1;
try {
HttpEntity<String> request = new HttpEntity<String>(
obj.toString(), headers);
ResponseEntity<String> response = restTemplate.exchange(URL, HttpMethod.POST, request, String.class);
System.out.println("############ Response for Test3: ############");
System.out.println(response);
TimeUnit.SECONDS.sleep(60);
responseCode = response.getStatusCode().value();
}catch (Exception e) {
// TODO: handle exception
System.out.println(e.getMessage());
System.out.println(e.getCause());
}
Assertions.assertEquals(200, responseCode);
}
@Test
public void d_Test() throws InterruptedException, IOException{
System.out.println("############ Running Test4: Max.timing "+latency+" ms: ############");
long start = System.nanoTime();
headers.setContentType(MediaType.APPLICATION_JSON);
headers.setAccept(Collections.singletonList(MediaType.APPLICATION_JSON));
double elapesedTimeInMiliSeconds = 0;
try {
HttpEntity<String> request = new HttpEntity<String>("", headers);
ResponseEntity<String> response = restTemplate.exchange(URL_API, HttpMethod.GET, request, String.class);
System.out.println("############ Response for Test4: ############");
System.out.println(response);
long elapsedTime = System.nanoTime() - start ;
System.out.println("Timing of http request nanoseconds:" + elapsedTime);
// 1 second = 1_000ms
elapesedTimeInMiliSeconds = TimeUnit.MILLISECONDS.convert(elapsedTime, TimeUnit.NANOSECONDS);
System.out.println("SLO latency is <= "+latency+". Actual latency is: "+elapesedTimeInMiliSeconds+" ms");
}catch (Exception e) {
// TODO: handle exception
elapesedTimeInMiliSeconds = 1_000_000;
}
Assertions.assertTrue(elapesedTimeInMiliSeconds <= latency,
"SLO latency is <= "+latency+" ms. Actual latency reported by user is: " +elapesedTimeInMiliSeconds+" ms" );
}
@Test
public void e_Test() throws InterruptedException {
System.out.println("############ Running Test5: ############");
String uri_unistall_agent = agentId+"/unmonitor";
headers.setContentType(MediaType.APPLICATION_JSON);
headers.setAccept(Collections.singletonList(MediaType.APPLICATION_JSON));
String URL = server + uri_unistall_agent;
int responseCode = -1;
try {
HttpEntity<String> request = new HttpEntity<String>("", headers);
ResponseEntity<String> response = restTemplate.exchange(URL, HttpMethod.DELETE, request, String.class);
System.out.println("############ Response for Test5: ############");
System.out.println(response);
//TimeUnit.SECONDS.sleep(180);
responseCode = response.getStatusCode().value();
}catch (Exception e) {
// TODO: handle exception
System.out.println(e.getMessage());
System.out.println(e.getCause());
}
Assertions.assertEquals(200, responseCode);
}
@Test
public void f_Test() throws InterruptedException {
System.out.println("############ Running Test6: ############");
headers.setContentType(MediaType.APPLICATION_JSON);
headers.setAccept(Collections.singletonList(MediaType.APPLICATION_JSON));
String URL = server+agentId;
int responseCode = -1;
try {
HttpEntity<String> request = new HttpEntity<String>("", headers);
//TimeUnit.SECONDS.sleep(500);
ResponseEntity<String>response= restTemplate.exchange(URL, HttpMethod.DELETE, request, String.class);
System.out.println("############ Response for Test6: ############");
System.out.println(response);
responseCode = response.getStatusCode().value();
}catch (Exception e) {
// TODO: handle exception
System.out.println(e.getMessage());
System.out.println(e.getCause());
}
Assertions.assertEquals(200,responseCode);
}
}
| [
"\"ET_SUT_HOST\"",
"\"ET_MON_LSBEATS_HOST\"",
"\"ET_MON_LSBEATS_PORT\""
]
| []
| [
"ET_SUT_HOST",
"ET_MON_LSBEATS_HOST",
"ET_MON_LSBEATS_PORT"
]
| [] | ["ET_SUT_HOST", "ET_MON_LSBEATS_HOST", "ET_MON_LSBEATS_PORT"] | java | 3 | 0 | |
ddtrace/contrib/pyramid/patch.py | import os
import pyramid.config
from pyramid.path import caller_package
from ddtrace import config
from ddtrace.vendor import wrapt
from ...utils.formats import asbool
from ...utils.formats import get_env
from .constants import SETTINGS_ANALYTICS_ENABLED
from .constants import SETTINGS_ANALYTICS_SAMPLE_RATE
from .constants import SETTINGS_DISTRIBUTED_TRACING
from .constants import SETTINGS_SERVICE
from .trace import DD_TWEEN_NAME
from .trace import trace_pyramid
config._add(
"pyramid",
dict(
distributed_tracing=asbool(get_env("pyramid", "distributed_tracing", default=True)),
),
)
DD_PATCH = "_datadog_patch"
def patch():
"""
Patch pyramid.config.Configurator
"""
if getattr(pyramid.config, DD_PATCH, False):
return
setattr(pyramid.config, DD_PATCH, True)
_w = wrapt.wrap_function_wrapper
_w("pyramid.config", "Configurator.__init__", traced_init)
def traced_init(wrapped, instance, args, kwargs):
settings = kwargs.pop("settings", {})
service = config._get_service(default="pyramid")
# DEV: the integration-specific analytics flag can be left unset but still be enabled
# globally for web frameworks
old_analytics_enabled = get_env("pyramid", "analytics_enabled")
analytics_enabled = os.environ.get("DD_TRACE_PYRAMID_ANALYTICS_ENABLED", old_analytics_enabled)
if analytics_enabled is not None:
analytics_enabled = asbool(analytics_enabled)
# TODO: why is analytics sample rate a string or a bool here?
old_analytics_sample_rate = get_env("pyramid", "analytics_sample_rate", default=True)
analytics_sample_rate = os.environ.get("DD_TRACE_PYRAMID_ANALYTICS_SAMPLE_RATE", old_analytics_sample_rate)
trace_settings = {
SETTINGS_SERVICE: service,
SETTINGS_DISTRIBUTED_TRACING: config.pyramid.distributed_tracing,
SETTINGS_ANALYTICS_ENABLED: analytics_enabled,
SETTINGS_ANALYTICS_SAMPLE_RATE: analytics_sample_rate,
}
# Update over top of the defaults
# DEV: If we did `settings.update(trace_settings)` then we would only ever
# have the default values.
trace_settings.update(settings)
# If the tweens are explicitly set with 'pyramid.tweens', we need to
# explicitly set our tween too since `add_tween` will be ignored.
insert_tween_if_needed(trace_settings)
kwargs["settings"] = trace_settings
# `caller_package` works by walking a fixed amount of frames up the stack
# to find the calling package. So if we let the original `__init__`
# function call it, our wrapper will mess things up.
if not kwargs.get("package", None):
# Get the package for the third frame up from this one.
# - ddtrace.contrib.pyramid.path
# - ddtrace.vendor.wrapt
# - (this is the frame we want)
# DEV: Default is `level=2` which will give us the package from `wrapt`
kwargs["package"] = caller_package(level=3)
wrapped(*args, **kwargs)
trace_pyramid(instance)
def insert_tween_if_needed(settings):
tweens = settings.get("pyramid.tweens")
# If the list is empty, pyramid does not consider the tweens have been
# set explicitly.
# And if our tween is already there, nothing to do
if not tweens or not tweens.strip() or DD_TWEEN_NAME in tweens:
return
# pyramid.tweens.EXCVIEW is the name of built-in exception view provided by
# pyramid. We need our tween to be before it, otherwise unhandled
# exceptions will be caught before they reach our tween.
idx = tweens.find(pyramid.tweens.EXCVIEW)
if idx == -1:
settings["pyramid.tweens"] = tweens + "\n" + DD_TWEEN_NAME
else:
settings["pyramid.tweens"] = tweens[:idx] + DD_TWEEN_NAME + "\n" + tweens[idx:]
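# Illustrative sketch of the insertion rule (the application tween name is made up):
# when tweens are pinned explicitly, the Datadog tween is spliced in just before the
# built-in exception view so it still observes unhandled exceptions.
#
#   settings = {"pyramid.tweens": "myapp.tween_factory\npyramid.tweens.excview_tween_factory"}
#   insert_tween_if_needed(settings)
#   # settings["pyramid.tweens"] now lists DD_TWEEN_NAME immediately before
#   # pyramid.tweens.excview_tween_factory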
| []
| []
| [
"DD_TRACE_PYRAMID_ANALYTICS_SAMPLE_RATE",
"DD_TRACE_PYRAMID_ANALYTICS_ENABLED"
]
| [] | ["DD_TRACE_PYRAMID_ANALYTICS_SAMPLE_RATE", "DD_TRACE_PYRAMID_ANALYTICS_ENABLED"] | python | 2 | 0 | |
messaging/message.go | package messaging
import (
"encoding/json"
result "github.com/heaptracetechnology/microservice-twilio/result"
"github.com/sfreiberg/gotwilio"
"net/http"
"os"
)
type SMS struct {
From string `json:"from"`
To string `json:"to"`
Message string `json:"message"`
}
// Send handles the HTTP request and sends an SMS via the Twilio REST API
func Send(responseWriter http.ResponseWriter, request *http.Request) {
accountSid := os.Getenv("ACCOUNT_SID")
authToken := os.Getenv("AUTH_TOKEN")
decoder := json.NewDecoder(request.Body)
var param SMS
decodeErr := decoder.Decode(¶m)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
twilio := gotwilio.NewTwilioClient(accountSid, authToken)
res, exp, respErr := twilio.SendSMS(param.From, param.To, param.Message, "", "")
if respErr != nil {
result.WriteErrorResponse(responseWriter, respErr)
return
}
if res != nil {
bytes, _ := json.Marshal(res)
result.WriteJsonResponse(responseWriter, bytes, http.StatusOK)
} else if exp != nil {
bytes, _ := json.Marshal(exp)
result.WriteJsonResponse(responseWriter, bytes, http.StatusOK)
}
}
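// A hedged usage sketch (the route and payload shape are assumptions inferred from
// the struct tags above, not taken from this file):
//
//	http.HandleFunc("/send", Send)
//	// POST /send with body {"from":"+15550001111","to":"+15550002222","message":"hi"}
//	// ACCOUNT_SID and AUTH_TOKEN must be set in the environment before the request.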
| [
"\"ACCOUNT_SID\"",
"\"AUTH_TOKEN\""
]
| []
| [
"ACCOUNT_SID",
"AUTH_TOKEN"
]
| [] | ["ACCOUNT_SID", "AUTH_TOKEN"] | go | 2 | 0 | |
sdk/python/tests/compiler/compiler_tests.py | # Copyright 2018-2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import kfp
import kfp.compiler as compiler
import kfp.dsl as dsl
import json
import os
import shutil
import subprocess
import sys
import zipfile
import tarfile
import tempfile
import mock
import yaml
from absl.testing import parameterized
from kfp.compiler import Compiler
from kfp.dsl._component import component
from kfp.dsl import ContainerOp, pipeline, PipelineParam
from kfp.dsl.types import Integer, InconsistentTypeException
from kubernetes.client import V1Toleration, V1Affinity, V1NodeSelector, V1NodeSelectorRequirement, V1NodeSelectorTerm, \
V1NodeAffinity, V1PodDNSConfig, V1PodDNSConfigOption
def some_op():
return dsl.ContainerOp(
name='sleep',
image='busybox',
command=['sleep 1'],
)
class TestCompiler(parameterized.TestCase):
# Define the places of samples covered by unit tests.
core_sample_path = os.path.join(os.path.dirname(__file__), '..', '..', '..',
'..', 'samples', 'core',)
def test_operator_to_template(self):
"""Test converting operator to template"""
from kubernetes import client as k8s_client
def my_pipeline(msg1, json, kind, msg2='value2'):
op = dsl.ContainerOp(name='echo', image='image', command=['sh', '-c'],
arguments=['echo %s %s | tee /tmp/message.txt' % (msg1, msg2)],
file_outputs={'merged': '/tmp/message.txt'}) \
.add_volume_mount(k8s_client.V1VolumeMount(
mount_path='/secret/gcp-credentials',
name='gcp-credentials')) \
.add_env_variable(k8s_client.V1EnvVar(
name='GOOGLE_APPLICATION_CREDENTIALS',
value='/secret/gcp-credentials/user-gcp-sa.json'))
res = dsl.ResourceOp(
name="test-resource",
k8s_resource=k8s_client.V1PersistentVolumeClaim(
api_version="v1",
kind=kind,
metadata=k8s_client.V1ObjectMeta(
name="resource"
)
),
attribute_outputs={"out": json},
set_owner_reference=True
)
golden_output = {
'container': {
'image': 'image',
'args': [
'echo {{inputs.parameters.msg1}} {{inputs.parameters.msg2}} | tee /tmp/message.txt'
],
'command': ['sh', '-c'],
'env': [
{
'name': 'GOOGLE_APPLICATION_CREDENTIALS',
'value': '/secret/gcp-credentials/user-gcp-sa.json'
}
],
'volumeMounts':[
{
'mountPath': '/secret/gcp-credentials',
'name': 'gcp-credentials',
}
]
},
'inputs': {'parameters':
[
{'name': 'msg1'},
{'name': 'msg2'},
]},
'name': 'echo',
'outputs': {
'artifacts': [
{
'name': 'echo-merged',
'path': '/tmp/message.txt',
},
],
'parameters': [
{'name': 'echo-merged',
'valueFrom': {'path': '/tmp/message.txt'}
}],
}
}
res_output = {
'inputs': {
'parameters': [{
'name': 'json'
}, {
'name': 'kind'
}]
},
'name': 'test-resource',
'outputs': {
'parameters': [{
'name': 'test-resource-manifest',
'valueFrom': {
'jsonPath': '{}'
}
}, {
'name': 'test-resource-name',
'valueFrom': {
'jsonPath': '{.metadata.name}'
}
}, {
'name': 'test-resource-out',
'valueFrom': {
'jsonPath': '{{inputs.parameters.json}}'
}
}]
},
'resource': {
'action': 'create',
'manifest': (
"apiVersion: v1\n"
"kind: '{{inputs.parameters.kind}}'\n"
"metadata:\n"
" name: resource\n"
),
'setOwnerReference': True
}
}
self.maxDiff = None
self.assertEqual(golden_output, compiler._op_to_template._op_to_template(op))
self.assertEqual(res_output, compiler._op_to_template._op_to_template(res))
kfp.compiler.Compiler()._compile(my_pipeline)
def _get_yaml_from_zip(self, zip_file):
with zipfile.ZipFile(zip_file, 'r') as zip:
with open(zip.extract(zip.namelist()[0]), 'r') as yaml_file:
return yaml.safe_load(yaml_file)
def _get_yaml_from_tar(self, tar_file):
with tarfile.open(tar_file, 'r:gz') as tar:
return yaml.safe_load(tar.extractfile(tar.getmembers()[0]))
def test_basic_workflow(self):
"""Test compiling a basic workflow."""
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
sys.path.append(test_data_dir)
import basic
tmpdir = tempfile.mkdtemp()
package_path = os.path.join(tmpdir, 'workflow.zip')
try:
compiler.Compiler().compile(basic.save_most_frequent_word, package_path)
with open(os.path.join(test_data_dir, 'basic.yaml'), 'r') as f:
golden = yaml.safe_load(f)
compiled = self._get_yaml_from_zip(package_path)
for workflow in golden, compiled:
del workflow['metadata']
for template in workflow['spec']['templates']:
template.pop('metadata', None)
self.maxDiff = None
# Comment next line for generating golden yaml.
self.assertEqual(golden, compiled)
finally:
# Replace next line with commented line for gathering golden yaml.
shutil.rmtree(tmpdir)
# print(tmpdir)
def test_basic_workflow_without_decorator(self):
"""Test compiling a workflow and appending pipeline params."""
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
sys.path.append(test_data_dir)
import basic_no_decorator
tmpdir = tempfile.mkdtemp()
try:
compiled_workflow = compiler.Compiler().create_workflow(
basic_no_decorator.save_most_frequent_word,
'Save Most Frequent',
'Get Most Frequent Word and Save to GCS',
[
basic_no_decorator.message_param,
basic_no_decorator.output_path_param
])
with open(os.path.join(test_data_dir, 'basic_no_decorator.yaml'), 'r') as f:
golden = yaml.safe_load(f)
name_to_template = {template['name']: template for template in compiled_workflow['spec']['templates']}
for k, v in name_to_template.items():
if k in ['exiting', 'get-frequent', 'save']:
self.assertEqual(v['metadata']['labels']['pipelines.kubeflow.org/pipeline-sdk-type'], 'kfp')
self.assertTrue(v['metadata']['labels']['pipelines.kubeflow.org/kfp_sdk_version'] is not None)
for workflow in golden, compiled_workflow:
del workflow['metadata']
for template in workflow['spec']['templates']:
template.pop('metadata', None)
self.assertEqual(golden, compiled_workflow)
finally:
shutil.rmtree(tmpdir)
def test_composing_workflow(self):
"""Test compiling a simple workflow, and a bigger one composed from the simple one."""
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
sys.path.append(test_data_dir)
import compose
tmpdir = tempfile.mkdtemp()
try:
# First make sure the simple pipeline can be compiled.
simple_package_path = os.path.join(tmpdir, 'simple.zip')
compiler.Compiler().compile(compose.save_most_frequent_word, simple_package_path)
# Then make sure the composed pipeline can be compiled and also compare with golden.
compose_package_path = os.path.join(tmpdir, 'compose.zip')
compiler.Compiler().compile(compose.download_save_most_frequent_word, compose_package_path)
with open(os.path.join(test_data_dir, 'compose.yaml'), 'r') as f:
golden = yaml.safe_load(f)
compiled = self._get_yaml_from_zip(compose_package_path)
for workflow in golden, compiled:
del workflow['metadata']
for template in workflow['spec']['templates']:
template.pop('metadata', None)
self.maxDiff = None
# Comment next line for generating golden yaml.
self.assertEqual(golden, compiled)
finally:
# Replace next line with commented line for gathering golden yaml.
shutil.rmtree(tmpdir)
# print(tmpdir)
def _test_py_compile_zip(self, file_base_name):
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
py_file = os.path.join(test_data_dir, file_base_name + '.py')
tmpdir = tempfile.mkdtemp()
try:
target_zip = os.path.join(tmpdir, file_base_name + '.zip')
subprocess.check_call([
'dsl-compile', '--py', py_file, '--output', target_zip])
with open(os.path.join(test_data_dir, file_base_name + '.yaml'), 'r') as f:
golden = yaml.safe_load(f)
compiled = self._get_yaml_from_zip(target_zip)
for workflow in golden, compiled:
del workflow['metadata']
for template in workflow['spec']['templates']:
template.pop('metadata', None)
self.maxDiff = None
self.assertEqual(golden, compiled)
finally:
shutil.rmtree(tmpdir)
def _test_py_compile_targz(self, file_base_name):
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
py_file = os.path.join(test_data_dir, file_base_name + '.py')
tmpdir = tempfile.mkdtemp()
try:
target_tar = os.path.join(tmpdir, file_base_name + '.tar.gz')
subprocess.check_call([
'dsl-compile', '--py', py_file, '--output', target_tar])
with open(os.path.join(test_data_dir, file_base_name + '.yaml'), 'r') as f:
golden = yaml.safe_load(f)
compiled = self._get_yaml_from_tar(target_tar)
for workflow in golden, compiled:
del workflow['metadata']
for template in workflow['spec']['templates']:
template.pop('metadata', None)
self.maxDiff = None
self.assertEqual(golden, compiled)
finally:
shutil.rmtree(tmpdir)
def _test_py_compile_yaml(self, file_base_name):
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
py_file = os.path.join(test_data_dir, file_base_name + '.py')
tmpdir = tempfile.mkdtemp()
try:
target_yaml = os.path.join(tmpdir, file_base_name + '-pipeline.yaml')
subprocess.check_call([
'dsl-compile', '--py', py_file, '--output', target_yaml])
with open(os.path.join(test_data_dir, file_base_name + '.yaml'), 'r') as f:
golden = yaml.safe_load(f)
with open(os.path.join(test_data_dir, target_yaml), 'r') as f:
compiled = yaml.safe_load(f)
for workflow in golden, compiled:
del workflow['metadata']
for template in workflow['spec']['templates']:
template.pop('metadata', None)
self.maxDiff = None
self.assertEqual(golden, compiled)
finally:
shutil.rmtree(tmpdir)
def _test_sample_py_compile_yaml(self, file_base_name):
# Jump back to sample dir for sample python file.
sample_data_dir = os.path.join(self.core_sample_path, file_base_name)
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
py_file = os.path.join(sample_data_dir, file_base_name + '.py')
tmpdir = tempfile.mkdtemp()
try:
target_yaml = os.path.join(tmpdir, file_base_name + '-pipeline.yaml')
subprocess.check_call(
['dsl-compile', '--py', py_file, '--output', target_yaml])
with open(os.path.join(test_data_dir, file_base_name + '.yaml'),
'r') as f:
golden = yaml.safe_load(f)
with open(os.path.join(test_data_dir, target_yaml), 'r') as f:
compiled = yaml.safe_load(f)
for workflow in golden, compiled:
del workflow['metadata']
for template in workflow['spec']['templates']:
template.pop('metadata', None)
self.maxDiff = None
self.assertEqual(golden, compiled)
finally:
shutil.rmtree(tmpdir)
def test_py_compile_basic(self):
"""Test basic sequential pipeline."""
self._test_py_compile_zip('basic')
def test_py_compile_with_sidecar(self):
"""Test pipeline with sidecar."""
self._test_py_compile_yaml('sidecar')
def test_py_compile_with_pipelineparams(self):
"""Test pipeline with multiple pipeline params."""
self._test_py_compile_yaml('pipelineparams')
def test_py_compile_with_opsgroups(self):
"""Test pipeline with multiple opsgroups."""
self._test_py_compile_yaml('opsgroups')
def test_py_compile_condition(self):
"""Test a pipeline with conditions."""
self._test_py_compile_zip('coin')
def test_py_compile_default_value(self):
"""Test a pipeline with a parameter with default value."""
self._test_py_compile_targz('default_value')
def test_py_volume(self):
"""Test a pipeline with a volume and volume mount."""
self._test_py_compile_yaml('volume')
@parameterized.parameters(
{'mode': 'V2_COMPATIBLE', 'is_v2': True},
{'mode': 'V1', 'is_v2': False},
{'mode': 'V1_LEGACY', 'is_v2': False},
{'mode': None, 'is_v2': False},
{'mode': 'V2_COMPATIBLE', 'env': 'V1', 'is_v2': True},
{'mode': None, 'env': 'V1', 'is_v2': False},
{'mode': None, 'env': 'V2_COMPATIBLE', 'is_v2': True},
{'mode': None, 'env': 'V1_LEGACY', 'is_v2': False},
{'mode': 'INVALID', 'error': True},
{'mode': None, 'env': 'INVALID', 'error': True},
)
def test_dsl_compile_mode(self, mode: Optional[str] = None, is_v2: Optional[bool] = None, env: Optional[str] = None, error: Optional[bool] = None):
with mock.patch.dict(os.environ, env and {'KF_PIPELINES_COMPILER_MODE': env} or {}):
file_base_name = 'two_step'
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
py_file = os.path.join(test_data_dir, f'{file_base_name}.py')
tmpdir = tempfile.mkdtemp()
try:
target_yaml = os.path.join(tmpdir, f'{file_base_name}.yaml')
args = ['dsl-compile', '--py', py_file, '--output', target_yaml]
if mode:
args = args + ['--mode', mode]
got_error = None
compiled = None
try:
subprocess.check_output(args)
with open(target_yaml, 'r') as f:
compiled = yaml.safe_load(f)
except subprocess.CalledProcessError as err:
got_error = err
if error:
if not got_error:
self.fail(f'expected error, but succeeded')
else:
if got_error:
self.fail(f'expected success, but got {got_error}')
v2_pipeline_annotation = compiled['metadata']['annotations'].get('pipelines.kubeflow.org/v2_pipeline')
if is_v2:
self.assertEqual(
'true',
v2_pipeline_annotation,
f'expected to compile in v2_compatible mode'
)
else:
self.assertEqual(
None,
v2_pipeline_annotation,
f'expected to compile in v1 mode'
)
finally:
shutil.rmtree(tmpdir)
def test_py_retry_policy(self):
"""Test retry policy is set."""
policy = 'Always'
backoff_duration = '2m'
backoff_factor = 1.5
backoff_max_duration = '3m'
def my_pipeline():
some_op().set_retry(2, policy, backoff_duration, backoff_factor,
backoff_max_duration)
workflow = kfp.compiler.Compiler()._compile(my_pipeline)
name_to_template = {template['name']: template for template in workflow['spec']['templates']}
main_dag_tasks = name_to_template[workflow['spec']['entrypoint']]['dag']['tasks']
template = name_to_template[main_dag_tasks[0]['template']]
self.assertEqual(template['retryStrategy']['retryPolicy'], policy)
self.assertEqual(template['retryStrategy']['backoff']['duration'], backoff_duration)
self.assertEqual(template['retryStrategy']['backoff']['factor'], backoff_factor)
self.assertEqual(template['retryStrategy']['backoff']['maxDuration'], backoff_max_duration)
def test_py_runtime_memory_request(self):
"""Test memory request."""
def my_pipeline(memory: str, cpu: str):
some_op().set_cpu_request(memory)
workflow = kfp.compiler.Compiler()._create_workflow(my_pipeline)
name_to_template = {template['name']: template for template in workflow['spec']['templates']}
main_dag_tasks = name_to_template[workflow['spec']['entrypoint']]['dag']['tasks']
template = name_to_template[main_dag_tasks[0]['template']]
self.assertEqual(template['podSpecPatch'], '{"containers": [{"name": "main", "resources": {"requests": {"cpu": "{{inputs.parameters.memory}}"}}}]}')
def test_py_retry_policy_invalid(self):
def my_pipeline():
some_op().set_retry(2, 'Invalid')
with self.assertRaises(ValueError):
kfp.compiler.Compiler()._compile(my_pipeline)
def test_py_retry(self):
"""Test retry functionality."""
number_of_retries = 137
def my_pipeline():
some_op().set_retry(number_of_retries)
workflow = kfp.compiler.Compiler()._compile(my_pipeline)
name_to_template = {template['name']: template for template in workflow['spec']['templates']}
main_dag_tasks = name_to_template[workflow['spec']['entrypoint']]['dag']['tasks']
template = name_to_template[main_dag_tasks[0]['template']]
self.assertEqual(template['retryStrategy']['limit'], number_of_retries)
def test_affinity(self):
"""Test affinity functionality."""
exp_affinity = {
'affinity': {
'nodeAffinity': {
'requiredDuringSchedulingIgnoredDuringExecution': {
'nodeSelectorTerms': [
{'matchExpressions': [
{
'key': 'beta.kubernetes.io/instance-type',
'operator': 'In',
'values': ['p2.xlarge']}
]
}]
}}
}
}
def my_pipeline():
affinity = V1Affinity(
node_affinity=V1NodeAffinity(
required_during_scheduling_ignored_during_execution=V1NodeSelector(
node_selector_terms=[V1NodeSelectorTerm(
match_expressions=[V1NodeSelectorRequirement(
key='beta.kubernetes.io/instance-type', operator='In', values=['p2.xlarge'])])])))
some_op().add_affinity(affinity)
workflow = kfp.compiler.Compiler()._compile(my_pipeline)
self.assertEqual(workflow['spec']['templates'][1]['affinity'], exp_affinity['affinity'])
def test_py_image_pull_secrets(self):
"""Test pipeline imagepullsecret."""
self._test_sample_py_compile_yaml('imagepullsecrets')
def test_py_timeout(self):
"""Test pipeline timeout."""
self._test_py_compile_yaml('timeout')
def test_py_recursive_do_while(self):
"""Test pipeline recursive."""
self._test_py_compile_yaml('recursive_do_while')
def test_py_recursive_while(self):
"""Test pipeline recursive."""
self._test_py_compile_yaml('recursive_while')
def test_py_resourceop_basic(self):
"""Test pipeline resourceop_basic."""
self._test_py_compile_yaml('resourceop_basic')
def test_py_volumeop_basic(self):
"""Test pipeline volumeop_basic."""
self._test_py_compile_yaml('volumeop_basic')
def test_py_volumeop_parallel(self):
"""Test pipeline volumeop_parallel."""
self._test_py_compile_yaml('volumeop_parallel')
def test_py_volumeop_dag(self):
"""Test pipeline volumeop_dag."""
self._test_py_compile_yaml('volumeop_dag')
def test_py_volume_snapshotop_sequential(self):
"""Test pipeline volume_snapshotop_sequential."""
self._test_py_compile_yaml('volume_snapshotop_sequential')
def test_py_volume_snapshotop_rokurl(self):
"""Test pipeline volume_snapshotop_rokurl."""
self._test_py_compile_yaml('volume_snapshotop_rokurl')
def test_py_volumeop_sequential(self):
"""Test pipeline volumeop_sequential."""
self._test_py_compile_yaml('volumeop_sequential')
def test_py_param_substitutions(self):
"""Test pipeline param_substitutions."""
self._test_py_compile_yaml('param_substitutions')
def test_py_param_op_transform(self):
"""Test pipeline param_op_transform."""
self._test_py_compile_yaml('param_op_transform')
def test_py_preemptible_gpu(self):
"""Test preemptible GPU/TPU sample."""
self._test_sample_py_compile_yaml('preemptible_tpu_gpu')
def test_type_checking_with_consistent_types(self):
"""Test type check pipeline parameters against component metadata."""
@component
def a_op(field_m: {'GCSPath': {'path_type': 'file', 'file_type':'tsv'}}, field_o: Integer()):
return ContainerOp(
name = 'operator a',
image = 'gcr.io/ml-pipeline/component-b',
arguments = [
'--field-l', field_m,
'--field-o', field_o,
],
)
@pipeline(
name='p1',
description='description1'
)
def my_pipeline(a: {'GCSPath': {'path_type':'file', 'file_type': 'tsv'}}='good', b: Integer()=12):
a_op(field_m=a, field_o=b)
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
sys.path.append(test_data_dir)
tmpdir = tempfile.mkdtemp()
try:
simple_package_path = os.path.join(tmpdir, 'simple.tar.gz')
compiler.Compiler().compile(my_pipeline, simple_package_path, type_check=True)
finally:
shutil.rmtree(tmpdir)
def test_type_checking_with_inconsistent_types(self):
"""Test type check pipeline parameters against component metadata."""
@component
def a_op(field_m: {'GCSPath': {'path_type': 'file', 'file_type':'tsv'}}, field_o: Integer()):
return ContainerOp(
name = 'operator a',
image = 'gcr.io/ml-pipeline/component-b',
arguments = [
'--field-l', field_m,
'--field-o', field_o,
],
)
@pipeline(
name='p1',
description='description1'
)
def my_pipeline(a: {'GCSPath': {'path_type':'file', 'file_type': 'csv'}}='good', b: Integer()=12):
a_op(field_m=a, field_o=b)
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
sys.path.append(test_data_dir)
tmpdir = tempfile.mkdtemp()
try:
simple_package_path = os.path.join(tmpdir, 'simple.tar.gz')
with self.assertRaises(InconsistentTypeException):
compiler.Compiler().compile(my_pipeline, simple_package_path, type_check=True)
compiler.Compiler().compile(my_pipeline, simple_package_path, type_check=False)
finally:
shutil.rmtree(tmpdir)
def test_type_checking_with_json_schema(self):
"""Test type check pipeline parameters against the json schema."""
@component
def a_op(field_m: {'GCRPath': {'openapi_schema_validator': {"type": "string", "pattern": "^.*gcr\\.io/.*$"}}}, field_o: 'Integer'):
return ContainerOp(
name = 'operator a',
image = 'gcr.io/ml-pipeline/component-b',
arguments = [
'--field-l', field_m,
'--field-o', field_o,
],
)
@pipeline(
name='p1',
description='description1'
)
def my_pipeline(a: {'GCRPath': {'openapi_schema_validator': {"type": "string", "pattern": "^.*gcr\\.io/.*$"}}}='good', b: 'Integer'=12):
a_op(field_m=a, field_o=b)
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
sys.path.append(test_data_dir)
tmpdir = tempfile.mkdtemp()
try:
simple_package_path = os.path.join(tmpdir, 'simple.tar.gz')
import jsonschema
with self.assertRaises(jsonschema.exceptions.ValidationError):
compiler.Compiler().compile(my_pipeline, simple_package_path, type_check=True)
finally:
shutil.rmtree(tmpdir)
def test_compile_pipeline_with_after(self):
def op():
return dsl.ContainerOp(
name='Some component name',
image='image'
)
@dsl.pipeline(name='Pipeline')
def pipeline():
task1 = op()
task2 = op().after(task1)
compiler.Compiler()._compile(pipeline)
def _test_op_to_template_yaml(self, ops, file_base_name):
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
target_yaml = os.path.join(test_data_dir, file_base_name + '.yaml')
with open(target_yaml, 'r') as f:
expected = yaml.safe_load(f)['spec']['templates'][0]
compiled_template = compiler._op_to_template._op_to_template(ops)
del compiled_template['name'], expected['name']
for output in compiled_template['outputs'].get('parameters', []) + compiled_template['outputs'].get('artifacts', []) + expected['outputs'].get('parameters', []) + expected['outputs'].get('artifacts', []):
del output['name']
assert compiled_template == expected
def test_tolerations(self):
"""Test a pipeline with a tolerations."""
op1 = dsl.ContainerOp(
name='download',
image='busybox',
command=['sh', '-c'],
arguments=['sleep 10; wget localhost:5678 -O /tmp/results.txt'],
file_outputs={'downloaded': '/tmp/results.txt'}) \
.add_toleration(V1Toleration(
effect='NoSchedule',
key='gpu',
operator='Equal',
value='run'))
self._test_op_to_template_yaml(op1, file_base_name='tolerations')
def test_set_display_name(self):
"""Test a pipeline with a customized task names."""
import kfp
op1 = kfp.components.load_component_from_text(
'''
name: Component name
implementation:
container:
image: busybox
'''
)
@dsl.pipeline()
def some_pipeline():
op1().set_display_name('Custom name')
workflow_dict = kfp.compiler.Compiler()._compile(some_pipeline)
template = workflow_dict['spec']['templates'][0]
self.assertEqual(template['metadata']['annotations']['pipelines.kubeflow.org/task_display_name'], 'Custom name')
def test_set_dynamic_display_name(self):
"""Test a pipeline with a customized task names."""
def some_pipeline(custom_name):
some_op().set_display_name(custom_name)
workflow_dict = kfp.compiler.Compiler()._compile(some_pipeline)
template = [template for template in workflow_dict['spec']['templates'] if 'container' in template][0]
self.assertNotIn('pipelineparam', template['metadata']['annotations']['pipelines.kubeflow.org/task_display_name'])
def test_set_parallelism(self):
"""Test a pipeline with parallelism limits."""
def some_op():
return dsl.ContainerOp(
name='sleep',
image='busybox',
command=['sleep 1'],
)
@dsl.pipeline()
def some_pipeline():
some_op()
some_op()
some_op()
dsl.get_pipeline_conf().set_parallelism(1)
workflow_dict = kfp.compiler.Compiler()._compile(some_pipeline)
self.assertEqual(workflow_dict['spec']['parallelism'], 1)
def test_set_ttl_seconds_after_finished(self):
"""Test a pipeline with ttl after finished."""
def some_op():
return dsl.ContainerOp(
name='sleep',
image='busybox',
command=['sleep 1'],
)
@dsl.pipeline()
def some_pipeline():
some_op()
dsl.get_pipeline_conf().set_ttl_seconds_after_finished(86400)
workflow_dict = kfp.compiler.Compiler()._compile(some_pipeline)
self.assertEqual(workflow_dict['spec']['ttlSecondsAfterFinished'], 86400)
def test_pod_disruption_budget(self):
"""Test a pipeline with a pod disruption budget."""
def some_op():
return dsl.ContainerOp(
name='sleep',
image='busybox',
command=['sleep 1'],
)
@dsl.pipeline()
def some_pipeline():
some_op()
dsl.get_pipeline_conf().set_pod_disruption_budget("100%")
workflow_dict = kfp.compiler.Compiler()._compile(some_pipeline)
self.assertEqual(workflow_dict['spec']["podDisruptionBudget"]['minAvailable'], "100%")
def test_op_transformers(self):
def some_op():
return dsl.ContainerOp(
name='sleep',
image='busybox',
command=['sleep 1'],
)
@dsl.pipeline(name='some_pipeline')
def some_pipeline():
task1 = some_op()
task2 = some_op()
task3 = some_op()
dsl.get_pipeline_conf().op_transformers.append(lambda op: op.set_retry(5))
workflow_dict = compiler.Compiler()._compile(some_pipeline)
for template in workflow_dict['spec']['templates']:
container = template.get('container', None)
if container:
self.assertEqual(template['retryStrategy']['limit'], 5)
def test_image_pull_policy(self):
def some_op():
return dsl.ContainerOp(
name='sleep',
image='busybox',
command=['sleep 1'],
)
@dsl.pipeline(name='some_pipeline')
def some_pipeline():
task1 = some_op()
task2 = some_op()
task3 = some_op()
dsl.get_pipeline_conf().set_image_pull_policy(policy="Always")
workflow_dict = compiler.Compiler()._compile(some_pipeline)
for template in workflow_dict['spec']['templates']:
container = template.get('container', None)
if container:
self.assertEqual(template['container']['imagePullPolicy'], "Always")
def test_image_pull_policy_step_spec(self):
def some_op():
return dsl.ContainerOp(
name='sleep',
image='busybox',
command=['sleep 1'],
)
def some_other_op():
return dsl.ContainerOp(
name='other',
image='busybox',
command=['sleep 1'],
)
@dsl.pipeline(name='some_pipeline')
def some_pipeline():
task1 = some_op()
task2 = some_op()
task3 = some_other_op().set_image_pull_policy("IfNotPresent")
dsl.get_pipeline_conf().set_image_pull_policy(policy="Always")
workflow_dict = compiler.Compiler()._compile(some_pipeline)
for template in workflow_dict['spec']['templates']:
container = template.get('container', None)
if container:
if template['name'] == "other":
self.assertEqual(template['container']['imagePullPolicy'], "IfNotPresent")
elif template['name'] == "sleep":
self.assertEqual(template['container']['imagePullPolicy'], "Always")
def test_image_pull_policy_invalid_setting(self):
def some_op():
return dsl.ContainerOp(
name='sleep',
image='busybox',
command=['sleep 1'],
)
with self.assertRaises(ValueError):
@dsl.pipeline(name='some_pipeline')
def some_pipeline():
task1 = some_op()
task2 = some_op()
dsl.get_pipeline_conf().set_image_pull_policy(policy="Alwayss")
workflow_dict = compiler.Compiler()._compile(some_pipeline)
def test_set_default_pod_node_selector(self):
"""Test a pipeline with node selector."""
def some_op():
return dsl.ContainerOp(
name='sleep',
image='busybox',
command=['sleep 1'],
)
@dsl.pipeline()
def some_pipeline():
some_op()
dsl.get_pipeline_conf().set_default_pod_node_selector(label_name="cloud.google.com/gke-accelerator", value="nvidia-tesla-p4")
workflow_dict = kfp.compiler.Compiler()._compile(some_pipeline)
self.assertEqual(workflow_dict['spec']['nodeSelector'], {"cloud.google.com/gke-accelerator":"nvidia-tesla-p4"})
def test_set_dns_config(self):
"""Test a pipeline with a pod DNS config."""
@dsl.pipeline()
def some_pipeline():
some_op()
dsl.get_pipeline_conf().set_dns_config(V1PodDNSConfig(
nameservers=["1.2.3.4"],
options=[V1PodDNSConfigOption(name="ndots", value="2")]
))
workflow_dict = kfp.compiler.Compiler()._compile(some_pipeline)
self.assertEqual(
workflow_dict['spec']['dnsConfig'],
{"nameservers": ["1.2.3.4"], "options": [{"name": "ndots", "value": "2"}]}
)
def test_container_op_output_error_when_no_or_multiple_outputs(self):
def no_outputs_pipeline():
no_outputs_op = dsl.ContainerOp(name='dummy', image='dummy')
dsl.ContainerOp(name='dummy', image='dummy', arguments=[no_outputs_op.output])
def one_output_pipeline():
one_output_op = dsl.ContainerOp(name='dummy', image='dummy', file_outputs={'out1': 'path1'})
dsl.ContainerOp(name='dummy', image='dummy', arguments=[one_output_op.output])
def two_outputs_pipeline():
two_outputs_op = dsl.ContainerOp(name='dummy', image='dummy', file_outputs={'out1': 'path1', 'out2': 'path2'})
dsl.ContainerOp(name='dummy', image='dummy', arguments=[two_outputs_op.output])
with self.assertRaises(RuntimeError):
compiler.Compiler()._compile(no_outputs_pipeline)
compiler.Compiler()._compile(one_output_pipeline)
with self.assertRaises(RuntimeError):
compiler.Compiler()._compile(two_outputs_pipeline)
def test_withitem_basic(self):
self._test_py_compile_yaml('withitem_basic')
def test_withitem_nested(self):
self._test_py_compile_yaml('withitem_nested')
def test_add_pod_env(self):
self._test_py_compile_yaml('add_pod_env')
def test_init_container(self):
echo = dsl.UserContainer(
name='echo',
image='alpine:latest',
command=['echo', 'bye'])
@dsl.pipeline(name='InitContainer', description='A pipeline with init container.')
def init_container_pipeline():
dsl.ContainerOp(
name='hello',
image='alpine:latest',
command=['echo', 'hello'],
init_containers=[echo])
workflow_dict = compiler.Compiler()._compile(init_container_pipeline)
for template in workflow_dict['spec']['templates']:
init_containers = template.get('initContainers', None)
if init_containers:
self.assertEqual(len(init_containers),1)
init_container = init_containers[0]
self.assertEqual(init_container, {'image':'alpine:latest', 'command': ['echo', 'bye'], 'name': 'echo'})
def test_delete_resource_op(self):
"""Test a pipeline with a delete resource operation."""
from kubernetes import client as k8s
@dsl.pipeline()
def some_pipeline():
# create config map object with k6 load test script
config_map = k8s.V1ConfigMap(
api_version="v1",
data={"foo": "bar"},
kind="ConfigMap",
metadata=k8s.V1ObjectMeta(
name="foo-bar-cm",
namespace="default"
)
)
# delete the config map in k8s
dsl.ResourceOp(
name="delete-config-map",
action="delete",
k8s_resource=config_map
)
workflow_dict = kfp.compiler.Compiler()._compile(some_pipeline)
delete_op_template = [template for template in workflow_dict['spec']['templates'] if template['name'] == 'delete-config-map'][0]
# delete resource operation should not have success condition, failure condition or output parameters.
# See https://github.com/argoproj/argo-workflows/blob/5331fc02e257266a4a5887dfe6277e5a0b42e7fc/cmd/argoexec/commands/resource.go#L30
self.assertIsNone(delete_op_template.get("successCondition"))
self.assertIsNone(delete_op_template.get("failureCondition"))
self.assertDictEqual(delete_op_template.get("outputs", {}), {})
def test_withparam_global(self):
self._test_py_compile_yaml('withparam_global')
def test_withparam_global_dict(self):
self._test_py_compile_yaml('withparam_global_dict')
def test_withparam_output(self):
self._test_py_compile_yaml('withparam_output')
def test_withparam_output_dict(self):
self._test_py_compile_yaml('withparam_output_dict')
def test_withparam_lightweight_out(self):
self._test_py_compile_yaml('loop_over_lightweight_output')
def test_parallelfor_pipeline_param_in_items_resolving(self):
self._test_py_compile_yaml('parallelfor_pipeline_param_in_items_resolving')
def test_parallelfor_item_argument_resolving(self):
self._test_py_compile_yaml('parallelfor_item_argument_resolving')
def test_py_input_artifact_raw_value(self):
"""Test pipeline input_artifact_raw_value."""
self._test_py_compile_yaml('input_artifact_raw_value')
def test_pipeline_name_same_as_task_name(self):
def some_name():
dsl.ContainerOp(
name='some_name',
image='alpine:latest',
)
workflow_dict = compiler.Compiler()._compile(some_name)
template_names = set(template['name'] for template in workflow_dict['spec']['templates'])
self.assertGreater(len(template_names), 1)
self.assertEqual(template_names, {'some-name', 'some-name-2'})
def test_set_execution_options_caching_strategy(self):
def some_pipeline():
task = some_op()
task.execution_options.caching_strategy.max_cache_staleness = "P30D"
workflow_dict = kfp.compiler.Compiler()._compile(some_pipeline)
template = workflow_dict['spec']['templates'][0]
self.assertEqual(template['metadata']['annotations']['pipelines.kubeflow.org/max_cache_staleness'], "P30D")
def test_artifact_passing_using_volume(self):
self._test_py_compile_yaml('artifact_passing_using_volume')
def test_recursive_argument_mapping(self):
# Verifying that the recursive call arguments are passed correctly when specified out of order
component_2_in_0_out_op = kfp.components.load_component_from_text('''
inputs:
- name: in1
- name: in2
implementation:
container:
image: busybox
command:
- echo
- inputValue: in1
- inputValue: in2
''')
@dsl.graph_component
def subgraph(graph_in1, graph_in2):
component_2_in_0_out_op(
in1=graph_in1,
in2=graph_in2,
)
subgraph(
# Wrong order!
graph_in2=graph_in2,
graph_in1=graph_in1,
)
def some_pipeline(pipeline_in1, pipeline_in2):
subgraph(pipeline_in1, pipeline_in2)
workflow_dict = kfp.compiler.Compiler()._compile(some_pipeline)
subgraph_template = [template for template in workflow_dict['spec']['templates'] if 'subgraph' in template['name']][0]
recursive_subgraph_task = [task for task in subgraph_template['dag']['tasks'] if 'subgraph' in task['name']][0]
for argument in recursive_subgraph_task['arguments']['parameters']:
if argument['name'].endswith('in1'):
self.assertTrue(
argument['value'].endswith('in1}}'),
'Wrong argument mapping: "{}" passed to "{}"'.format(argument['value'], argument['name']))
elif argument['name'].endswith('in2'):
self.assertTrue(
argument['value'].endswith('in2}}'),
'Wrong argument mapping: "{}" passed to "{}"'.format(argument['value'], argument['name']))
else:
self.fail('Unexpected input name: ' + argument['name'])
def test_input_name_sanitization(self):
component_2_in_1_out_op = kfp.components.load_component_from_text('''
inputs:
- name: Input 1
- name: Input 2
outputs:
- name: Output 1
implementation:
container:
image: busybox
command:
- echo
- inputValue: Input 1
- inputPath: Input 2
- outputPath: Output 1
''')
def some_pipeline():
task1 = component_2_in_1_out_op('value 1', 'value 2')
component_2_in_1_out_op(task1.output, task1.output)
workflow_dict = kfp.compiler.Compiler()._compile(some_pipeline)
container_templates = [template for template in workflow_dict['spec']['templates'] if 'container' in template]
for template in container_templates:
for argument in template['inputs'].get('parameters', []):
self.assertNotIn(' ', argument['name'], 'The input name "{}" of template "{}" was not sanitized.'.format(argument['name'], template['name']))
for argument in template['inputs']['artifacts']:
self.assertNotIn(' ', argument['name'], 'The input name "{}" of template "{}" was not sanitized.'.format(argument['name'], template['name']))
def test_container_op_with_arbitrary_name(self):
def some_pipeline():
dsl.ContainerOp(
name=r''' !"#$%&'()*+,-./:;<=>?@[\]^_`''',
image='alpine:latest',
)
dsl.ContainerOp(
name=r''' !"#$%&'()*+,-./:;<=>?@[\]^_`''',
image='alpine:latest',
)
workflow_dict = compiler.Compiler()._compile(some_pipeline)
for template in workflow_dict['spec']['templates']:
self.assertNotEqual(template['name'], '')
def test_empty_string_pipeline_parameter_defaults(self):
def some_pipeline(param1: str = ''):
pass
workflow_dict = kfp.compiler.Compiler()._compile(some_pipeline)
self.assertEqual(workflow_dict['spec']['arguments']['parameters'][0].get('value'), '')
def test_preserving_parameter_arguments_map(self):
component_2_in_1_out_op = kfp.components.load_component_from_text('''
inputs:
- name: Input 1
- name: Input 2
outputs:
- name: Output 1
implementation:
container:
image: busybox
command:
- echo
- inputValue: Input 1
- inputPath: Input 2
- outputPath: Output 1
''')
def some_pipeline():
task1 = component_2_in_1_out_op('value 1', 'value 2')
component_2_in_1_out_op(task1.output, task1.output)
workflow_dict = kfp.compiler.Compiler()._compile(some_pipeline)
container_templates = [template for template in workflow_dict['spec']['templates'] if 'container' in template]
for template in container_templates:
parameter_arguments_json = template['metadata']['annotations']['pipelines.kubeflow.org/arguments.parameters']
parameter_arguments = json.loads(parameter_arguments_json)
self.assertEqual(set(parameter_arguments.keys()), {'Input 1'})
def test__resolve_task_pipeline_param(self):
p = PipelineParam(name='param2')
resolved = Compiler._resolve_task_pipeline_param(p, group_type=None)
self.assertEqual(resolved, "{{workflow.parameters.param2}}")
p = PipelineParam(name='param1', op_name='op1')
resolved = Compiler._resolve_task_pipeline_param(p, group_type=None)
self.assertEqual(resolved, "{{tasks.op1.outputs.parameters.op1-param1}}")
p = PipelineParam(name='param1', op_name='op1')
resolved = Compiler._resolve_task_pipeline_param(p, group_type="subgraph")
self.assertEqual(resolved, "{{inputs.parameters.op1-param1}}")
def test_uri_artifact_passing(self):
self._test_py_compile_yaml('uri_artifacts')
def test_keyword_only_argument_for_pipeline_func(self):
def some_pipeline(casual_argument: str, *, keyword_only_argument: str):
pass
kfp.compiler.Compiler()._create_workflow(some_pipeline)
def test_keyword_only_argument_for_pipeline_func_identity(self):
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
sys.path.append(test_data_dir)
# `@pipeline` is needed to make the name the same for both functions
@pipeline(name="pipeline_func")
def pipeline_func_arg(foo_arg: str, bar_arg: str):
dsl.ContainerOp(
name='foo',
image='foo',
command=['bar'],
arguments=[foo_arg, ' and ', bar_arg]
)
@pipeline(name="pipeline_func")
def pipeline_func_kwarg(foo_arg: str, *, bar_arg: str):
return pipeline_func_arg(foo_arg, bar_arg)
pipeline_yaml_arg = kfp.compiler.Compiler()._create_workflow(pipeline_func_arg)
pipeline_yaml_kwarg = kfp.compiler.Compiler()._create_workflow(pipeline_func_kwarg)
# the yamls may differ in metadata
def remove_metadata(yaml) -> None:
del yaml['metadata']
remove_metadata(pipeline_yaml_arg)
remove_metadata(pipeline_yaml_kwarg)
# compare
self.assertEqual(pipeline_yaml_arg, pipeline_yaml_kwarg)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
changedetectionio/__init__.py | #!/usr/bin/python3
# @todo logging
# @todo extra options for url, like verify=False etc.
# @todo enable https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl as option?
# @todo option for interval day/6 hour/etc
# @todo on change detected, config for calling some API
# @todo fetch title into json
# https://distill.io/features
# proxy per check
# - flask_cors, itsdangerous,MarkupSafe
import datetime
import os
import queue
import threading
import time
from copy import deepcopy
from threading import Event
import flask_login
import pytz
import timeago
from feedgen.feed import FeedGenerator
from flask import (
Flask,
abort,
flash,
make_response,
redirect,
render_template,
request,
send_from_directory,
url_for,
)
from flask_login import login_required
from flask_wtf import CSRFProtect
from changedetectionio import html_tools
__version__ = '0.39.12'
datastore = None
# Local
running_update_threads = []
ticker_thread = None
extra_stylesheets = []
update_q = queue.Queue()
notification_q = queue.Queue()
app = Flask(__name__,
static_url_path="",
static_folder="static",
template_folder="templates")
# Stop browser caching of assets
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config.exit = Event()
app.config['NEW_VERSION_AVAILABLE'] = False
app.config['LOGIN_DISABLED'] = False
#app.config["EXPLAIN_TEMPLATE_LOADING"] = True
# Disables caching of the templates
app.config['TEMPLATES_AUTO_RELOAD'] = True
csrf = CSRFProtect()
csrf.init_app(app)
notification_debug_log=[]
def init_app_secret(datastore_path):
secret = ""
path = "{}/secret.txt".format(datastore_path)
try:
with open(path, "r") as f:
secret = f.read()
except FileNotFoundError:
import secrets
with open(path, "w") as f:
secret = secrets.token_hex(32)
f.write(secret)
return secret
# Remember python passes objects by reference
# populate_form in wtforms didn't work for me. (try using a setattr() obj type on datastore.watch?)
def populate_form_from_watch(form, watch):
for i in form.__dict__.keys():
if i[0] != '_':
p = getattr(form, i)
if hasattr(p, 'data') and i in watch:
setattr(p, "data", watch[i])
# We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
# running or something similar.
@app.template_filter('format_last_checked_time')
def _jinja2_filter_datetime(watch_obj, format="%Y-%m-%d %H:%M:%S"):
# Worker thread tells us which UUID it is currently processing.
for t in running_update_threads:
if t.current_uuid == watch_obj['uuid']:
return "Checking now.."
if watch_obj['last_checked'] == 0:
return 'Not yet'
return timeago.format(int(watch_obj['last_checked']), time.time())
# @app.context_processor
# def timeago():
# def _timeago(lower_time, now):
# return timeago.format(lower_time, now)
# return dict(timeago=_timeago)
@app.template_filter('format_timestamp_timeago')
def _jinja2_filter_datetimestamp(timestamp, format="%Y-%m-%d %H:%M:%S"):
return timeago.format(timestamp, time.time())
# return timeago.format(timestamp, time.time())
# return datetime.datetime.utcfromtimestamp(timestamp).strftime(format)
# When nobody is logged in Flask-Login's current_user is set to an AnonymousUser object.
class User(flask_login.UserMixin):
id=None
def set_password(self, password):
return True
def get_user(self, email="[email protected]"):
return self
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return str(self.id)
# Compare given password against JSON store or Env var
def check_password(self, password):
import base64
import hashlib
# Can be stored in env (for deployments) or in the general configs
raw_salt_pass = os.getenv("SALTED_PASS", False)
if not raw_salt_pass:
raw_salt_pass = datastore.data['settings']['application']['password']
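# The stored value is base64(salt + PBKDF2 hash); decode it so the salt (first 32 bytes) can be split off below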
raw_salt_pass = base64.b64decode(raw_salt_pass)
salt_from_storage = raw_salt_pass[:32] # 32 is the length of the salt
# Use the exact same setup you used to generate the key, but this time put in the password to check
new_key = hashlib.pbkdf2_hmac(
'sha256',
password.encode('utf-8'), # Convert the password to bytes
salt_from_storage,
100000
)
new_key = salt_from_storage + new_key
return new_key == raw_salt_pass
pass
def changedetection_app(config=None, datastore_o=None):
global datastore
datastore = datastore_o
#app.config.update(config or {})
login_manager = flask_login.LoginManager(app)
login_manager.login_view = 'login'
app.secret_key = init_app_secret(config['datastore_path'])
# Setup cors headers to allow all domains
# https://flask-cors.readthedocs.io/en/latest/
# CORS(app)
@login_manager.user_loader
def user_loader(email):
user = User()
user.get_user(email)
return user
@login_manager.unauthorized_handler
def unauthorized_handler():
# @todo validate it's a URL of this host and use that
return redirect(url_for('login', next=url_for('index')))
@app.route('/logout')
def logout():
flask_login.logout_user()
return redirect(url_for('index'))
# https://github.com/pallets/flask/blob/93dd1709d05a1cf0e886df6223377bdab3b077fb/examples/tutorial/flaskr/__init__.py#L39
# You can divide up the stuff like this
@app.route('/login', methods=['GET', 'POST'])
def login():
if not datastore.data['settings']['application']['password'] and not os.getenv("SALTED_PASS", False):
flash("Login not required, no password enabled.", "notice")
return redirect(url_for('index'))
if request.method == 'GET':
if flask_login.current_user.is_authenticated:
flash("Already logged in")
return redirect(url_for("index"))
output = render_template("login.html")
return output
user = User()
user.id = "[email protected]"
password = request.form.get('password')
if (user.check_password(password)):
flask_login.login_user(user, remember=True)
# For now there's nothing else interesting here other than the index/list page
# It's more reliable and safe to ignore the 'next' redirect
# When we used...
# next = request.args.get('next')
# return redirect(next or url_for('index'))
# We would sometimes get login loop errors on sites hosted in sub-paths
# note for the future:
# if not is_safe_url(next):
# return flask.abort(400)
return redirect(url_for('index'))
else:
flash('Incorrect password', 'error')
return redirect(url_for('login'))
@app.before_request
def do_something_whenever_a_request_comes_in():
# Disable password login if there is not one set
# (No password in settings or env var)
app.config['LOGIN_DISABLED'] = datastore.data['settings']['application']['password'] == False and os.getenv("SALTED_PASS", False) == False
# Set the auth cookie path if we're running as X-settings/X-Forwarded-Prefix
if os.getenv('USE_X_SETTINGS') and 'X-Forwarded-Prefix' in request.headers:
app.config['REMEMBER_COOKIE_PATH'] = request.headers['X-Forwarded-Prefix']
app.config['SESSION_COOKIE_PATH'] = request.headers['X-Forwarded-Prefix']
# For the RSS path, allow access via a token
if request.path == '/rss' and request.args.get('token'):
app_rss_token = datastore.data['settings']['application']['rss_access_token']
rss_url_token = request.args.get('token')
if app_rss_token == rss_url_token:
app.config['LOGIN_DISABLED'] = True
@app.route("/rss", methods=['GET'])
@login_required
def rss():
from . import diff
limit_tag = request.args.get('tag')
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
# @todo needs a .itemsWithTag() or something
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
fg = FeedGenerator()
fg.title('changedetection.io')
fg.description('Feed description')
fg.link(href='https://changedetection.io')
for watch in sorted_watches:
dates = list(watch['history'].keys())
# Re #521 - Don't bother processing this one if there are fewer than 2 snapshots; it means we never had a change detected.
if len(dates) < 2:
continue
# Convert to int, sort and back to str again
# @todo replace datastore getter that does this automatically
dates = [int(i) for i in dates]
dates.sort(reverse=True)
dates = [str(i) for i in dates]
prev_fname = watch['history'][dates[1]]
if not watch['viewed']:
# Re #239 - GUID needs to be individual for each event
# @todo In the future make this a configurable link back (see work on BASE_URL https://github.com/dgtlmoon/changedetection.io/pull/228)
guid = "{}/{}".format(watch['uuid'], watch['last_changed'])
fe = fg.add_entry()
# Include a link to the diff page, they will have to login here to see if password protection is enabled.
# Description is the page you watch, link takes you to the diff JS UI page
base_url = datastore.data['settings']['application']['base_url']
if base_url == '':
base_url = "<base-url-env-var-not-set>"
diff_link = {'href': "{}{}".format(base_url, url_for('diff_history_page', uuid=watch['uuid']))}
fe.link(link=diff_link)
# @todo watch should be a getter - watch.get('title') (internally if URL else..)
watch_title = watch.get('title') if watch.get('title') else watch.get('url')
fe.title(title=watch_title)
latest_fname = watch['history'][dates[0]]
html_diff = diff.render_diff(prev_fname, latest_fname, include_equal=False, line_feed_sep="</br>")
fe.description(description="<![CDATA[<html><body><h4>{}</h4>{}</body></html>".format(watch_title, html_diff))
fe.guid(guid, permalink=False)
dt = datetime.datetime.fromtimestamp(int(watch['newest_history_key']))
dt = dt.replace(tzinfo=pytz.UTC)
fe.pubDate(dt)
response = make_response(fg.rss_str())
response.headers.set('Content-Type', 'application/rss+xml')
return response
@app.route("/", methods=['GET'])
@login_required
def index():
limit_tag = request.args.get('tag')
pause_uuid = request.args.get('pause')
# Redirect for the old rss path which used the /?rss=true
if request.args.get('rss'):
return redirect(url_for('rss', tag=limit_tag))
if pause_uuid:
try:
datastore.data['watching'][pause_uuid]['paused'] ^= True
datastore.needs_write = True
return redirect(url_for('index', tag = limit_tag))
except KeyError:
pass
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
existing_tags = datastore.get_all_tags()
from changedetectionio import forms
form = forms.quickWatchForm(request.form)
output = render_template("watch-overview.html",
form=form,
watches=sorted_watches,
tags=existing_tags,
active_tag=limit_tag,
app_rss_token=datastore.data['settings']['application']['rss_access_token'],
has_unviewed=datastore.data['has_unviewed'],
# Don't link to hosting when we're on the hosting environment
hosted_sticky=os.getenv("SALTED_PASS", False) == False,
guid=datastore.data['app_guid'])
return output
# AJAX endpoint for sending a test
@app.route("/notification/send-test", methods=['POST'])
@login_required
def ajax_callback_send_notification_test():
import apprise
apobj = apprise.Apprise()
# validate URLS
if not len(request.form['notification_urls'].strip()):
return make_response({'error': 'No Notification URLs set'}, 400)
for server_url in request.form['notification_urls'].splitlines():
if len(server_url.strip()):
if not apobj.add(server_url):
message = '{} is not a valid AppRise URL.'.format(server_url)
return make_response({'error': message}, 400)
try:
n_object = {'watch_url': request.form['window_url'],
'notification_urls': request.form['notification_urls'].splitlines(),
'notification_title': request.form['notification_title'].strip(),
'notification_body': request.form['notification_body'].strip(),
'notification_format': request.form['notification_format'].strip()
}
notification_q.put(n_object)
except Exception as e:
return make_response({'error': str(e)}, 400)
return 'OK'
@app.route("/scrub", methods=['GET', 'POST'])
@login_required
def scrub_page():
import re
if request.method == 'POST':
confirmtext = request.form.get('confirmtext')
limit_date = request.form.get('limit_date')
limit_timestamp = 0
# Re #149 - allow empty/0 timestamp limit
if len(limit_date):
try:
limit_date = limit_date.replace('T', ' ')
# I noticed chrome will show '/' but actually submit '-'
limit_date = limit_date.replace('-', '/')
# In the case that :ss seconds are supplied
limit_date = re.sub(r'(\d\d:\d\d)(:\d\d)', '\\1', limit_date)
str_to_dt = datetime.datetime.strptime(limit_date, '%Y/%m/%d %H:%M')
limit_timestamp = int(str_to_dt.timestamp())
if limit_timestamp > time.time():
flash("Timestamp is in the future, cannot continue.", 'error')
return redirect(url_for('scrub_page'))
except ValueError:
flash('Incorrect date format, cannot continue.', 'error')
return redirect(url_for('scrub_page'))
if confirmtext == 'scrub':
changes_removed = 0
for uuid, watch in datastore.data['watching'].items():
if limit_timestamp:
changes_removed += datastore.scrub_watch(uuid, limit_timestamp=limit_timestamp)
else:
changes_removed += datastore.scrub_watch(uuid)
flash("Cleared snapshot history ({} snapshots removed)".format(changes_removed))
else:
flash('Incorrect confirmation text.', 'error')
return redirect(url_for('index'))
output = render_template("scrub.html")
return output
# If they edited an existing watch, we need to know to reset the current/previous md5 to include
# the excluded text.
def get_current_checksum_include_ignore_text(uuid):
import hashlib
from changedetectionio import fetch_site_status
# Get the most recent one
newest_history_key = datastore.get_val(uuid, 'newest_history_key')
# 0 means that there's only one, so there should be no 'unviewed' history available
if newest_history_key == 0:
newest_history_key = list(datastore.data['watching'][uuid]['history'].keys())[0]
if newest_history_key:
with open(datastore.data['watching'][uuid]['history'][newest_history_key],
encoding='utf-8') as file:
raw_content = file.read()
handler = fetch_site_status.perform_site_check(datastore=datastore)
stripped_content = html_tools.strip_ignore_text(raw_content,
datastore.data['watching'][uuid]['ignore_text'])
if datastore.data['settings']['application'].get('ignore_whitespace', False):
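# Strip out all whitespace characters before hashing so whitespace-only changes are ignored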
checksum = hashlib.md5(stripped_content.translate(None, b'\r\n\t ')).hexdigest()
else:
checksum = hashlib.md5(stripped_content).hexdigest()
return checksum
return datastore.data['watching'][uuid]['previous_md5']
@app.route("/edit/<string:uuid>", methods=['GET', 'POST'])
@login_required
def edit_page(uuid):
from changedetectionio import forms
form = forms.watchForm(request.form)
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
if request.method == 'GET':
if not uuid in datastore.data['watching']:
flash("No watch with the UUID %s found." % (uuid), "error")
return redirect(url_for('index'))
populate_form_from_watch(form, datastore.data['watching'][uuid])
if datastore.data['watching'][uuid]['fetch_backend'] is None:
form.fetch_backend.data = datastore.data['settings']['application']['fetch_backend']
if request.method == 'POST' and form.validate():
# Re #110, if they submit the same as the default value, set it to None, so we continue to follow the default
if form.minutes_between_check.data == datastore.data['settings']['requests']['minutes_between_check']:
form.minutes_between_check.data = None
if form.fetch_backend.data == datastore.data['settings']['application']['fetch_backend']:
form.fetch_backend.data = None
update_obj = {'url': form.url.data.strip(),
'minutes_between_check': form.minutes_between_check.data,
'tag': form.tag.data.strip(),
'title': form.title.data.strip(),
'headers': form.headers.data,
'body': form.body.data,
'method': form.method.data,
'ignore_status_codes': form.ignore_status_codes.data,
'fetch_backend': form.fetch_backend.data,
'trigger_text': form.trigger_text.data,
'notification_title': form.notification_title.data,
'notification_body': form.notification_body.data,
'notification_format': form.notification_format.data,
'extract_title_as_title': form.extract_title_as_title.data,
}
# Notification URLs
datastore.data['watching'][uuid]['notification_urls'] = form.notification_urls.data
# Ignore text
form_ignore_text = form.ignore_text.data
datastore.data['watching'][uuid]['ignore_text'] = form_ignore_text
# Reset the previous_md5 so we process a new snapshot including stripping ignore text.
if form_ignore_text:
if len(datastore.data['watching'][uuid]['history']):
update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
datastore.data['watching'][uuid]['css_filter'] = form.css_filter.data.strip()
datastore.data['watching'][uuid]['subtractive_selectors'] = form.subtractive_selectors.data
# Reset the previous_md5 so we process a new snapshot including stripping ignore text.
if form.css_filter.data.strip() != datastore.data['watching'][uuid]['css_filter']:
if len(datastore.data['watching'][uuid]['history']):
update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
datastore.data['watching'][uuid].update(update_obj)
flash("Updated watch.")
# Re #286 - We wait for syncing new data to disk in another thread every 60 seconds
# But in the case something is added we should save straight away
datastore.sync_to_json()
# Queue the watch for immediate recheck
update_q.put(uuid)
# Diff page [edit] link should go back to diff page
if request.args.get("next") and request.args.get("next") == 'diff' and not form.save_and_preview_button.data:
return redirect(url_for('diff_history_page', uuid=uuid))
else:
if form.save_and_preview_button.data:
flash('You may need to reload this page to see the new content.')
return redirect(url_for('preview_page', uuid=uuid))
else:
return redirect(url_for('index'))
else:
if request.method == 'POST' and not form.validate():
flash("An error occurred, please see below.", "error")
# Re #110 offer the default minutes
using_default_minutes = False
if form.minutes_between_check.data == None:
form.minutes_between_check.data = datastore.data['settings']['requests']['minutes_between_check']
using_default_minutes = True
output = render_template("edit.html",
uuid=uuid,
watch=datastore.data['watching'][uuid],
form=form,
using_default_minutes=using_default_minutes,
current_base_url=datastore.data['settings']['application']['base_url'],
emailprefix=os.getenv('NOTIFICATION_MAIL_BUTTON_PREFIX', False)
)
return output
@app.route("/settings", methods=['GET', "POST"])
@login_required
def settings_page():
from changedetectionio import content_fetcher, forms
form = forms.globalSettingsForm(request.form)
if request.method == 'GET':
form.minutes_between_check.data = int(datastore.data['settings']['requests']['minutes_between_check'])
form.notification_urls.data = datastore.data['settings']['application']['notification_urls']
form.global_subtractive_selectors.data = datastore.data['settings']['application']['global_subtractive_selectors']
form.global_ignore_text.data = datastore.data['settings']['application']['global_ignore_text']
form.ignore_whitespace.data = datastore.data['settings']['application']['ignore_whitespace']
form.render_anchor_tag_content.data = datastore.data['settings']['application']['render_anchor_tag_content']
form.extract_title_as_title.data = datastore.data['settings']['application']['extract_title_as_title']
form.fetch_backend.data = datastore.data['settings']['application']['fetch_backend']
form.notification_title.data = datastore.data['settings']['application']['notification_title']
form.notification_body.data = datastore.data['settings']['application']['notification_body']
form.notification_format.data = datastore.data['settings']['application']['notification_format']
form.base_url.data = datastore.data['settings']['application']['base_url']
form.real_browser_save_screenshot.data = datastore.data['settings']['application']['real_browser_save_screenshot']
if request.method == 'POST' and form.data.get('removepassword_button') == True:
# Password unset is a GET, but we can lock the session to a salted env password to always need the password
if not os.getenv("SALTED_PASS", False):
datastore.data['settings']['application']['password'] = False
flash("Password protection removed.", 'notice')
flask_login.logout_user()
return redirect(url_for('settings_page'))
if request.method == 'POST' and form.validate():
datastore.data['settings']['application']['notification_urls'] = form.notification_urls.data
datastore.data['settings']['requests']['minutes_between_check'] = form.minutes_between_check.data
datastore.data['settings']['application']['extract_title_as_title'] = form.extract_title_as_title.data
datastore.data['settings']['application']['fetch_backend'] = form.fetch_backend.data
datastore.data['settings']['application']['notification_title'] = form.notification_title.data
datastore.data['settings']['application']['notification_body'] = form.notification_body.data
datastore.data['settings']['application']['notification_format'] = form.notification_format.data
datastore.data['settings']['application']['notification_urls'] = form.notification_urls.data
datastore.data['settings']['application']['base_url'] = form.base_url.data
datastore.data['settings']['application']['global_subtractive_selectors'] = form.global_subtractive_selectors.data
datastore.data['settings']['application']['global_ignore_text'] = form.global_ignore_text.data
datastore.data['settings']['application']['ignore_whitespace'] = form.ignore_whitespace.data
datastore.data['settings']['application']['real_browser_save_screenshot'] = form.real_browser_save_screenshot.data
datastore.data['settings']['application']['render_anchor_tag_content'] = form.render_anchor_tag_content.data
if not os.getenv("SALTED_PASS", False) and form.password.encrypted_password:
datastore.data['settings']['application']['password'] = form.password.encrypted_password
flash("Password protection enabled.", 'notice')
flask_login.logout_user()
return redirect(url_for('index'))
datastore.needs_write = True
flash("Settings updated.")
if request.method == 'POST' and not form.validate():
flash("An error occurred, please see below.", "error")
output = render_template("settings.html",
form=form,
current_base_url = datastore.data['settings']['application']['base_url'],
hide_remove_pass=os.getenv("SALTED_PASS", False),
emailprefix=os.getenv('NOTIFICATION_MAIL_BUTTON_PREFIX', False))
return output
@app.route("/import", methods=['GET', "POST"])
@login_required
def import_page():
import validators
remaining_urls = []
good = 0
if request.method == 'POST':
urls = request.values.get('urls').split("\n")
for url in urls:
url = url.strip()
url, *tags = url.split(" ")
# Flask wtforms validators won't work with basic auth, use the validators package
if len(url) and validators.url(url.replace('source:', '')):
new_uuid = datastore.add_watch(url=url.strip(), tag=" ".join(tags))
# Straight into the queue.
update_q.put(new_uuid)
good += 1
else:
if len(url):
remaining_urls.append(url)
flash("{} Imported, {} Skipped.".format(good, len(remaining_urls)))
if len(remaining_urls) == 0:
# Looking good, redirect to index.
return redirect(url_for('index'))
# Could be some remaining, or we could be on GET
output = render_template("import.html",
remaining="\n".join(remaining_urls)
)
return output
# Clear all statuses, so we do not see the 'unviewed' class
@app.route("/api/mark-all-viewed", methods=['GET'])
@login_required
def mark_all_viewed():
# Save the current newest history as the most recently viewed
for watch_uuid, watch in datastore.data['watching'].items():
datastore.set_last_viewed(watch_uuid, watch['newest_history_key'])
flash("Cleared all statuses.")
return redirect(url_for('index'))
@app.route("/diff/<string:uuid>", methods=['GET'])
@login_required
def diff_history_page(uuid):
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
dates = list(watch['history'].keys())
# Convert to int, sort and back to str again
# @todo replace datastore getter that does this automatically
dates = [int(i) for i in dates]
dates.sort(reverse=True)
dates = [str(i) for i in dates]
if len(dates) < 2:
flash("Not enough saved change detection snapshots to produce a report.", "error")
return redirect(url_for('index'))
# Save the current newest history as the most recently viewed
datastore.set_last_viewed(uuid, dates[0])
newest_file = watch['history'][dates[0]]
try:
with open(newest_file, 'r') as f:
newest_version_file_contents = f.read()
except Exception as e:
newest_version_file_contents = "Unable to read {}.\n".format(newest_file)
previous_version = request.args.get('previous_version')
try:
previous_file = watch['history'][previous_version]
except KeyError:
# Not present, use a default value, the second one in the sorted list.
previous_file = watch['history'][dates[1]]
try:
with open(previous_file, 'r') as f:
previous_version_file_contents = f.read()
except Exception as e:
previous_version_file_contents = "Unable to read {}.\n".format(previous_file)
screenshot_url = datastore.get_screenshot(uuid)
output = render_template("diff.html", watch_a=watch,
newest=newest_version_file_contents,
previous=previous_version_file_contents,
extra_stylesheets=extra_stylesheets,
versions=dates[1:],
uuid=uuid,
newest_version_timestamp=dates[0],
current_previous_version=str(previous_version),
current_diff_url=watch['url'],
extra_title=" - Diff - {}".format(watch['title'] if watch['title'] else watch['url']),
left_sticky=True,
screenshot=screenshot_url)
return output
@app.route("/preview/<string:uuid>", methods=['GET'])
@login_required
def preview_page(uuid):
content = []
ignored_line_numbers = []
trigger_line_numbers = []
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
if len(watch['history']):
timestamps = sorted(watch['history'].keys(), key=lambda x: int(x))
filename = watch['history'][timestamps[-1]]
try:
with open(filename, 'r') as f:
tmp = f.readlines()
# Get what needs to be highlighted
ignore_rules = watch.get('ignore_text', []) + datastore.data['settings']['application']['global_ignore_text']
# .readlines will keep the \n, but we will parse it here again, in the future tidy this up
ignored_line_numbers = html_tools.strip_ignore_text(content="".join(tmp),
wordlist=ignore_rules,
mode='line numbers'
)
trigger_line_numbers = html_tools.strip_ignore_text(content="".join(tmp),
wordlist=watch['trigger_text'],
mode='line numbers'
)
# Prepare the classes and lines used in the template
i=0
for l in tmp:
classes=[]
i+=1
if i in ignored_line_numbers:
classes.append('ignored')
if i in trigger_line_numbers:
classes.append('triggered')
content.append({'line': l, 'classes': ' '.join(classes)})
except Exception as e:
content.append({'line': "File doesn't exist or unable to read file {}".format(filename), 'classes': ''})
else:
content.append({'line': "No history found", 'classes': ''})
screenshot_url = datastore.get_screenshot(uuid)
output = render_template("preview.html",
content=content,
extra_stylesheets=extra_stylesheets,
ignored_line_numbers=ignored_line_numbers,
triggered_line_numbers=trigger_line_numbers,
current_diff_url=watch['url'],
screenshot=screenshot_url,
watch=watch,
uuid=uuid)
return output
@app.route("/settings/notification-logs", methods=['GET'])
@login_required
def notification_logs():
global notification_debug_log
output = render_template("notification-log.html",
logs=notification_debug_log if len(notification_debug_log) else ["No errors or warnings detected"])
return output
@app.route("/api/<string:uuid>/snapshot/current", methods=['GET'])
@login_required
def api_snapshot(uuid):
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
try:
watch = datastore.data['watching'][uuid]
except KeyError:
return abort(400, "No history found for the specified link, bad link?")
newest = list(watch['history'].keys())[-1]
with open(watch['history'][newest], 'r') as f:
content = f.read()
resp = make_response(content)
resp.headers['Content-Type'] = 'text/plain'
return resp
@app.route("/favicon.ico", methods=['GET'])
def favicon():
return send_from_directory("static/images", path="favicon.ico")
# We're good but backups are even better!
@app.route("/backup", methods=['GET'])
@login_required
def get_backup():
import zipfile
from pathlib import Path
# Remove any existing backup file, for now we just keep one file
for previous_backup_filename in Path(datastore_o.datastore_path).rglob('changedetection-backup-*.zip'):
os.unlink(previous_backup_filename)
# create a ZipFile object
backupname = "changedetection-backup-{}.zip".format(int(time.time()))
# We only care about UUIDS from the current index file
uuids = list(datastore.data['watching'].keys())
backup_filepath = os.path.join(datastore_o.datastore_path, backupname)
with zipfile.ZipFile(backup_filepath, "w",
compression=zipfile.ZIP_DEFLATED,
compresslevel=8) as zipObj:
# Be sure we're written fresh
datastore.sync_to_json()
# Add the index
zipObj.write(os.path.join(datastore_o.datastore_path, "url-watches.json"), arcname="url-watches.json")
# Add the flask app secret
zipObj.write(os.path.join(datastore_o.datastore_path, "secret.txt"), arcname="secret.txt")
# Add any snapshot data we find, use the full path to access the file, but make the file 'relative' in the Zip.
for txt_file_path in Path(datastore_o.datastore_path).rglob('*.txt'):
parent_p = txt_file_path.parent
if parent_p.name in uuids:
zipObj.write(txt_file_path,
arcname=str(txt_file_path).replace(datastore_o.datastore_path, ''),
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8)
# Create a list file with just the URLs, so it's easier to port somewhere else in the future
list_file = "url-list.txt"
with open(os.path.join(datastore_o.datastore_path, list_file), "w") as f:
for uuid in datastore.data["watching"]:
url = datastore.data["watching"][uuid]["url"]
f.write("{}\r\n".format(url))
list_with_tags_file = "url-list-with-tags.txt"
with open(
os.path.join(datastore_o.datastore_path, list_with_tags_file), "w"
) as f:
for uuid in datastore.data["watching"]:
url = datastore.data["watching"][uuid]["url"]
tag = datastore.data["watching"][uuid]["tag"]
f.write("{} {}\r\n".format(url, tag))
# Add it to the Zip
zipObj.write(
os.path.join(datastore_o.datastore_path, list_file),
arcname=list_file,
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8,
)
zipObj.write(
os.path.join(datastore_o.datastore_path, list_with_tags_file),
arcname=list_with_tags_file,
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8,
)
# Send_from_directory needs to be the full absolute path
return send_from_directory(os.path.abspath(datastore_o.datastore_path), backupname, as_attachment=True)
@app.route("/static/<string:group>/<string:filename>", methods=['GET'])
def static_content(group, filename):
if group == 'screenshot':
from flask import make_response
# Could be sensitive, follow password requirements
if datastore.data['settings']['application']['password'] and not flask_login.current_user.is_authenticated:
abort(403)
# These files should be in our subdirectory
try:
# set nocache, set content-type
watch_dir = datastore_o.datastore_path + "/" + filename
response = make_response(send_from_directory(filename="last-screenshot.png", directory=watch_dir, path=watch_dir + "/last-screenshot.png"))
response.headers['Content-type'] = 'image/png'
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = 0
return response
except FileNotFoundError:
abort(404)
# These files should be in our subdirectory
try:
return send_from_directory("static/{}".format(group), path=filename)
except FileNotFoundError:
abort(404)
@app.route("/api/add", methods=['POST'])
@login_required
def api_watch_add():
from changedetectionio import forms
form = forms.quickWatchForm(request.form)
if form.validate():
url = request.form.get('url').strip()
if datastore.url_exists(url):
flash('The URL {} already exists'.format(url), "error")
return redirect(url_for('index'))
# @todo add_watch should throw a custom Exception for validation etc
new_uuid = datastore.add_watch(url=url, tag=request.form.get('tag').strip())
# Straight into the queue.
update_q.put(new_uuid)
flash("Watch added.")
return redirect(url_for('index'))
else:
flash("Error")
return redirect(url_for('index'))
@app.route("/api/delete", methods=['GET'])
@login_required
def api_delete():
uuid = request.args.get('uuid')
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
datastore.delete(uuid)
flash('Deleted.')
return redirect(url_for('index'))
@app.route("/api/clone", methods=['GET'])
@login_required
def api_clone():
uuid = request.args.get('uuid')
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
new_uuid = datastore.clone(uuid)
update_q.put(new_uuid)
flash('Cloned.')
return redirect(url_for('index'))
@app.route("/api/checknow", methods=['GET'])
@login_required
def api_watch_checknow():
tag = request.args.get('tag')
uuid = request.args.get('uuid')
i = 0
running_uuids = []
for t in running_update_threads:
running_uuids.append(t.current_uuid)
# @todo check thread is running and skip
if uuid:
if uuid not in running_uuids:
update_q.put(uuid)
i = 1
elif tag != None:
# Items that have this current tag
for watch_uuid, watch in datastore.data['watching'].items():
if (tag != None and tag in watch['tag']):
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
else:
# No tag, no uuid, add everything.
for watch_uuid, watch in datastore.data['watching'].items():
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
flash("{} watches are queued for rechecking.".format(i))
return redirect(url_for('index', tag=tag))
# @todo handle ctrl break
ticker_thread = threading.Thread(target=ticker_thread_check_time_launch_checks).start()
threading.Thread(target=notification_runner).start()
# Check for new release version, but not when running in test/build
if not os.getenv("GITHUB_REF", False):
threading.Thread(target=check_for_new_version).start()
return app
# Check for new version and anonymous stats
def check_for_new_version():
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
while not app.config.exit.is_set():
try:
r = requests.post("https://changedetection.io/check-ver.php",
data={'version': __version__,
'app_guid': datastore.data['app_guid'],
'watch_count': len(datastore.data['watching'])
},
verify=False)
except:
pass
try:
if "new_version" in r.text:
app.config['NEW_VERSION_AVAILABLE'] = True
except:
pass
# Check daily
app.config.exit.wait(86400)
def notification_runner():
global notification_debug_log
while not app.config.exit.is_set():
try:
# At the moment only one thread runs (single runner)
n_object = notification_q.get(block=False)
except queue.Empty:
time.sleep(1)
else:
# Process notifications
try:
from changedetectionio import notification
notification.process_notification(n_object, datastore)
except Exception as e:
print("Watch URL: {} Error {}".format(n_object['watch_url'], str(e)))
# UUID won't be present when we submit a 'test' from the global settings
if 'uuid' in n_object:
datastore.update_watch(uuid=n_object['uuid'],
update_obj={'last_notification_error': "Notification error detected, please see logs."})
log_lines = str(e).splitlines()
notification_debug_log += log_lines
# Trim the log length
notification_debug_log = notification_debug_log[-100:]
# Thread runner to check every minute, look for new watches to feed into the Queue.
def ticker_thread_check_time_launch_checks():
from changedetectionio import update_worker
# Spin up Workers that do the fetching
# Can be overridden by ENV or use the default settings
n_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers']))
for _ in range(n_workers):
new_worker = update_worker.update_worker(update_q, notification_q, app, datastore)
running_update_threads.append(new_worker)
new_worker.start()
while not app.config.exit.is_set():
# Get a list of watches by UUID that are currently fetching data
running_uuids = []
for t in running_update_threads:
if t.current_uuid:
running_uuids.append(t.current_uuid)
# Re #232 - Deepcopy the data in case it changes while we're iterating through it all
while True:
try:
copied_datastore = deepcopy(datastore)
except RuntimeError as e:
# RuntimeError: dictionary changed size during iteration
time.sleep(0.1)
else:
break
# Re #438 - Don't place more watches in the queue to be checked if the queue is already large
while update_q.qsize() >= 2000:
time.sleep(1)
# Check for watches outside of the time threshold to put in the thread queue.
now = time.time()
max_system_wide = int(copied_datastore.data['settings']['requests']['minutes_between_check']) * 60
for uuid, watch in copied_datastore.data['watching'].items():
# No need to do further processing if it's paused
if watch['paused']:
continue
# If they supplied an individual minutes-between-check threshold for this entry.
watch_minutes_between_check = watch.get('minutes_between_check', None)
if watch_minutes_between_check is not None:
# Cast to int just in case
max_time = int(watch_minutes_between_check) * 60
else:
# Default system wide.
max_time = max_system_wide
threshold = now - max_time
# Yeah, put it in the queue, it's more than time
if watch['last_checked'] <= threshold:
if not uuid in running_uuids and uuid not in update_q.queue:
update_q.put(uuid)
# Wait a few seconds before checking the list again
time.sleep(3)
# Should be low so we can break this out in testing
app.config.exit.wait(1)
| []
| []
| [
"SALTED_PASS",
"USE_X_SETTINGS",
"FETCH_WORKERS",
"GITHUB_REF",
"NOTIFICATION_MAIL_BUTTON_PREFIX"
]
| [] | ["SALTED_PASS", "USE_X_SETTINGS", "FETCH_WORKERS", "GITHUB_REF", "NOTIFICATION_MAIL_BUTTON_PREFIX"] | python | 5 | 0 | |
config/config.go | package config
import (
"os"
"path"
)
// Server contains the server configuration information
var Server = map[string]string{
"host": "localhost",
"port": "30080",
}
// Client contains the local and remote paths to fetch cached images
var Client = map[string]string{
"cache_dir": path.Join(os.Getenv("HOME"), ".fx/"),
"remote_images_url": "https://raw.githubusercontent.com/metrue/fx/master/images.zip",
}
var GrpcEndpoint = ":5000"
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
configs/Configs.go | // Author: James Mallon <[email protected]>
// configs package - the goal of this file is to provide a singleton way of reading the
// .env file instead of reopening it time after time - while this makes it necessary to
// recompile the code after adding new values to the .env file, it improves performance
package configs
import (
"encoding/json"
"io/ioutil"
"log"
"os"
)
// environment values
var Env map[string]string
// init function - data and process initialization
func init() {
envFile := os.Getenv("GOPATH") + "/src/GoAuthorization/env.json"
jsonFile, e := os.Open(envFile)
if e != nil {
log.Fatal(e.Error())
}
jsonData, e := ioutil.ReadAll(jsonFile)
if e != nil {
log.Fatal(e.Error())
}
json.Unmarshal(jsonData, &Env)
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "leaderboard.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
test/get_previous_releases.py | #!/usr/bin/env python3
#
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Download or build previous releases.
# Needs curl and tar to download a release, or the build dependencies when
# building a release.
import argparse
import contextlib
from fnmatch import fnmatch
import os
from pathlib import Path
import re
import shutil
import subprocess
import sys
import hashlib
SHA256_SUMS = {
"d40f18b4e43c6e6370ef7db9131f584fbb137276ec2e3dba67a4b267f81cb644": "flocoin-0.15.2-aarch64-linux-gnu.tar.gz",
"54fb877a148a6ad189a1e1ab1ff8b11181e58ff2aaf430da55b3fd46ae549a6b": "flocoin-0.15.2-arm-linux-gnueabihf.tar.gz",
"2b843506c3f1af0eeca5854a920264f9a829f02d0d50328005950ddcbe88874d": "flocoin-0.15.2-i686-pc-linux-gnu.tar.gz",
"87e9340ff3d382d543b2b69112376077f0c8b4f7450d372e83b68f5a1e22b2df": "flocoin-0.15.2-osx64.tar.gz",
"566be44190fd76daa01f13d428939dadfb8e3daacefc8fa17f433cad28f73bd5": "flocoin-0.15.2-x86_64-linux-gnu.tar.gz",
#
"0768c6c15caffbaca6524824c9563b42c24f70633c681c2744649158aa3fd484": "flocoin-0.16.3-aarch64-linux-gnu.tar.gz",
"fb2818069854a6ad20ea03b28b55dbd35d8b1f7d453e90b83eace5d0098a2a87": "flocoin-0.16.3-arm-linux-gnueabihf.tar.gz",
"75a537844313b0a84bdb61ffcdc5c4ce19a738f7ddf71007cd2edf664efd7c37": "flocoin-0.16.3-i686-pc-linux-gnu.tar.gz",
"78c3bff3b619a19aed575961ea43cc9e142959218835cf51aede7f0b764fc25d": "flocoin-0.16.3-osx64.tar.gz",
"5d422a9d544742bc0df12427383f9c2517433ce7b58cf672b9a9b17c2ef51e4f": "flocoin-0.16.3-x86_64-linux-gnu.tar.gz",
#
"5a6b35d1a348a402f2d2d6ab5aed653a1a1f13bc63aaaf51605e3501b0733b7a": "flocoin-0.17.2-aarch64-linux-gnu.tar.gz",
"d1913a5d19c8e8da4a67d1bd5205d03c8614dfd2e02bba2fe3087476643a729e": "flocoin-0.17.2-arm-linux-gnueabihf.tar.gz",
"d295fc93f39bbf0fd937b730a93184899a2eb6c3a6d53f3d857cbe77ef89b98c": "flocoin-0.17.2-i686-pc-linux-gnu.tar.gz",
"a783ba20706dbfd5b47fbedf42165fce70fbbc7d78003305d964f6b3da14887f": "flocoin-0.17.2-osx64.tar.gz",
"943f9362b9f11130177839116f48f809d83478b4c28591d486ee9a7e35179da6": "flocoin-0.17.2-x86_64-linux-gnu.tar.gz",
#
"88f343af72803b851c7da13874cc5525026b0b55e63e1b5e1298390c4688adc6": "flocoin-0.18.1-aarch64-linux-gnu.tar.gz",
"cc7d483e4b20c5dabd4dcaf304965214cf4934bcc029ca99cbc9af00d3771a1f": "flocoin-0.18.1-arm-linux-gnueabihf.tar.gz",
"989e847b3e95fc9fedc0b109cae1b4fa43348f2f712e187a118461876af9bd16": "flocoin-0.18.1-i686-pc-linux-gnu.tar.gz",
"b7bbcee7a7540f711b171d6981f939ca8482005fde22689bc016596d80548bb1": "flocoin-0.18.1-osx64.tar.gz",
"425ee5ec631ae8da71ebc1c3f5c0269c627cf459379b9b030f047107a28e3ef8": "flocoin-0.18.1-riscv64-linux-gnu.tar.gz",
"600d1db5e751fa85903e935a01a74f5cc57e1e7473c15fd3e17ed21e202cfe5a": "flocoin-0.18.1-x86_64-linux-gnu.tar.gz",
#
"3a80431717842672df682bdb619e66523b59541483297772a7969413be3502ff": "flocoin-0.19.1-aarch64-linux-gnu.tar.gz",
"657f28213823d240dd3324d14829702f9ad6f0710f8bdd1c379cb3c447197f48": "flocoin-0.19.1-arm-linux-gnueabihf.tar.gz",
"10d1e53208aa7603022f4acc084a046299ab4ccf25fe01e81b3fb6f856772589": "flocoin-0.19.1-i686-pc-linux-gnu.tar.gz",
"1ae1b87de26487075cd2fd22e0d4ead87d969bd55c44f2f1d873ecdc6147ebb3": "flocoin-0.19.1-osx64.tar.gz",
"aa7a9563b48aa79252c8e7b6a41c07a5441bd9f14c5e4562cc72720ea6cb0ee5": "flocoin-0.19.1-riscv64-linux-gnu.tar.gz",
"5fcac9416e486d4960e1a946145566350ca670f9aaba99de6542080851122e4c": "flocoin-0.19.1-x86_64-linux-gnu.tar.gz",
#
"60c93e3462c303eb080be7cf623f1a7684b37fd47a018ad3848bc23e13c84e1c": "flocoin-0.20.1-aarch64-linux-gnu.tar.gz",
"55b577e0fb306fb429d4be6c9316607753e8543e5946b542d75d876a2f08654c": "flocoin-0.20.1-arm-linux-gnueabihf.tar.gz",
"b9024dde373ea7dad707363e07ec7e265383204127539ae0c234bff3a61da0d1": "flocoin-0.20.1-osx64.tar.gz",
"c378d4e21109f09e8829f3591e015c66632dff2925a60b64d259be05a334c30b": "flocoin-0.20.1-osx.dmg",
"fa71cb52ee5e0459cbf5248cdec72df27995840c796f58b304607a1ed4c165af": "flocoin-0.20.1-riscv64-linux-gnu.tar.gz",
"376194f06596ecfa40331167c39bc70c355f960280bd2a645fdbf18f66527397": "flocoin-0.20.1-x86_64-linux-gnu.tar.gz",
}
@contextlib.contextmanager
def pushd(new_dir) -> None:
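# Temporarily switch the working directory; the previous directory is always restored on exit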
previous_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(previous_dir)
def download_binary(tag, args) -> int:
if Path(tag).is_dir():
if not args.remove_dir:
print('Using cached {}'.format(tag))
return 0
shutil.rmtree(tag)
Path(tag).mkdir()
bin_path = 'bin/flocoin-core-{}'.format(tag[1:])
match = re.compile('v(.*)(rc[0-9]+)$').search(tag)
if match:
bin_path = 'bin/flocoin-core-{}/test.{}'.format(
match.group(1), match.group(2))
tarball = 'flocoin-{tag}-{platform}.tar.gz'.format(
tag=tag[1:], platform=args.platform)
tarballUrl = 'https://flocoincore.org/{bin_path}/{tarball}'.format(
bin_path=bin_path, tarball=tarball)
print('Fetching: {tarballUrl}'.format(tarballUrl=tarballUrl))
header, status = subprocess.Popen(
['curl', '--head', tarballUrl], stdout=subprocess.PIPE).communicate()
if re.search("404 Not Found", header.decode("utf-8")):
print("Binary tag was not found")
return 1
curlCmds = [
['curl', '--remote-name', tarballUrl]
]
for cmd in curlCmds:
ret = subprocess.run(cmd).returncode
if ret:
return ret
hasher = hashlib.sha256()
with open(tarball, "rb") as afile:
hasher.update(afile.read())
tarballHash = hasher.hexdigest()
if tarballHash not in SHA256_SUMS or SHA256_SUMS[tarballHash] != tarball:
if tarball in SHA256_SUMS.values():
print("Checksum did not match")
return 1
print("Checksum for given version doesn't exist")
return 1
print("Checksum matched")
# Extract tarball
ret = subprocess.run(['tar', '-zxf', tarball, '-C', tag,
'--strip-components=1',
'flocoin-{tag}'.format(tag=tag[1:])]).returncode
if ret:
return ret
Path(tarball).unlink()
return 0
def build_release(tag, args) -> int:
githubUrl = "https://github.com/flocoin/flocoin"
if args.remove_dir:
if Path(tag).is_dir():
shutil.rmtree(tag)
if not Path(tag).is_dir():
# fetch new tags
subprocess.run(
["git", "fetch", githubUrl, "--tags"])
output = subprocess.check_output(['git', 'tag', '-l', tag])
if not output:
print('Tag {} not found'.format(tag))
return 1
ret = subprocess.run([
'git', 'clone', githubUrl, tag
]).returncode
if ret:
return ret
with pushd(tag):
ret = subprocess.run(['git', 'checkout', tag]).returncode
if ret:
return ret
host = args.host
if args.depends:
with pushd('depends'):
ret = subprocess.run(['make', 'NO_QT=1']).returncode
if ret:
return ret
host = os.environ.get(
'HOST', subprocess.check_output(['./config.guess']))
config_flags = '--prefix={pwd}/depends/{host} '.format(
pwd=os.getcwd(),
host=host) + args.config_flags
cmds = [
'./autogen.sh',
'./configure {}'.format(config_flags),
'make',
]
for cmd in cmds:
ret = subprocess.run(cmd.split()).returncode
if ret:
return ret
# Move binaries, so they're in the same place as in the
# release download
Path('bin').mkdir(exist_ok=True)
files = ['flocoind', 'flocoin-cli', 'flocoin-tx']
for f in files:
Path('src/'+f).rename('bin/'+f)
return 0
def check_host(args) -> int:
args.host = os.environ.get('HOST', subprocess.check_output(
'./depends/config.guess').decode())
if args.download_binary:
platforms = {
'aarch64-*-linux*': 'aarch64-linux-gnu',
'x86_64-*-linux*': 'x86_64-linux-gnu',
'x86_64-apple-darwin*': 'osx64',
}
args.platform = ''
for pattern, target in platforms.items():
if fnmatch(args.host, pattern):
args.platform = target
if not args.platform:
print('Not sure which binary to download for {}'.format(args.host))
return 1
return 0
def main(args) -> int:
Path(args.target_dir).mkdir(exist_ok=True, parents=True)
print("Releases directory: {}".format(args.target_dir))
ret = check_host(args)
if ret:
return ret
if args.download_binary:
with pushd(args.target_dir):
for tag in args.tags:
ret = download_binary(tag, args)
if ret:
return ret
return 0
args.config_flags = os.environ.get('CONFIG_FLAGS', '')
args.config_flags += ' --without-gui --disable-tests --disable-bench'
with pushd(args.target_dir):
for tag in args.tags:
ret = build_release(tag, args)
if ret:
return ret
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-r', '--remove-dir', action='store_true',
help='remove existing directory.')
parser.add_argument('-d', '--depends', action='store_true',
help='use depends.')
parser.add_argument('-b', '--download-binary', action='store_true',
help='download release binary.')
parser.add_argument('-t', '--target-dir', action='store',
help='target directory.', default='releases')
parser.add_argument('tags', nargs='+',
help="release tags. e.g.: v0.18.1 v0.20.0rc2")
args = parser.parse_args()
sys.exit(main(args))
| []
| []
| [
"HOST",
"CONFIG_FLAGS"
]
| [] | ["HOST", "CONFIG_FLAGS"] | python | 2 | 0 | |
test/e2e/kamsuite/kamsuite.go | package kamsuite
import (
"bytes"
"context"
"encoding/base64"
"fmt"
"log"
"net/url"
"os"
"os/exec"
"regexp"
"strings"
"time"
"github.com/cucumber/godog"
"github.com/cucumber/messages-go/v10"
"github.com/jenkins-x/go-scm/scm"
"github.com/jenkins-x/go-scm/scm/factory"
"github.com/redhat-developer/kam/pkg/pipelines/git"
)
// FeatureContext defines godog.Suite steps for the test suite.
func FeatureContext(s *godog.Suite) {
// KAM related steps
s.Step(`^directory "([^"]*)" should exist$`,
DirectoryShouldExist)
s.Step(`^gitops repository is created$`,
createRepository)
s.Step(`^login argocd API server$`,
loginArgoAPIServerLogin)
s.Step(`^application "([^"]*)" should be in "([^"]*)" state$`,
applicationState)
s.Step(`^Wait for "(\d*)" seconds$`,
waitForTime)
s.BeforeSuite(func() {
fmt.Println("Before suite")
if !envVariableCheck() {
os.Exit(1)
}
})
s.AfterSuite(func() {
fmt.Println("After suite")
})
s.BeforeFeature(func(this *messages.GherkinDocument) {
fmt.Println("Before feature")
})
s.AfterFeature(func(this *messages.GherkinDocument) {
fmt.Println("After feature")
})
s.BeforeScenario(func(this *messages.Pickle) {
fmt.Println("Before scenario")
})
s.AfterScenario(func(*messages.Pickle, error) {
fmt.Println("After scenario")
re := regexp.MustCompile(`[a-z]+`)
scm := re.FindAllString(os.Getenv("GITOPS_REPO_URL"), 2)[1]
switch scm {
case "github":
deleteGithubRepository(os.Getenv("GITOPS_REPO_URL"), os.Getenv("GIT_ACCESS_TOKEN"))
case "gitlab":
deleteGitlabRepoStep := []string{"repo", "delete", strings.Split(strings.Split(os.Getenv("GITOPS_REPO_URL"), ".com/")[1], ".")[0], "-y"}
ok, errMessage := deleteGitlabRepository(deleteGitlabRepoStep)
if !ok {
fmt.Println(errMessage)
}
default:
fmt.Println("SCM is not supported")
}
})
}
func envVariableCheck() bool {
envVars := []string{"SERVICE_REPO_URL", "GITOPS_REPO_URL", "IMAGE_REPO", "DOCKERCONFIGJSON_PATH", "GIT_ACCESS_TOKEN"}
val, ok := os.LookupEnv("CI")
if !ok {
for _, envVar := range envVars {
_, ok := os.LookupEnv(envVar)
if !ok {
fmt.Printf("%s is not set\n", envVar)
return false
}
}
re := regexp.MustCompile(`[a-z]+`)
scm := re.FindAllString(os.Getenv("GITOPS_REPO_URL"), 2)[1]
switch scm {
case "github":
os.Setenv("GITHUB_TOKEN", os.Getenv("GIT_ACCESS_TOKEN"))
case "gitlab":
os.Setenv("GITLAB_TOKEN", os.Getenv("GIT_ACCESS_TOKEN"))
default:
fmt.Println("SCM is not supported")
}
} else {
if val == "prow" {
fmt.Printf("Running e2e test in OpenShift CI\n")
os.Setenv("SERVICE_REPO_URL", "https://github.com/kam-bot/taxi")
os.Setenv("GITOPS_REPO_URL", "https://github.com/kam-bot/taxi-"+os.Getenv("PRNO"))
os.Setenv("IMAGE_REPO", "quay.io/kam-bot/taxi")
os.Setenv("DOCKERCONFIGJSON_PATH", os.Getenv("KAM_QUAY_DOCKER_CONF_SECRET_FILE"))
os.Setenv("GIT_ACCESS_TOKEN", os.Getenv("GITHUB_TOKEN"))
} else {
fmt.Printf("You cannot run e2e test locally against OpenShift CI\n")
return false
}
return true
}
return true
}
func deleteGitlabRepository(arg []string) (bool, string) {
var stderr bytes.Buffer
cmd := exec.Command("glab", arg...)
fmt.Println("gitlab command is : ", cmd.Args)
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
return false, stderr.String()
}
return true, stderr.String()
}
func deleteGithubRepository(repoURL, token string) {
repo, err := git.NewRepository(repoURL, token)
if err != nil {
log.Fatal(err)
}
parsed, err := url.Parse(repoURL)
if err != nil {
log.Fatalf("failed to parse repository URL %q: %v", repoURL, err)
}
repoName, err := git.GetRepoName(parsed)
if err != nil {
log.Fatal(err)
}
_, err = repo.Repositories.Delete(context.TODO(), repoName)
if err != nil {
log.Printf("unable to delete repository: %v", err)
} else {
log.Printf("Successfully deleted repository: %q", repoURL)
}
}
func createRepository() error {
repoName := strings.Split(os.Getenv("GITOPS_REPO_URL"), "/")[4]
parsed, err := url.Parse(os.Getenv("GITOPS_REPO_URL"))
if err != nil {
return err
}
parsed.User = url.UserPassword("", os.Getenv("GITHUB_TOKEN"))
client, err := factory.FromRepoURL(parsed.String())
if err != nil {
return err
}
ri := &scm.RepositoryInput{
Private: false,
Description: "repocreate",
Namespace: "",
Name: repoName,
}
_, _, err = client.Repositories.Create(context.Background(), ri)
if err != nil {
return err
}
return nil
}
func waitForTime(wait int) error {
time.Sleep(time.Duration(wait) * time.Second)
return nil
}
func applicationState(appName string, appState string) error {
err := argoAppStatusMatch(appState, appName)
if err != nil {
return fmt.Errorf("Error is : %v", err)
}
return nil
}
func loginArgoAPIServerLogin() error {
var stderr bytes.Buffer
argocdPath, err := executableBinaryPath("argocd")
if err != nil {
return err
}
argocdServer, err := argocdAPIServer()
if err != nil {
return err
}
argocdPassword, err := argocdAPIServerPassword()
if err != nil {
return err
}
cmd := exec.Command(argocdPath, "login", "--username", "admin", "--password", argocdPassword, argocdServer, "--grpc-web", "--insecure")
cmd.Stderr = &stderr
err = cmd.Run()
if err != nil {
fmt.Println(stderr.String())
return err
}
return nil
}
func executableBinaryPath(executable string) (string, error) {
path, err := exec.LookPath(executable)
if err != nil {
return "", err
}
return path, nil
}
func argocdAPIServer() (string, error) {
var stdout bytes.Buffer
ocPath, err := executableBinaryPath("oc")
if err != nil {
return "", err
}
deployments := []string{"openshift-gitops-server", "openshift-gitops-repo-server",
"openshift-gitops-redis", "openshift-gitops-applicationset-controller", "kam", "cluster"}
	for _, deployment := range deployments {
		if err := waitForDeploymentsUpAndRunning("openshift-gitops", deployment); err != nil {
return "", err
}
}
cmd := exec.Command(ocPath, "get", "routes", "-n", "openshift-gitops",
"-o", "jsonpath='{.items[?(@.metadata.name==\"openshift-gitops-server\")].spec.host}'")
cmd.Stdout = &stdout
err = cmd.Run()
if err != nil {
return "", err
}
return strings.Trim(stdout.String(), "'"), nil
}
func argocdAPIServerPassword() (string, error) {
var stdout, stderr bytes.Buffer
ocPath, err := executableBinaryPath("oc")
if err != nil {
return "", err
}
cmd := exec.Command(ocPath, "get", "secret", "openshift-gitops-cluster", "-n", "openshift-gitops", "-o", "jsonpath='{.data.admin\\.password}'")
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err = cmd.Run()
if err != nil {
return "", err
}
data, err := base64.StdEncoding.DecodeString(strings.Trim(stdout.String(), "'"))
if err != nil {
return "", err
}
return string(data), nil
}
func waitForDeploymentsUpAndRunning(namespace string, deploymentName string) error {
var stderr, stdout bytes.Buffer
ocPath, err := executableBinaryPath("oc")
if err != nil {
return err
}
cmd := exec.Command(ocPath, "rollout", "status", "deployment", deploymentName, "-n", namespace)
cmd.Stderr = &stderr
cmd.Stdout = &stdout
err = cmd.Run()
if err == nil && strings.Contains(stdout.String(), "successfully rolled out") {
return nil
}
return fmt.Errorf("Error is : %v", stderr.String())
}
func argoAppStatusMatch(matchString string, appName string) error {
var stdout, stderr bytes.Buffer
argocdPath, err := executableBinaryPath("argocd")
if err != nil {
return err
}
appList := []string{"app", "list"}
cmd := exec.Command(argocdPath, appList...)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
if err = cmd.Run(); err != nil {
return err
}
re, _ := regexp.Compile(appName + ".+")
appDetailsString := re.FindString(stdout.String())
if strings.Contains(appDetailsString, matchString) {
return nil
}
return fmt.Errorf("Error is : %v", stderr.String())
}
| [
"\"GITOPS_REPO_URL\"",
"\"GITOPS_REPO_URL\"",
"\"GIT_ACCESS_TOKEN\"",
"\"GITOPS_REPO_URL\"",
"\"GITOPS_REPO_URL\"",
"\"GIT_ACCESS_TOKEN\"",
"\"GIT_ACCESS_TOKEN\"",
"\"PRNO\"",
"\"KAM_QUAY_DOCKER_CONF_SECRET_FILE\"",
"\"GITHUB_TOKEN\"",
"\"GITOPS_REPO_URL\"",
"\"GITOPS_REPO_URL\"",
"\"GITHUB_TOKEN\""
]
| []
| [
"GITOPS_REPO_URL",
"PRNO",
"GIT_ACCESS_TOKEN",
"KAM_QUAY_DOCKER_CONF_SECRET_FILE",
"GITHUB_TOKEN"
]
| [] | ["GITOPS_REPO_URL", "PRNO", "GIT_ACCESS_TOKEN", "KAM_QUAY_DOCKER_CONF_SECRET_FILE", "GITHUB_TOKEN"] | go | 5 | 0 | |
route/admin.go | package route
import (
"net/http"
"os"
"github.com/so-chiru/llct-server/metaify/metautils"
)
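// adminHandler is a minimal admin endpoint: it triggers metautils.PullUpdate() only when the
// request's "pass" query parameter matches the ADMIN_PASSPHRASE environment variable
// (e.g. GET /admin?pass=<passphrase>; the exact route path is assumed to be registered elsewhere).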
func adminHandler(w http.ResponseWriter, r *http.Request) {
var pass = r.URL.Query().Get("pass")
var PASS = os.Getenv("ADMIN_PASSPHRASE")
if PASS == "" || pass != PASS {
		var error_string = []byte("올바르지 않은 요청입니다.") // "Invalid request."
CreateJsonResponse(&w, false, &error_string)
return
}
result, err := metautils.PullUpdate()
if err != nil {
var error_string = []byte(err.Error())
CreateJsonResponse(&w, false, &error_string)
return
}
var data = []byte(result)
CreateJsonResponse(&w, true, &data)
}
| [
"\"ADMIN_PASSPHRASE\""
]
| []
| [
"ADMIN_PASSPHRASE"
]
| [] | ["ADMIN_PASSPHRASE"] | go | 1 | 0 | |
vote/app.py | from flask import Flask, render_template, request, make_response, g
from redis import Redis
import os
import socket
import random
import json
option_a = os.getenv('OPTION_A', "Cats")
option_b = os.getenv('OPTION_B', "Dogs")
hostname = socket.gethostname()
app = Flask(__name__)
def get_redis():
if not hasattr(g, 'redis'):
g.redis = Redis(host="redis", db=0, socket_timeout=5)
return g.redis
@app.route("/", methods=['POST','GET'])
def hello():
voter_id = request.cookies.get('voter_id')
if not voter_id:
voter_id = hex(random.getrandbits(64))[2:-1]
vote = None
if request.method == 'POST':
redis = get_redis()
vote = request.form['vote']
data = json.dumps({'voter_id': voter_id, 'vote': vote})
redis.rpush('votes', data)
resp = make_response(render_template(
'index.html',
option_a=option_a,
option_b=option_b,
hostname=hostname,
vote=vote,
))
resp.set_cookie('voter_id', voter_id)
return resp
if __name__ == "__main__":
app.run(host='127.0.0.1', port=80, debug=True, threaded=True)
| []
| []
| [
"OPTION_A",
"OPTION_B"
]
| [] | ["OPTION_A", "OPTION_B"] | python | 2 | 0 | |
bindings/twitter/twitter_test.go | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ------------------------------------------------------------
package twitter
import (
"encoding/json"
"os"
"testing"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/dapr/pkg/logger"
"github.com/dghubble/go-twitter/twitter"
"github.com/stretchr/testify/assert"
)
const (
testTwitterConsumerKey = "test-consumerKey"
testTwitterConsumerSecret = "test-consumerSecret"
testTwitterAccessToken = "test-accessToken"
testTwitterAccessSecret = "test-accessSecret"
)
func getTestMetadata() bindings.Metadata {
m := bindings.Metadata{}
m.Properties = map[string]string{
"consumerKey": testTwitterConsumerKey,
"consumerSecret": testTwitterConsumerSecret,
"accessToken": testTwitterAccessToken,
"accessSecret": testTwitterAccessSecret,
}
return m
}
func getRuntimeMetadata() map[string]string {
return map[string]string{
"consumerKey": os.Getenv("CONSUMER_KEY"),
"consumerSecret": os.Getenv("CONSUMER_SECRET"),
"accessToken": os.Getenv("ACCESS_TOKEN"),
"accessSecret": os.Getenv("ACCESS_SECRET"),
}
}
// go test -v -count=1 ./bindings/twitter/
func TestInit(t *testing.T) {
m := getTestMetadata()
tw := NewTwitter(logger.NewLogger("test"))
err := tw.Init(m)
assert.Nilf(t, err, "error initializing valid metadata properties")
}
// TestReadError executes the Read method and fails before the Twitter API call
// go test -v -count=1 -run TestReadError ./bindings/twitter/
func TestReadError(t *testing.T) {
tw := NewTwitter(logger.NewLogger("test"))
m := getTestMetadata()
err := tw.Init(m)
assert.Nilf(t, err, "error initializing valid metadata properties")
tw.Read(func(res *bindings.ReadResponse) error {
t.Logf("result: %+v", res)
assert.NotNilf(t, err, "no error on read with invalid credentials")
return nil
})
}
// TestReed executes the Read method, which calls the Twitter API
// env RUN_LIVE_TW_TEST=true go test -v -count=1 -run TestReed ./bindings/twitter/
func TestReed(t *testing.T) {
if os.Getenv("RUN_LIVE_TW_TEST") != "true" {
t.SkipNow() // skip this test until able to read credentials in test infra
}
m := bindings.Metadata{}
m.Properties = getRuntimeMetadata()
// add query
m.Properties["query"] = "microsoft"
tw := NewTwitter(logger.NewLogger("test"))
tw.logger.SetOutputLevel(logger.DebugLevel)
err := tw.Init(m)
assert.Nilf(t, err, "error initializing read")
counter := 0
err = tw.Read(func(res *bindings.ReadResponse) error {
counter++
t.Logf("tweet[%d]", counter)
var tweet twitter.Tweet
json.Unmarshal(res.Data, &tweet)
assert.NotEmpty(t, tweet.IDStr, "tweet should have an ID")
os.Exit(0)
return nil
})
assert.Nilf(t, err, "error on read")
}
// TestInvoke executes the Invoke method, which calls the Twitter API
// test tokens must be set
// env RUN_LIVE_TW_TEST=true go test -v -count=1 -run TestInvoke ./bindings/twitter/
func TestInvoke(t *testing.T) {
if os.Getenv("RUN_LIVE_TW_TEST") != "true" {
t.SkipNow() // skip this test until able to read credentials in test infra
}
m := bindings.Metadata{}
m.Properties = getRuntimeMetadata()
tw := NewTwitter(logger.NewLogger("test"))
tw.logger.SetOutputLevel(logger.DebugLevel)
err := tw.Init(m)
assert.Nilf(t, err, "error initializing Invoke")
req := &bindings.InvokeRequest{
Metadata: map[string]string{
"query": "microsoft",
},
}
resp, err := tw.Invoke(req)
assert.Nilf(t, err, "error on invoke")
assert.NotNil(t, resp)
}
| [
"\"CONSUMER_KEY\"",
"\"CONSUMER_SECRET\"",
"\"ACCESS_TOKEN\"",
"\"ACCESS_SECRET\"",
"\"RUN_LIVE_TW_TEST\"",
"\"RUN_LIVE_TW_TEST\""
]
| []
| [
"CONSUMER_SECRET",
"CONSUMER_KEY",
"ACCESS_SECRET",
"ACCESS_TOKEN",
"RUN_LIVE_TW_TEST"
]
| [] | ["CONSUMER_SECRET", "CONSUMER_KEY", "ACCESS_SECRET", "ACCESS_TOKEN", "RUN_LIVE_TW_TEST"] | go | 5 | 0 | |
Python/utils.py | import os, signal, subprocess, sys, threading
def isint(s):
'''
Check if a string represents an int.
Note: Numbers in scientific notation (e.g., 1e3) are floats in Python,
and `isint('1e3')` will return False.
Source: https://stackoverflow.com/a/1265696
'''
if len(s) < 1:
return False
if s[0] in ('-', '+'):
return s[1:].isdigit()
return s.isdigit()
def isfloat(s):
'''
Check if a string represents a float.
Notes
- If the string represents an int, this function will still return True.
- To determine whether a string `s` represents an int or a float, consider the following options:
1. Use `isint(s)` first, then `isfloat(s)` if the former returns False.
2. Use `isfloat()` first, then `float(s).is_integer()` if the former returns True.
Source: https://stackoverflow.com/a/15357477
'''
try:
float(s)
except ValueError:
return False
return True
def wrap_structure(x, structure):
'''
Enclose object in a data structure if it is not already of that type.
Args
- x: object
- structure: str or data structure
'list', list, 'tuple', tuple, 'set', or set
Raises ValueError if unsupported data structure given.
Returns: list, tuple, or set
'''
if structure in ('list', list):
return x if type(x) == list else [x]
elif structure in ('tuple', tuple):
return x if type(x) == tuple else (x,)
elif structure in ('set', set):
return x if type(x) == set else {x}
else:
        raise ValueError(f'{structure!r} is not a supported data structure.')
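# Illustrative usage of wrap_structure (example values, not taken from tests):
#   wrap_structure('a', list)   -> ['a']
#   wrap_structure(['a'], list) -> ['a']   (already a list, returned unchanged)
#   wrap_structure('a', 'set')  -> {'a'}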
def get_available_cpus(job_scheduler=None):
'''
Get the number of CPUs the current process can use.
Args
- job_scheduler: str. default=None
Job scheduling environment. Currently supports 'SLURM'
Returns: int or None
May return None if the number of CPUs in the system is undetermined.
See https://docs.python.org/3/library/os.html#os.cpu_count.
'''
try:
return len(os.sched_getaffinity(0))
except:
try:
if job_scheduler and job_scheduler.upper() == 'SLURM':
                return int(os.environ['SLURM_CPUS_PER_TASK'])
except:
pass
print('Unable to detect number of available CPUs. Returning number of total CPUs.', file=sys.stderr)
return os.cpu_count()
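# Illustrative usage of get_available_cpus (the value 4 is an assumed example):
#   n = get_available_cpus(job_scheduler='SLURM')  # e.g. 4 when SLURM_CPUS_PER_TASK=4 and CPU affinity is unavailable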
def wrap_signal_handler(fun, sig, handler=signal.SIG_DFL):
'''
Wrap a function with a signal handler.
Args
- fun: function
Function to to wrap
- sig: signal
Signal to handle with handler
- handler: function or signal.SIG_IGN or signal.SIG_DFL. default=signal.SIG_DFL
Signal handler
Returns: function
Examples
- Wrap a function such that it ignores SIGINT:
wrap_signal_handler(fun, signal.SIGINT, handler=signal.SIG_IGN)
'''
def newfun(*args, **kwargs):
signal.signal(sig, handler)
return fun(*args, **kwargs)
return newfun
class empty_context:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
class ThreadPool:
'''
Thread pool to drive subprocesses.
Usage
pool = ThreadPool(nproc)
pool.schedule(name, cmd)
pool.join()
Example
pool = ThreadPool(4)
pool.schedule('example1', {'args': ['ls', '-l']})
pool.join()
Notes
    - This implementation relies on subprocess.Popen() to spawn subprocesses
      (unlike subprocess.run(), Popen() returns immediately rather than blocking until the command completes).
    - Compared with the standard multiprocessing library:
- SIGINT signals (e.g., from Ctrl-C) will terminate process directly created
by multiprocessing.Process or multiprocessing.[dummy.]Pool, but not descendant processes
- Default multiprocessing.Pool on Unix systems uses a 'fork' start method, which will
duplicate the entire parent's memory.
'''
class __worker_data__:
def __init__(self):
self.available = True
self.name = None
self.cmd = None
self.execute = threading.Semaphore(0)
def __init__(self, nproc):
assert nproc > 0
self.nproc = nproc
self.names = set()
self.done = False
self.terminated = False
self.count = 0
self.thunk_scheduler = threading.Semaphore(0)
self.worker_available = threading.Semaphore(0)
self.worker_data = []
self.workers = []
self.queue = []
self.queue_lock = threading.Lock()
self.cv = threading.Condition(lock=self.queue_lock)
self.dt = threading.Thread(target=self.dispatcher, name='dispatcher')
self.running = {}
self.finished = {}
# setup worker threads and worker data
for i in range(nproc):
self.workers.append(threading.Thread(target=self.worker, name=f'worker_{i}', args=(i,)))
self.worker_data.append(self.__worker_data__())
# start threads
for w in self.workers:
w.start()
self.dt.start()
def __del__(self):
# timeout is necessary to prevent deadlock - see https://docs.python.org/3/reference/datamodel.html#object.__del__
self.terminate(timeout=10)
def schedule(self, name, cmd, ignore_sigint=True):
'''
Args
- name: hashable object (e.g., str, int, tuple, etc.)
- cmd: dict
Keyword arguments to pass to subprocess.Popen()
Keys (str) are argument names. Some of the args listed below:
- args: iterable of str, or str
Sequence of program arguments, where the program to execute is the first item in args.
On POSIX, if args is a string, the string is interpreted as the name or path of the program to execute.
- preexec_fn: callable
Called in the child process just before the child is executed
- ignore_sigint: bool. default=True
Ignore SIGINT (e.g., Ctrl-C) signals sent to worker processes. Useful to prevent keyboard interrupts from
interruping ThreadPool in interactive sessions (e.g., Jupyter Notebook).
Call ThreadPool.terminate() to terminate subprocesses.
'''
with self.queue_lock:
if self.terminated:
print('Pool has been terminated and can no longer schedule commands.', file=sys.stderr)
return
if name in self.names:
print(f'{name} has already been used as the name of a process. Not scheduling ...', file=sys.stderr)
return
self.names.add(name)
if ignore_sigint:
cmd.setdefault('preexec_fn', lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
self.queue.append((name, cmd))
self.count += 1
self.thunk_scheduler.release()
def get_available_worker(self):
for i in range(self.nproc):
if self.worker_data[i].available:
return i
return None
def dispatcher(self):
while True:
# block until the queue of outstanding functions is nonempty
self.thunk_scheduler.acquire()
if self.done:
return
# wait for a worker thread to become available and select it
self.worker_available.acquire()
i = self.get_available_worker()
assert i is not None
self.worker_data[i].available = False
# dequeue the least recently scheduled function
# put a copy of that function in a place where the selected worker (and only that worker) can find it
with self.queue_lock:
if not self.terminated:
name, cmd = self.queue.pop(0)
self.worker_data[i].name = name
self.worker_data[i].cmd = cmd
else:
self.worker_data[i].available = True
self.worker_available.release()
continue
# signal the worker thread to execute the thunk
self.worker_data[i].execute.release()
def worker(self, i):
self.worker_available.release()
while True:
# block until the dispatcher thread signals worker to execute an assigned function
self.worker_data[i].execute.acquire()
# if done but not available, that means the worker was assigned a thunk but not yet executed
# before the ThreadPool destructor was called
if self.done and self.worker_data[i].available:
return
# invoke the function, wait for it to execute
# acquiring the queue_lock is necessary to prevent a race condition with terminate()
with self.queue_lock:
if self.terminated:
self.worker_data[i].available = True
self.worker_data[i].name = None
self.worker_data[i].cmd = None
self.worker_available.release()
continue
name = self.worker_data[i].name
cmd = self.worker_data[i].cmd
p = subprocess.Popen(**cmd)
self.running[name] = p
# Calling p.wait() without any lock assumes that it is thread-safe, which appears to be the case based on
# https://bugs.python.org/issue21291. The subprocess module also only notes a lack of thread-safety once
# when using the preexec_fn parameter with subprocess.Popen(). Specifically, we assume that there is no race
# condition involved between calling p.wait() and p.terminate().
#
# Note: subprocess.Popen.wait() is currently implemented by CPython using busy waiting :(
p.wait()
with self.cv:
self.finished[name] = self.running.pop(name)
self.count -= 1
if self.count == 0:
self.cv.notify_all()
self.worker_data[i].available = True
self.worker_data[i].name = None
self.worker_data[i].cmd = None
self.worker_available.release()
def status(self, _print=True, _return=False):
'''
Args
- _print: bool. default=True
Print progress to stdout
- _return: bool. default=False
Return progress as (never_run, queued, running, error, success)
where each element of the tuple is an iterable of process names
Returns: 5-tuple or None
'''
success = []
error = []
with self.queue_lock:
running = list(self.running.keys())
for name, p in self.finished.items():
if p.poll() == 0:
success.append(name)
else:
error.append(name)
queued = [name for name, _ in self.queue]
never_run = self.names - set(running + success + error + queued)
if _print:
print('Never run:', never_run, end='\n\n')
print('Queued:', queued, end='\n\n')
print('Running:', running, end='\n\n')
print('Finished with error:', error, end='\n\n')
print('Finished successfully:', success, flush=True)
if _return:
return (never_run, queued, running, error, success)
def terminate(self, timeout=-1):
lock_acquired = self.queue_lock.acquire(timeout=timeout)
if lock_acquired:
try:
# clear queue
self.queue.clear()
# terminate processes
for p in self.running.values():
p.terminate()
# clear thunk_scheduler
while self.thunk_scheduler.acquire(blocking=False):
pass
self.terminated = True
finally:
self.queue_lock.release()
else:
print('terminate() called unsuccessfully. Could not acquire queue lock.', file=sys.stderr)
def join(self):
# wait until all scheduled functions have executed to completion
with self.cv:
if not self.terminated:
self.cv.wait_for(lambda: self.count == 0)
self.done = True
# inform dispatcher and worker threads to exit
self.thunk_scheduler.release()
for i in range(len(self.worker_data)):
self.worker_data[i].execute.release()
# wait for dispatcher and worker threads to exit
self.dt.join()
for w in self.workers:
w.join()
| []
| []
| [
"SLURM_CPUS_PER_TASK"
]
| [] | ["SLURM_CPUS_PER_TASK"] | python | 1 | 0 | |
test/sanity/sanity_test.go | /*
Copyright © 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sanity_test
import (
"context"
"fmt"
"os"
"sync"
"testing"
"time"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/coreos/rkt/tests/testutils/logger"
"github.com/google/uuid"
"github.com/kubernetes-csi/csi-test/v3/pkg/sanity"
"github.com/sirupsen/logrus"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
api "github.com/dell/csi-baremetal/api/generated/v1"
apiV1 "github.com/dell/csi-baremetal/api/v1"
accrd "github.com/dell/csi-baremetal/api/v1/availablecapacitycrd"
"github.com/dell/csi-baremetal/api/v1/drivecrd"
vcrd "github.com/dell/csi-baremetal/api/v1/volumecrd"
"github.com/dell/csi-baremetal/pkg/base"
"github.com/dell/csi-baremetal/pkg/base/featureconfig"
"github.com/dell/csi-baremetal/pkg/base/k8s"
"github.com/dell/csi-baremetal/pkg/base/linuxutils/lsblk"
"github.com/dell/csi-baremetal/pkg/base/rpc"
"github.com/dell/csi-baremetal/pkg/controller"
"github.com/dell/csi-baremetal/pkg/mocks"
mocklu "github.com/dell/csi-baremetal/pkg/mocks/linuxutils"
"github.com/dell/csi-baremetal/pkg/mocks/provisioners"
"github.com/dell/csi-baremetal/pkg/node"
p "github.com/dell/csi-baremetal/pkg/node/provisioners"
)
var (
controllerEndpoint = "unix:///tmp/controller.sock"
nodeEndpoint = "unix:///tmp/node.sock"
targetSanityPath = os.TempDir() + "/csi-mount/target"
stagingSanityPath = os.TempDir() + "/csi-staging"
driverName = "csi-baremetal-driver"
version = "test"
testNs = "default"
nodeId = "localhost"
testDrives = []*api.Drive{
{
UUID: "uuid-1",
SerialNumber: "hdd1",
Size: 1024 * 1024 * 1024 * 500,
Health: apiV1.HealthGood,
Status: apiV1.DriveStatusOnline,
Path: "/dev/sda",
Type: apiV1.DriveTypeHDD,
},
{
UUID: "uuid-2",
SerialNumber: "hdd2",
Size: 1024 * 1024 * 1024 * 200,
Health: apiV1.HealthGood,
Status: apiV1.DriveStatusOnline,
Path: "/dev/sdb",
Type: apiV1.DriveTypeHDD,
},
}
)
func skipIfNotSanity(t *testing.T) {
if os.Getenv("SANITY") == "" {
t.Skip("Skipping Sanity testing")
}
}
func TestDriverWithSanity(t *testing.T) {
skipIfNotSanity(t)
// Node and Controller must share Fake k8s client because sanity tests don't run under k8s env.
kubeClient, err := k8s.GetFakeKubeClient(testNs, logrus.New())
if err != nil {
panic(err)
}
nodeReady := make(chan bool)
defer close(nodeReady)
go newNodeSvc(kubeClient, nodeReady)
// wait until Node become initialized
<-nodeReady
go newControllerSvc(kubeClient)
config := sanity.NewTestConfig()
config.RemoveStagingPath = os.RemoveAll
config.Address = nodeEndpoint
config.ControllerAddress = controllerEndpoint
config.JUnitFile = "report.xml"
// Call sanity test suite
sanity.Test(t, config)
}
func newControllerSvc(kubeClient *k8s.KubeClient) {
ll, _ := base.InitLogger("", base.DebugLevel)
controllerService := controller.NewControllerService(kubeClient, ll, featureconfig.NewFeatureConfig())
csiControllerServer := rpc.NewServerRunner(nil, controllerEndpoint, false, ll)
csi.RegisterIdentityServer(csiControllerServer.GRPCServer, controller.NewIdentityServer(driverName, version))
csi.RegisterControllerServer(csiControllerServer.GRPCServer, controllerService)
ll.Info("Starting CSIControllerService")
if err := csiControllerServer.RunServer(); err != nil {
ll.Fatalf("fail to serve, error: %v", err)
os.Exit(1)
}
}
func newNodeSvc(kubeClient *k8s.KubeClient, nodeReady chan<- bool) {
ll, _ := base.InitLogger("", base.DebugLevel)
csiNodeService := prepareNodeMock(kubeClient, ll)
csiUDSServer := rpc.NewServerRunner(nil, nodeEndpoint, false, ll)
csi.RegisterNodeServer(csiUDSServer.GRPCServer, csiNodeService)
csi.RegisterIdentityServer(csiUDSServer.GRPCServer, csiNodeService)
go func() {
var doOnce sync.Once
for range time.Tick(10 * time.Second) {
err := csiNodeService.Discover()
if err != nil {
ll.Fatalf("Discover failed: %v", err)
}
doOnce.Do(func() {
drives := &drivecrd.DriveList{}
_ = kubeClient.ReadList(context.Background(), drives)
for _, d := range drives.Items {
name := uuid.New().String()
location := d.Name
var ac = accrd.AvailableCapacity{}
err = kubeClient.ReadCR(context.Background(), location, "", &ac)
if k8serrors.IsNotFound(err) {
acCR := kubeClient.ConstructACCR(name, api.AvailableCapacity{
Location: d.Spec.UUID,
NodeId: d.Spec.NodeId,
StorageClass: d.Spec.Type,
Size: d.Spec.Size,
})
if err := kubeClient.CreateCR(context.Background(), name, acCR); err != nil {
logger.Errorf("unable to create AC, error: %v", err)
}
}
}
nodeReady <- true
})
}
}()
go imitateVolumeManagerReconcile(kubeClient)
ll.Info("Starting CSINodeService")
if err := csiUDSServer.RunServer(); err != nil {
logger.Fatalf("fail to serve: %v", err)
}
}
// prepareNodeMock prepares instance of Node service with mocked drivemgr and mocked executor
func prepareNodeMock(kubeClient *k8s.KubeClient, log *logrus.Logger) *node.CSINodeService {
c := mocks.NewMockDriveMgrClient(testDrives)
e := mocks.NewMockExecutor(map[string]mocks.CmdOut{fmt.Sprintf(lsblk.CmdTmpl, ""): {Stdout: mocks.LsblkTwoDevicesStr}})
e.SetSuccessIfNotFound(true)
nodeService := node.NewCSINodeService(nil, nodeId, log, kubeClient, kubeClient,
new(mocks.NoOpRecorder), featureconfig.NewFeatureConfig())
nodeService.VolumeManager = *node.NewVolumeManager(c, e, log, kubeClient, kubeClient, new(mocks.NoOpRecorder), nodeId)
listBlk := mocklu.GetMockWrapLsblk("/some/path")
nodeService.VolumeManager.SetListBlk(listBlk)
pMock := provisioners.GetMockProvisionerSuccess("/some/path")
nodeService.SetProvisioners(map[p.VolumeType]p.Provisioner{p.DriveBasedVolumeType: pMock})
return nodeService
}
// imitateVolumeManagerReconcile imitates working of VolumeManager's Reconcile loop under not k8s env.
func imitateVolumeManagerReconcile(kubeClient *k8s.KubeClient) {
for range time.Tick(10 * time.Second) {
volumes := &vcrd.VolumeList{}
_ = kubeClient.ReadList(context.Background(), volumes)
for _, v := range volumes.Items {
if v.Spec.CSIStatus == apiV1.Creating {
v.Spec.CSIStatus = apiV1.Created
_ = kubeClient.UpdateCRWithAttempts(context.Background(), &v, 5)
}
if v.Spec.CSIStatus == apiV1.Removing {
v.Spec.CSIStatus = apiV1.Removed
_ = kubeClient.UpdateCRWithAttempts(context.Background(), &v, 5)
}
}
}
}
| [
"\"SANITY\""
]
| []
| [
"SANITY"
]
| [] | ["SANITY"] | go | 1 | 0 | |
piped/service.tac | # Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
import logging
import os
import sys
import json
import yaml
from twisted.internet import reactor
from piped import exceptions, resource, processing, service
logger = logging.getLogger('piped.service')
runtime_environment = processing.RuntimeEnvironment()
runtime_environment.configure()
application = runtime_environment.application
def _on_configuration_loaded():
service_name = runtime_environment.configuration_manager.get('service_name', 'piped')
# Set the process title, if we can.
try:
import setproctitle
setproctitle.setproctitle(service_name)
except ImportError:
# It's just a nicety, though, so don't blow up if we can't.
pass
logger.info('Starting service "%s" (PID %i).'%(service_name, os.getpid()))
provider_plugin_manager = resource.ProviderPluginManager()
provider_plugin_manager.configure(runtime_environment)
service_plugin_manager = service.ServicePluginManager()
service_plugin_manager.configure(runtime_environment)
# Move these into acting upon state changes.
runtime_environment.dependency_manager.resolve_initial_states()
def bootstrap():
configuration_file_path = os.environ.get('PIPED_CONFIGURATION_FILE', None)
overrides = json.loads(os.environ.get('PIPED_CONFIGURATION_OVERRIDES', '[]'))
try:
_fail_if_no_configuration_file_is_specified(configuration_file_path)
runtime_environment.configuration_manager.load_from_file(configuration_file_path)
_handle_configuration_overrides(runtime_environment.configuration_manager, overrides)
_on_configuration_loaded()
except:
logger.critical('Error while bootstrapping service.', exc_info=True)
reactor.stop()
def _handle_configuration_overrides(configuration_manager, overrides):
for override in overrides:
# in yaml, a mapping uses a colon followed by a space, but we want to be able to
# specify -O some.nested.option:42 on the command line, without the space, so we
# add a space after the first colon in the override specification, as doing so does
# not affect an otherwise correct yaml mapping.
adjusted_override = override.replace(':', ': ', 1)
override_as_dict = yaml.load(adjusted_override)
if not isinstance(override_as_dict, dict):
e_msg = 'Invalid override specification.'
detail = 'Expected a yaml mapping, but got %r.' % override
raise exceptions.ConfigurationError(e_msg, detail)
for path, value in override_as_dict.items():
logger.debug('Setting configuration override %r.'%path)
configuration_manager.set(path, value)
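# Illustrative example (assumed values): an override given as -O logging.level:DEBUG arrives
# here as the string 'logging.level:DEBUG', becomes the yaml mapping 'logging.level: DEBUG',
# i.e. {'logging.level': 'DEBUG'}, and is applied as configuration_manager.set('logging.level', 'DEBUG').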
def _fail_if_no_configuration_file_is_specified(configuration_file_path):
if not configuration_file_path:
e_msg = 'No configuration file specified.'
detail = ('Either use the -c/--conf option of %r, or set the PIPED_CONFIGURATION_FILE environment '
'variable to the path of the configuration file.' % sys.argv[0])
raise exceptions.ConfigurationError(e_msg, detail)
# The callLater is necessary so providers don't start before forking if we're daemonizing.
reactor.callLater(0, bootstrap)
| []
| []
| [
"PIPED_CONFIGURATION_FILE",
"PIPED_CONFIGURATION_OVERRIDES"
]
| [] | ["PIPED_CONFIGURATION_FILE", "PIPED_CONFIGURATION_OVERRIDES"] | python | 2 | 0 | |
sensor/simulation/simulate.py | #!/usr/bin/python3
from signal import signal, SIGTERM
from concurrent.futures import ThreadPoolExecutor
import subprocess
import socket
import random
import time
import os
import re
simulated_root="/mnt/simulated"
files=[f for f in os.listdir(simulated_root) if re.search(os.environ["FILES"],f)]
rtsp_port=int(os.environ["RTSP_PORT"])
rtp_port=int(os.environ["RTP_PORT"])
port_step=int(os.environ["PORT_STEP"]) if "PORT_STEP" in os.environ else 100
ncameras=int(os.environ["NCAMERAS"])
def serve_stream(file1, rtsp_port1, rtp_port1):
rtsp="rtsp://@:"+str(rtsp_port1)+"/live.sdp"
while True:
subprocess.call(["/usr/bin/cvlc","-vvv",file1,"--loop",":sout=#gather:rtp{sdp="+rtsp+",port="+str(rtp_port1)+"}",":network-caching:1500",":sout-all",":sout-keep"])
time.sleep(10)
def quit_service(signum, sigframe):
exit(143)
signal(SIGTERM, quit_service)
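# Illustrative port layout (assuming RTSP_PORT=8554, RTP_PORT=5554, PORT_STEP=100, NCAMERAS=3):
#   camera 0 -> rtsp://@:8554/live.sdp (RTP 5554)
#   camera 1 -> rtsp://@:8654/live.sdp (RTP 5654)
#   camera 2 -> rtsp://@:8754/live.sdp (RTP 5754)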
with ThreadPoolExecutor(ncameras) as e:
for i in range(ncameras):
e.submit(serve_stream, simulated_root+"/"+files[i%len(files)],rtsp_port+i*port_step,rtp_port+i*port_step)
| []
| []
| [
"PORT_STEP",
"RTSP_PORT",
"RTP_PORT",
"NCAMERAS",
"FILES"
]
| [] | ["PORT_STEP", "RTSP_PORT", "RTP_PORT", "NCAMERAS", "FILES"] | python | 5 | 0 | |
integration-cli/docker_test_vars.go | package main
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"github.com/docker/docker/pkg/reexec"
)
var (
// the docker binary to use
dockerBinary = "docker"
// the private registry image to use for tests involving the registry
registryImageName = "registry"
// the private registry to use for tests
privateRegistryURL = "127.0.0.1:5000"
runtimePath = "/var/run/docker"
execDriverPath = runtimePath + "/execdriver/native"
workingDirectory string
// isLocalDaemon is true if the daemon under test is on the same
// host as the CLI.
isLocalDaemon bool
// daemonPlatform is held globally so that tests can make intelligent
// decisions on how to configure themselves according to the platform
// of the daemon. This is initialized in docker_utils by sending
// a version call to the daemon and examining the response header.
daemonPlatform string
// windowsDaemonKV is used on Windows to distinguish between different
// versions. This is necessary to enable certain tests based on whether
// the platform supports it. For example, Windows Server 2016 TP3 does
// not support volumes, but TP4 does.
windowsDaemonKV int
// daemonDefaultImage is the name of the default image to use when running
// tests. This is platform dependent.
daemonDefaultImage string
// For a local daemon on Linux, these values will be used for testing
// user namespace support as the standard graph path(s) will be
// appended with the root remapped uid.gid prefix
dockerBasePath string
volumesConfigPath string
containerStoragePath string
)
const (
// WindowsBaseImage is the name of the base image for Windows testing
WindowsBaseImage = "windowsservercore"
// DefaultImage is the name of the base image for the majority of tests that
// are run across suites
DefaultImage = "busybox"
)
func init() {
reexec.Init()
if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" {
dockerBinary = dockerBin
}
var err error
dockerBinary, err = exec.LookPath(dockerBinary)
if err != nil {
fmt.Printf("ERROR: couldn't resolve full path to the Docker binary (%v)", err)
os.Exit(1)
}
if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" {
registryImageName = registryImage
}
if registry := os.Getenv("REGISTRY_URL"); registry != "" {
privateRegistryURL = registry
}
workingDirectory, _ = os.Getwd()
// Deterministically working out the environment in which CI is running
// to evaluate whether the daemon is local or remote is not possible through
// a build tag.
//
// For example Windows CI under Jenkins test the 64-bit
// Windows binary build with the daemon build tag, but calls a remote
// Linux daemon.
//
// We can't just say if Windows then assume the daemon is local as at
// some point, we will be testing the Windows CLI against a Windows daemon.
//
// Similarly, it will be perfectly valid to also run CLI tests from
// a Linux CLI (built with the daemon tag) against a Windows daemon.
if len(os.Getenv("DOCKER_REMOTE_DAEMON")) > 0 {
isLocalDaemon = false
} else {
isLocalDaemon = true
}
// This is only used for a tests with local daemon true (Linux-only today)
// default is "/var/lib/docker", but we'll try and ask the
// /info endpoint for the specific root dir
dockerBasePath = "/var/lib/docker"
type Info struct {
DockerRootDir string
}
var i Info
status, b, err := sockRequest("GET", "/info", nil)
if err == nil && status == 200 {
if err = json.Unmarshal(b, &i); err == nil {
dockerBasePath = i.DockerRootDir
}
}
volumesConfigPath = dockerBasePath + "/volumes"
containerStoragePath = dockerBasePath + "/containers"
}
| [
"\"DOCKER_BINARY\"",
"\"REGISTRY_IMAGE\"",
"\"REGISTRY_URL\"",
"\"DOCKER_REMOTE_DAEMON\""
]
| []
| [
"REGISTRY_URL",
"REGISTRY_IMAGE",
"DOCKER_BINARY",
"DOCKER_REMOTE_DAEMON"
]
| [] | ["REGISTRY_URL", "REGISTRY_IMAGE", "DOCKER_BINARY", "DOCKER_REMOTE_DAEMON"] | go | 4 | 0 | |
tzlocal/unix.py | import os
import pytz
import re
from tzlocal import utils
_cache_tz = None
def _tz_from_env(tzenv):
if tzenv[0] == ':':
tzenv = tzenv[1:]
# TZ specifies a file
if os.path.exists(tzenv):
with open(tzenv, 'rb') as tzfile:
return pytz.tzfile.build_tzinfo('local', tzfile)
# TZ specifies a zoneinfo zone.
try:
tz = pytz.timezone(tzenv)
# That worked, so we return this:
return tz
except pytz.UnknownTimeZoneError:
        raise pytz.UnknownTimeZoneError(
            "tzlocal() does not support non-zoneinfo timezones like %s. \n"
            "Please use a timezone in the form of Continent/City" % tzenv)
def _try_tz_from_env():
tzenv = os.environ.get('TZ')
if tzenv:
try:
return _tz_from_env(tzenv)
except pytz.UnknownTimeZoneError:
pass
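# Illustrative TZ values handled above (example values, not exhaustive):
#   TZ=Europe/Oslo        -> looked up as a zoneinfo zone via pytz.timezone()
#   TZ=:/etc/localtime    -> leading ':' stripped, then the path is read as a tzfile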
def _get_localzone(_root='/'):
"""Tries to find the local timezone configuration.
This method prefers finding the timezone name and passing that to pytz,
over passing in the localtime file, as in the later case the zoneinfo
name is unknown.
The parameter _root makes the function look for files like /etc/localtime
beneath the _root directory. This is primarily used by the tests.
In normal usage you call the function without parameters."""
tzenv = _try_tz_from_env()
if tzenv:
return tzenv
# Now look for distribution specific configuration files
# that contain the timezone name.
for configfile in ('etc/timezone', 'var/db/zoneinfo'):
tzpath = os.path.join(_root, configfile)
try:
with open(tzpath, 'rb') as tzfile:
tzfile_lines = tzfile.readlines()
for l in tzfile_lines:
# Skip lines which are commented
if l.startswith(b'#'):
continue
# Issue #3 was that /etc/timezone was a zoneinfo file.
# That's a misconfiguration, but we need to handle it gracefully:
if l[:5] == b'TZif2':
continue
etctz = l.strip().decode()
if not etctz:
# Empty file, skip
continue
# Get rid of host definitions and comments:
if ' ' in etctz:
etctz, dummy = etctz.split(' ', 1)
if '#' in etctz:
etctz, dummy = etctz.split('#', 1)
# Return the first valid line we find.
return pytz.timezone(etctz.replace(' ', '_'))
except IOError:
# File doesn't exist or is a directory
continue
# CentOS has a ZONE setting in /etc/sysconfig/clock,
# OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and
# Gentoo has a TIMEZONE setting in /etc/conf.d/clock
# We look through these files for a timezone:
zone_re = re.compile(r'\s*ZONE\s*=\s*\"')
timezone_re = re.compile(r'\s*TIMEZONE\s*=\s*\"')
end_re = re.compile('\"')
for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'):
tzpath = os.path.join(_root, filename)
try:
with open(tzpath, 'rt') as tzfile:
data = tzfile.readlines()
for line in data:
# Look for the ZONE= setting.
match = zone_re.match(line)
if match is None:
# No ZONE= setting. Look for the TIMEZONE= setting.
match = timezone_re.match(line)
if match is not None:
# Some setting existed
line = line[match.end():]
etctz = line[:end_re.search(line).start()]
# We found a timezone
return pytz.timezone(etctz.replace(' ', '_'))
except IOError:
# File doesn't exist or is a directory
continue
# systemd distributions use symlinks that include the zone name,
# see manpage of localtime(5) and timedatectl(1)
tzpath = os.path.join(_root, 'etc/localtime')
if os.path.exists(tzpath) and os.path.islink(tzpath):
tzpath = os.path.realpath(tzpath)
start = tzpath.find("/")+1
        while start != 0:
tzpath = tzpath[start:]
try:
return pytz.timezone(tzpath)
except pytz.UnknownTimeZoneError:
pass
start = tzpath.find("/")+1
# Are we under Termux on Android? It's not officially supported, because
# there is no reasonable way to run tests for this, but let's make an effort.
if os.path.exists('/system/bin/getprop'):
import subprocess
androidtz = subprocess.check_output(['getprop', 'persist.sys.timezone'])
return pytz.timezone(androidtz.strip().decode())
# No explicit setting existed. Use localtime
for filename in ('etc/localtime', 'usr/local/etc/localtime'):
tzpath = os.path.join(_root, filename)
if not os.path.exists(tzpath):
continue
with open(tzpath, 'rb') as tzfile:
return pytz.tzfile.build_tzinfo('local', tzfile)
raise pytz.UnknownTimeZoneError('Can not find any timezone configuration')
def get_localzone():
"""Get the computers configured local timezone, if any."""
global _cache_tz
if _cache_tz is None:
_cache_tz = _get_localzone()
utils.assert_tz_offset(_cache_tz)
return _cache_tz
def reload_localzone():
"""Reload the cached localzone. You need to call this if the timezone has changed."""
global _cache_tz
_cache_tz = _get_localzone()
utils.assert_tz_offset(_cache_tz)
return _cache_tz
| []
| []
| [
"TZ"
]
| [] | ["TZ"] | python | 1 | 0 | |
train_on_the_fly/train_resnet_main.py | import argparse
import os
import pickle
import random
import sys
import time
import numpy as np
import pandas as pd
import resnet_10 as resnet1
import resnet_10_copy as resnet2
import resnet_10_copy2 as resnet3
import tensorflow as tf
import resnet_utils as utils
from tensorflow.python.client import device_lib
# from AdamWOptimizer import create_optimizer  # needed when a weight_decay value is passed (see train_neural_network)
def append_frames(cut_frame_array, required_num_frames):
appended_list_of_frames = list(cut_frame_array)
num_curr_frames = cut_frame_array.shape[0]
num_more_frames = required_num_frames - num_curr_frames
for i in range(num_more_frames):
appended_list_of_frames.append(cut_frame_array[i % num_curr_frames])
return np.array(appended_list_of_frames)
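# Illustrative example (assumed sizes): with 3 frames and required_num_frames=5, the clip is
# extended by wrapping around, i.e. frame indices [0, 1, 2] become [0, 1, 2, 0, 1].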
def train_neural_network(x_inpuT,
y_inpuT,
data_path,
video_store,
save_loss_path,
save_model_path,
image_height,
image_width,
batch_size,
val_batch_size,
learning_rate,
weight_decay,
epochs,
which_model,
num_val_videos,
random_clips,
win_size,
ignore_factor,
dropout,
dropout_rate):
with tf.name_scope("cross_entropy"):
prediction = 0
if which_model == 1:
prediction = resnet1.inference(x_inpuT, dropout_rate, dropout)
elif which_model == 2:
prediction = resnet2.inference(x_inpuT)
if which_model == 3:
prediction = resnet3.inference(x_inpuT)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y_inpuT))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# optimizer = 0
if weight_decay is not None:
print("weight decay applied.")
optimizer = create_optimizer(cost, learning_rate, weight_decay)
else:
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
with tf.name_scope("accuracy"):
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y_inpuT, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
saver = tf.train.Saver(save_relative_paths=True)
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
print("session starts!")
sess.run(tf.global_variables_initializer())
start_time = time.time()
epoch_loss_list = []
val_loss_list = []
train_file = 'train_boxes_with_labels.pkl'
val_file = 'valid_boxes_with_labels.pkl'
with open(os.path.join(data_path, train_file), 'rb') as f:
train_boxes_list = pickle.load(f)
with open(os.path.join(data_path, val_file), 'rb') as f:
valid_boxes_list = pickle.load(f)
for epoch in range(epochs):
print("Epoch {} started!".format(epoch + 1))
epoch_start_time = time.time()
epoch_loss = 0
train_acc = 0
num_batch_completed = 0
window_size = win_size
num_clips_per_video = random_clips
random.seed(7)
print("Random seed fixed for training.")
num_videos = len(train_boxes_list) - (len(train_boxes_list) % batch_size)
num_blocks = int(num_videos / batch_size)
block_x = np.zeros((num_clips_per_video, batch_size, depth, image_height, image_width, 3))
block_y = np.zeros((num_clips_per_video, batch_size, num_classes))
for block_index in range(num_blocks):
for index_in_batch, lst_item in enumerate(
train_boxes_list[block_index * batch_size: (block_index + 1) * batch_size]):
cut_frame_array = utils.get_frame_array(lst_item, video_store, image_height, image_width)
label = lst_item[2]
num_frames = cut_frame_array.shape[0]
required_num_frames = int(window_size * ignore_factor)
if num_frames <= required_num_frames:
cut_frame_array = append_frames(cut_frame_array, required_num_frames)
num_frames = cut_frame_array.shape[0]
for batch_index in range(num_clips_per_video):
start_frame = random.randint(0, num_frames - window_size)
end_frame = start_frame + window_size
block_x[batch_index, index_in_batch, :, :, :, :] = np.array(
cut_frame_array[start_frame: end_frame, :, :, :])
basic_line = [0] * num_classes
basic_line[int(label)] = 1
basic_label = basic_line
block_y[batch_index, index_in_batch, :] = np.array(basic_label)
for batch_index in range(num_clips_per_video):
batch_start_time = time.time()
mini_batch_x = block_x[batch_index, :, :, :, :, :]
mini_batch_x = mini_batch_x / 255.0
mini_batch_y = block_y[batch_index, :, :]
perm = np.random.permutation(batch_size)
mini_batch_x = mini_batch_x[perm]
mini_batch_y = mini_batch_y[perm]
_optimizer, _cost, _prediction, _accuracy = sess.run([optimizer, cost, prediction, accuracy],
feed_dict={x_inpuT: mini_batch_x,
y_inpuT: mini_batch_y})
epoch_loss += _cost
train_acc += _accuracy
num_batch_completed += 1
batch_end_time = time.time()
log1 = "\rEpoch: {}, " \
"batches completed: {}, " \
"time taken: {:.5f}, " \
"loss: {:.6f}, " \
"accuracy: {:.4f} \n". \
format(
epoch + 1,
num_batch_completed,
batch_end_time - batch_start_time,
epoch_loss / (batch_size * num_batch_completed),
_accuracy)
print(log1)
sys.stdout.flush()
del block_x, block_y
# validation loss
val_loss = 0
val_acc = 0
val_num_batch_completed = 0
num_clips_per_video = 1
val_num_videos = num_val_videos
val_num_blocks = int(val_num_videos / val_batch_size)
val_block_x = np.zeros((num_clips_per_video, val_batch_size, depth, image_height, image_width, 3))
val_block_y = np.zeros((num_clips_per_video, val_batch_size, num_classes))
random.seed(23)
print("Random seed fixed for validation.")
for block_index in range(val_num_blocks):
print("{}/{} validation block started.".format(block_index + 1, val_num_blocks))
for index_in_batch, lst_item in enumerate(
valid_boxes_list[block_index * val_batch_size: (block_index + 1) * val_batch_size]):
cut_frame_array = utils.get_frame_array(lst_item, video_store, image_height, image_width)
label = lst_item[2]
num_frames = cut_frame_array.shape[0]
required_num_frames = int(window_size * ignore_factor)
if num_frames <= window_size:
cut_frame_array = append_frames(cut_frame_array, required_num_frames)
num_frames = cut_frame_array.shape[0]
for batch_index in range(num_clips_per_video):
start_frame = random.randint(0, num_frames - window_size)
end_frame = start_frame + window_size
val_block_x[batch_index, index_in_batch, :, :, :, :] = np.array(
cut_frame_array[start_frame: end_frame, :, :, :])
basic_line = [0] * num_classes
basic_line[int(label)] = 1
basic_label = basic_line
val_block_y[batch_index, index_in_batch, :] = np.array(basic_label)
for batch_index in range(num_clips_per_video):
val_batch_x = val_block_x[batch_index, :, :, :, :, :]
val_batch_x = val_batch_x / 255.0
val_batch_y = val_block_y[batch_index, :, :]
val_cost, val_batch_accuracy = sess.run([cost, accuracy],
feed_dict={x_inpuT: val_batch_x, y_inpuT: val_batch_y})
val_acc += val_batch_accuracy
val_loss += val_cost
val_num_batch_completed += 1
del val_block_x, val_block_y
epoch_loss = epoch_loss / (batch_size * num_batch_completed)
train_acc = train_acc / num_batch_completed
val_loss /= (val_batch_size * val_num_batch_completed)
val_acc = val_acc / val_num_batch_completed
epoch_end_time = time.time()
log3 = "Epoch {} done; " \
"Time Taken: {:.4f}s; " \
"Train_loss: {:.6f}; " \
"Val_loss: {:.6f}; " \
"Train_acc: {:.4f}; " \
"Val_acc: {:.4f}; " \
"Train batches: {}; " \
"Val batches: {};\n". \
format(epoch + 1, epoch_end_time - epoch_start_time, epoch_loss, val_loss, train_acc, val_acc,
num_batch_completed, val_num_batch_completed)
print(log3)
if save_loss_path is not None:
file1 = open(save_loss_path, "a")
file1.write(log3)
file1.close()
epoch_loss_list.append(epoch_loss)
val_loss_list.append(val_loss)
if save_model_path is not None:
saver.save(sess, save_model_path)
end_time = time.time()
print('Time elapse: ', str(end_time - start_time))
print(epoch_loss_list)
if save_loss_path is not None:
file1 = open(save_loss_path, "a")
file1.write("Train Loss List: {} \n".format(str(epoch_loss_list)))
file1.write("Val Loss List: {} \n".format(str(val_loss_list)))
file1.close()
if __name__ == '__main__':
np.random.seed(0)
parser = argparse.ArgumentParser()
parser.add_argument('-cs', action='store', dest='check_singularity', type=int)
parser.add_argument('-ih', action='store', dest='height', type=int)
parser.add_argument('-iw', action='store', dest='width', type=int)
parser.add_argument('-bs', action='store', dest='batch_size', type=int)
parser.add_argument('-vbs', action='store', dest='val_batch_size', type=int)
parser.add_argument('-lr', action='store', dest='learning_rate', type=float)
parser.add_argument('-wd', action='store', dest='weight_decay', type=float, const=None)
parser.add_argument('-e', action='store', dest='epochs', type=int)
parser.add_argument('-nvv', action='store', dest='num_val_videos', type=int)
parser.add_argument('-rc', action='store', dest='random_clips', type=int)
parser.add_argument('-ws', action='store', dest='win_size', type=int)
parser.add_argument('-slp', action='store', dest='save_loss_path', const=None)
parser.add_argument('-smp', action='store', dest='save_model_path', const=None)
parser.add_argument('-mn', action='store', dest='model_num', type=int)
parser.add_argument('-vd', action='store', dest='visible_devices')
parser.add_argument('-nd', action='store', dest='num_device', type=int)
parser.add_argument('-if', action='store', dest='ign_fact', type=float, const=None)
parser.add_argument('-dp', action='store', dest='dropout', type=int, const=None)
parser.add_argument('-dr', action='store', dest='dropout_rate', type=float, const=None)
results = parser.parse_args()
arg_check_singularity = results.check_singularity
arg_height = results.height
arg_width = results.width
arg_batch_size = results.batch_size
arg_val_batch_size = results.val_batch_size
arg_lr = results.learning_rate
arg_wd = results.weight_decay
arg_epochs = results.epochs
arg_num_val_videos = results.num_val_videos
arg_random_clips = results.random_clips
arg_win_size = results.win_size
arg_save_loss_path = results.save_loss_path
arg_save_model_path = results.save_model_path
arg_model_num = results.model_num
arg_visible_devices = results.visible_devices
arg_num_device = results.num_device
arg_ign_fact = results.ign_fact
arg_dropout = results.dropout
arg_dropout_rate = results.dropout_rate
data_path = "/home/axp798/axp798gallinahome/data/jester/boxes/"
if arg_check_singularity:
data_path = "/scratch/jester/boxes/"
video_store = "/home/axp798/axp798gallinahome/jester_datase/20bn-jester-v1/"
if arg_check_singularity:
video_store = "/tmp/20bn-jester-v1/"
save_loss = "/home/axp798/axp798gallinahome/Gesture-Detection/models/loss_jester/"
if arg_check_singularity:
save_loss = "/home/models/loss_jester/"
save_model = "/home/axp798/axp798gallinahome/Gesture-Detection/models/saved_models_jester/"
if arg_check_singularity:
save_model = "/home/models/saved_models_jester/"
ar_save_loss_path = None
if arg_save_loss_path is not None:
ar_save_loss_path = save_loss + "{}".format(arg_save_loss_path)
ar_save_model_path = None
if arg_save_model_path is not None:
path = save_model + "{}/".format(arg_save_model_path)
if not os.path.exists(path):
os.mkdir(path)
ar_save_model_path = path + "model"
if ar_save_loss_path is not None:
file1 = open(ar_save_loss_path, "w")
file1.write("Params: {} \n".format(results))
file1.write("Losses: \n")
file1.close()
depth = arg_win_size
num_classes = 27
os.environ['CUDA_VISIBLE_DEVICES'] = "{}".format(arg_visible_devices)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "2"
print(device_lib.list_local_devices())
choose_device = "/device:GPU:{}".format(arg_num_device)
with tf.device(choose_device):
x_inpuT = tf.placeholder(tf.float32, shape=[arg_batch_size, depth, arg_height, arg_width, 3])
y_inpuT = tf.placeholder(tf.float32, shape=[arg_batch_size, num_classes])
train_neural_network(x_inpuT, y_inpuT, data_path, video_store,
save_loss_path=ar_save_loss_path,
save_model_path=ar_save_model_path,
image_height=arg_height,
image_width=arg_width,
batch_size=arg_batch_size,
learning_rate=arg_lr,
weight_decay=arg_wd,
epochs=arg_epochs,
val_batch_size=arg_val_batch_size,
which_model=arg_model_num,
num_val_videos=arg_num_val_videos,
random_clips=arg_random_clips,
win_size=arg_win_size,
ignore_factor=arg_ign_fact,
dropout=arg_dropout,
dropout_rate=arg_dropout_rate)
| []
| []
| [
"CUDA_VISIBLE_DEVICES",
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["CUDA_VISIBLE_DEVICES", "TF_CPP_MIN_LOG_LEVEL"] | python | 2 | 0 | |
examples/A3C-Gym/train-atari-qfunc-supervised.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: train-atari.py
# Author: Yuxin Wu
import argparse
import cv2
import gym
import multiprocessing as mp
import numpy as np
import pickle
import os
import six
import sys
import uuid
import tensorflow as tf
from six.moves import queue
from tensorpack import *
from tensorpack.tfutils.gradproc import MapGradient, SummaryGradient
from tensorpack.utils.concurrency import ensure_proc_terminate, start_proc_mask_signal
from tensorpack.utils.gpu import get_num_gpu
from tensorpack.utils.serialize import dumps
from atari_wrapper import FireResetEnv, FrameStack, LimitLength, MapState
from common import Evaluator, eval_model_multithread, play_n_episodes
from simulator import SimulatorMaster, SimulatorProcess, TransitionExperience
if six.PY3:
from concurrent import futures
CancelledError = futures.CancelledError
else:
CancelledError = Exception
IMAGE_SIZE = (84, 84)
FRAME_HISTORY = 4
GAMMA = 0.99
STATE_SHAPE = IMAGE_SIZE + (3, )
LOCAL_TIME_MAX = 5
STEPS_PER_EPOCH = 6000
EVAL_EPISODE = 50
BATCH_SIZE = 128
PREDICT_BATCH_SIZE = 16 # batch for efficient forward
SIMULATOR_PROC = mp.cpu_count() * 2
PREDICTOR_THREAD_PER_GPU = 4
NUM_ACTIONS = None
ENV_NAME = None
LOG_DIRNAME = None
MODEL_DIRNAME = None
EPOCHES = 100
dirname = '/mnt/research/judy/reward_shaping/sanity_qfunc_learn/'
def process_rewards(rewards, episodes=10):
discounted_rewards = []
for i in range(episodes):
rs = rewards[i]
discounted_r = np.zeros((rs.shape[0], 1))
rs = np.clip(rs, -1, 1)
R = 0
for t in reversed(range(len(rs))):
R = R * GAMMA + rs[t]
discounted_r[t] = R
discounted_rewards.append(discounted_r)
#break
return discounted_rewards
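# Illustrative example (assumed rewards): for a 3-step episode with clipped rewards [0, 0, 1]
# and GAMMA=0.99, the discounted returns computed above are [0.9801, 0.99, 1.0].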
def find_available_data(pathdir="/mnt/research/judy/reward_shaping/expert_data/"):
file_ids = []
for file in os.listdir(pathdir):
if file.endswith(".npz"):
file_id = file.strip(".npz").split("_")[1]
file_ids.append(file_id)
return file_ids
def get_player(train=False, dumpdir=None):
env = gym.make(ENV_NAME)
if dumpdir:
env = gym.wrappers.Monitor(env, dumpdir, video_callable=lambda _: True)
env = FireResetEnv(env)
env = MapState(env, lambda im: cv2.resize(im, IMAGE_SIZE))
env = FrameStack(env, 4)
if train:
env = LimitLength(env, 60000)
return env
class SupervisedModel(ModelDesc):
def inputs(self):
assert NUM_ACTIONS is not None
return [tf.TensorSpec((None,) + STATE_SHAPE + (FRAME_HISTORY, ), tf.uint8, 'state'),
tf.TensorSpec((None,), tf.int64, 'action'),
tf.TensorSpec((None,), tf.float32, 'futurereward'),
]
def _get_NN_prediction(self, state):
assert state.shape.rank == 5 # Batch, H, W, Channel, History
state = tf.transpose(state, [0, 1, 2, 4, 3]) # swap channel & history, to be compatible with old models
image = tf.reshape(state, [-1] + list(STATE_SHAPE[:2]) + [STATE_SHAPE[2] * FRAME_HISTORY])
image = tf.cast(image, tf.float32) / 255.0
with argscope(Conv2D, activation=tf.nn.relu):
l = Conv2D('conv0', image, 32, 5)
l = MaxPooling('pool0', l, 2)
l = Conv2D('conv1', l, 32, 5)
l = MaxPooling('pool1', l, 2)
l = Conv2D('conv2', l, 64, 4)
l = MaxPooling('pool2', l, 2)
l = Conv2D('conv3', l, 64, 3)
l = FullyConnected('fc0', l, 512)
l = PReLU('prelu', l)
logits = FullyConnected('fc-pi', l, NUM_ACTIONS) # unnormalized policy
q_value = FullyConnected('fc-v', l, 1)
return logits, q_value
def build_graph(self, resume=False):
## create graph, session
tf.reset_default_graph()
sess = tf.Session()
action = tf.placeholder(dtype=tf.int64, shape=(None,1))
state = tf.placeholder(dtype=tf.uint8, shape= (None,) + STATE_SHAPE + (FRAME_HISTORY, ) )
futurereward = tf.placeholder(dtype=tf.float32, shape=(None,1))
logits, q_value = self._get_NN_prediction(state)
policy = tf.nn.softmax(logits, name='policy')
log_probs = tf.log(policy + 1e-6)
one_hot_actions = tf.one_hot(action, NUM_ACTIONS)
one_hot_actions = tf.reshape(one_hot_actions, [-1, NUM_ACTIONS])
xentropy_loss = tf.losses.softmax_cross_entropy(
one_hot_actions, # one-hot-labels
logits, # logits
)
value_loss = tf.nn.l2_loss(q_value - futurereward, name='q_value_loss')
entropy_beta = tf.get_variable(
'entropy_beta',
shape=[],
initializer=tf.constant_initializer(0.01),
trainable=False
)
cost = tf.add_n([xentropy_loss * entropy_beta, value_loss])
confience_a_given_s = tf.reduce_mean(
tf.reduce_sum(
policy * one_hot_actions, 1)
)
lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
optimizer_op = tf.train.AdamOptimizer(lr, epsilon=1e-3).minimize(cost)
########### Add gradient clipping #########
# opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)#.minimize(cost)
# gvs = opt.compute_gradients(cost)
# capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
# tf.clip_by_norm(grad, 0.1 * tf.cast(tf.size(grad), tf.float32))
# optimizer_op = opt.apply_gradients(capped_gvs)
########### Add gradient clipping #########
# Create a summary to monitor cost tensors
tf.summary.scalar("loss", cost)
tf.summary.scalar("cross_entropy_loss", xentropy_loss)
tf.summary.scalar("q_value_loss", value_loss)
# Create a summary to monitor confidence tensor
tf.summary.scalar("mean_pi_a_given_s", confience_a_given_s)
# Merge all summaries into a single op
merged = tf.summary.merge_all()
# Create a summary to log real future rewards
tf.summary.scalar("futurereward", futurereward)
        ## Load parameters from a checkpoint when resuming; otherwise initialize them
saver = tf.compat.v1.train.Saver()
if resume:
print('loading and building pretrained policy')
saver.restore(sess, tf.train.latest_checkpoint(MODEL_DIRNAME))
print('loaded and built successfully')
else:
init = tf.global_variables_initializer()
sess.run(init)
print('model initialized successfully')
writer = tf.compat.v1.summary.FileWriter(LOG_DIRNAME, sess.graph)
results = {}
results["cost"] = cost
results["policy"] = policy
results["logits"] = logits
results["merged"] = merged
results["writer"] = writer
results["actions_ph"] = action
results["futurereward_ph"] = futurereward
results["states_ph"] = state
results["optimizer"] = optimizer_op
results["saver"] = saver
results["q_value"] = q_value
results["futurereward"] = futurereward
#self.writer = writer
#self.optimizer = opt
#self.actions_ph = action
#self.futurereward_ph = futurereward
#self.states_ph = state
self.handler = results
self.sess = sess
def train(self, file_ids, epoches=1, initial_episode=0):
episode_index = initial_episode
for epoch in range(epoches):
for file_id in file_ids:
states, actions, rewards = self.load_data(file_id=file_id)
episodes = len(rewards) # how many episodes are in this file
rewards = process_rewards(rewards, episodes=episodes) # get discounted rewards
## start training
##### DEBUG ######
#weights = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name == 'fc-pi/b:0']
##### DEBUG ######
for e in range(episodes):
episode_index += 1
# get each episode
print("File id = {}, Episode id ={}".format(file_id, episode_index))
e_state, e_action, e_reward = states[e], actions[e], rewards[e]
# state steps should be 1 more than action/reward steps
stride = BATCH_SIZE
pos, frame_size = 0, len(e_action)
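# iterate over this episode in mini-batches of up to BATCH_SIZE frames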
while True:
end = frame_size if pos+stride>=frame_size else pos+stride
batch_x = np.reshape(e_state[pos:end], (-1,) + STATE_SHAPE + (FRAME_HISTORY,) )
batch_y = np.reshape(e_action[pos:end], (-1, 1))
batch_r = np.reshape(e_reward[pos:end], (-1,1))
_, loss_val, tf_summary = self.sess.run(
[
self.handler["optimizer"],
self.handler["cost"],
self.handler["merged"],
# weights[0], # DEBUG
],
feed_dict={
self.handler["states_ph"]:batch_x,
self.handler["futurereward_ph"]:batch_r,
self.handler["actions_ph"]:batch_y
}
)
pos = end
## release memory space for each mini-batch
del batch_x, batch_y, batch_r
if pos >= frame_size:
# end of episode
break
## print("Weight value: ", weight)
information = "Update Episode {:2d}, Episode Length {:5d}, Running Loss {:.4f}".format(episode_index, frame_size, loss_val)
logger.info(information)
self.handler["writer"].add_summary(tf_summary, episode_index)
## save session and Episode index
self.handler["saver"].save(self.sess, os.path.join(MODEL_DIRNAME, "checkpoint.ckpt") )
fp = open(os.path.join(MODEL_DIRNAME, "step.p"), "wb")
pickle.dump(episode_index, fp)
fp.close()
del states, actions, rewards
#loss_summary = tf.Summary(value=[tf.Summary.Value(tag="running_loss", simple_value=loss_val)])
#writer.add_summary(loss_summary, global_step=episode_number)
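# load one batch file of recorded expert trajectories (observations, actions, rewards)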
def load_data(self, pathdir="/mnt/research/judy/reward_shaping/expert_data/", file_id=1):
path = os.path.join(pathdir, "batch_{}.npz".format(file_id))
data = np.load(path, allow_pickle=True)
states = data["observations"]
actions = data["actions"]
rewards = data["rewards"]
return states, actions, rewards
def train(args):
assert tf.test.is_gpu_available(), "Training requires GPUs!"
logger.set_logger_dir(LOG_DIRNAME)
# assign GPUs for training & inference
num_gpu = get_num_gpu()
if num_gpu == 0:
logger.warn("Training without GPU !")
exit()
# setup model
model=SupervisedModel()
model.build_graph(resume=args.resume)
# training model using loaded expert data
file_ids = sorted(find_available_data())
#N = int( min(10, len(file_ids) * 0.3))
#file_ids = file_ids[:N]
step_file = os.path.join(MODEL_DIRNAME, "step.p")
if args.resume and os.path.exists(step_file):
with open(step_file, 'rb') as f:
initial_episode = pickle.load(f)
else:
initial_episode = 0
model.train(file_ids, epoches=EPOCHES, initial_episode=initial_episode)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model', default="/mnt/research/judy/reward_shaping/Pong-v0.npz", type=str)
parser.add_argument('--env', help='env', default="Pong-v0", type=str)
parser.add_argument('--task', help='task to perform',
choices=['play', 'eval', 'train', 'dump_video'], default='train')
parser.add_argument('--output', help='output directory for submission', default='output_dir')
parser.add_argument('--episode', help='number of episode to eval', default=1, type=int)
parser.add_argument('--render', help='If render the environment', default=False, type=bool)
parser.add_argument('--save', help='If save episodes', default=False, type=bool)
parser.add_argument('--save_id', help='Index of Batches to be collected', default=1, type=int)
parser.add_argument('--resume', help='Resume Model', default=True)
args = parser.parse_args()
ENV_NAME = args.env
NUM_ACTIONS = get_player().action_space.n
LOG_DIRNAME = os.path.join(dirname, 'supervised-atari-{}'.format(ENV_NAME))
MODEL_DIRNAME = os.path.join(dirname, "model_checkpoint")
logger.info("Environment: {}, number of actions: {}".format(ENV_NAME, NUM_ACTIONS))
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
train(args)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
libmachine/host.go | package libmachine
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"github.com/docker/machine/drivers"
"github.com/docker/machine/libmachine/auth"
"github.com/docker/machine/libmachine/engine"
"github.com/docker/machine/libmachine/provision"
"github.com/docker/machine/libmachine/provision/pkgaction"
"github.com/docker/machine/libmachine/swarm"
"github.com/docker/machine/log"
"github.com/docker/machine/ssh"
"github.com/docker/machine/state"
"github.com/docker/machine/utils"
)
var (
validHostNameChars = `[a-zA-Z0-9\-\.]`
validHostNamePattern = regexp.MustCompile(`^` + validHostNameChars + `+$`)
errMachineMustBeRunningForUpgrade = errors.New("Error: machine must be running to upgrade.")
)
type Host struct {
Name string `json:"-"`
DriverName string
Driver drivers.Driver
StorePath string
HostOptions *HostOptions
// deprecated options; these are left to assist in config migrations
SwarmHost string
SwarmMaster bool
SwarmDiscovery string
CaCertPath string
PrivateKeyPath string
ServerCertPath string
ServerKeyPath string
ClientCertPath string
ClientKeyPath string
}
type HostOptions struct {
Driver string
Memory int
Disk int
EngineOptions *engine.EngineOptions
SwarmOptions *swarm.SwarmOptions
AuthOptions *auth.AuthOptions
}
type HostMetadata struct {
DriverName string
HostOptions HostOptions
StorePath string
CaCertPath string
PrivateKeyPath string
ServerCertPath string
ServerKeyPath string
ClientCertPath string
}
type HostListItem struct {
Name string
Active bool
DriverName string
State state.State
URL string
SwarmOptions swarm.SwarmOptions
}
func NewHost(name, driverName string, hostOptions *HostOptions) (*Host, error) {
authOptions := hostOptions.AuthOptions
storePath := filepath.Join(utils.GetMachineDir(), name)
driver, err := drivers.NewDriver(driverName, name, storePath, authOptions.CaCertPath, authOptions.PrivateKeyPath)
if err != nil {
return nil, err
}
return &Host{
Name: name,
DriverName: driverName,
Driver: driver,
StorePath: storePath,
HostOptions: hostOptions,
}, nil
}
func LoadHost(name string, StorePath string) (*Host, error) {
if _, err := os.Stat(StorePath); os.IsNotExist(err) {
return nil, fmt.Errorf("Host %q does not exist", name)
}
host := &Host{Name: name, StorePath: StorePath}
if err := host.LoadConfig(); err != nil {
return nil, err
}
return host, nil
}
func ValidateHostName(name string) bool {
return validHostNamePattern.MatchString(name)
}
func (h *Host) Create(name string) error {
// create the instance
if err := h.Driver.Create(); err != nil {
return err
}
// save to store
if err := h.SaveConfig(); err != nil {
return err
}
// TODO: Not really a fan of just checking "none" here.
if h.Driver.DriverName() != "none" {
if err := WaitForSSH(h); err != nil {
return err
}
provisioner, err := provision.DetectProvisioner(h.Driver)
if err != nil {
return err
}
if err := provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil {
return err
}
}
return nil
}
func (h *Host) RunSSHCommand(command string) (ssh.Output, error) {
return drivers.RunSSHCommandFromDriver(h.Driver, command)
}
func (h *Host) CreateSSHShell() error {
addr, err := h.Driver.GetSSHHostname()
if err != nil {
return err
}
port, err := h.Driver.GetSSHPort()
if err != nil {
return err
}
auth := &ssh.Auth{
Keys: []string{h.Driver.GetSSHKeyPath()},
}
client, err := ssh.NewClient(h.Driver.GetSSHUsername(), addr, port, auth)
if err != nil {
return err
}
return client.Shell()
}
func (h *Host) Start() error {
if err := h.Driver.Start(); err != nil {
return err
}
if err := h.SaveConfig(); err != nil {
return err
}
return utils.WaitFor(drivers.MachineInState(h.Driver, state.Running))
}
func (h *Host) Stop() error {
if err := h.Driver.Stop(); err != nil {
return err
}
if err := h.SaveConfig(); err != nil {
return err
}
return utils.WaitFor(drivers.MachineInState(h.Driver, state.Stopped))
}
func (h *Host) Kill() error {
if err := h.Driver.Stop(); err != nil {
return err
}
if err := h.SaveConfig(); err != nil {
return err
}
return utils.WaitFor(drivers.MachineInState(h.Driver, state.Stopped))
}
func (h *Host) Restart() error {
if drivers.MachineInState(h.Driver, state.Running)() {
if err := h.Stop(); err != nil {
return err
}
if err := utils.WaitFor(drivers.MachineInState(h.Driver, state.Stopped)); err != nil {
return err
}
}
if err := h.Start(); err != nil {
return err
}
if err := utils.WaitFor(drivers.MachineInState(h.Driver, state.Running)); err != nil {
return err
}
if err := h.SaveConfig(); err != nil {
return err
}
return nil
}
func (h *Host) Upgrade() error {
machineState, err := h.Driver.GetState()
if err != nil {
return err
}
if machineState != state.Running {
log.Fatal(errMachineMustBeRunningForUpgrade)
}
provisioner, err := provision.DetectProvisioner(h.Driver)
if err != nil {
return err
}
if err := provisioner.Package("docker", pkgaction.Upgrade); err != nil {
return err
}
if err := provisioner.Service("docker", pkgaction.Restart); err != nil {
return err
}
return nil
}
func (h *Host) Remove(force bool) error {
if err := h.Driver.Remove(); err != nil {
if !force {
return err
}
}
if err := h.SaveConfig(); err != nil {
return err
}
return h.removeStorePath()
}
func (h *Host) removeStorePath() error {
file, err := os.Stat(h.StorePath)
if err != nil {
return err
}
if !file.IsDir() {
return fmt.Errorf("%q is not a directory", h.StorePath)
}
return os.RemoveAll(h.StorePath)
}
func (h *Host) GetURL() (string, error) {
return h.Driver.GetURL()
}
func (h *Host) LoadConfig() error {
data, err := ioutil.ReadFile(filepath.Join(h.StorePath, "config.json"))
if err != nil {
return err
}
// First pass: find the driver name and load the driver
var hostMetadata HostMetadata
if err := json.Unmarshal(data, &hostMetadata); err != nil {
return err
}
meta := FillNestedHostMetadata(&hostMetadata)
authOptions := meta.HostOptions.AuthOptions
driver, err := drivers.NewDriver(hostMetadata.DriverName, h.Name, h.StorePath, authOptions.CaCertPath, authOptions.PrivateKeyPath)
if err != nil {
return err
}
h.Driver = driver
// Second pass: unmarshal driver config into correct driver
if err := json.Unmarshal(data, &h); err != nil {
return err
}
return nil
}
func (h *Host) ConfigureAuth() error {
provisioner, err := provision.DetectProvisioner(h.Driver)
if err != nil {
return err
}
if err := provision.ConfigureAuth(provisioner); err != nil {
return err
}
return nil
}
func (h *Host) SaveConfig() error {
data, err := json.Marshal(h)
if err != nil {
return err
}
if err := ioutil.WriteFile(filepath.Join(h.StorePath, "config.json"), data, 0600); err != nil {
return err
}
return nil
}
func (h *Host) PrintIP() error {
if ip, err := h.Driver.GetIP(); err != nil {
return err
} else {
fmt.Println(ip)
}
return nil
}
func WaitForSSH(h *Host) error {
return drivers.WaitForSSH(h.Driver)
}
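// getHostState resolves the current state and URL of a single host and sends the result on the given channel.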
func getHostState(host Host, hostListItemsChan chan<- HostListItem) {
currentState, err := host.Driver.GetState()
if err != nil {
log.Errorf("error getting state for host %s: %s", host.Name, err)
}
url, err := host.GetURL()
if err != nil {
if err == drivers.ErrHostIsNotRunning {
url = ""
} else {
log.Errorf("error getting URL for host %s: %s", host.Name, err)
}
}
dockerHost := os.Getenv("DOCKER_HOST")
hostListItemsChan <- HostListItem{
Name: host.Name,
Active: dockerHost == url && currentState != state.Stopped,
DriverName: host.Driver.DriverName(),
State: currentState,
URL: url,
SwarmOptions: *host.HostOptions.SwarmOptions,
}
}
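// GetHostListItems queries the state of every host concurrently and collects the results.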
func GetHostListItems(hostList []*Host) []HostListItem {
hostListItems := []HostListItem{}
hostListItemsChan := make(chan HostListItem)
for _, host := range hostList {
go getHostState(*host, hostListItemsChan)
}
for _ = range hostList {
hostListItems = append(hostListItems, <-hostListItemsChan)
}
close(hostListItemsChan)
return hostListItems
}
| [
"\"DOCKER_HOST\""
]
| []
| [
"DOCKER_HOST"
]
| [] | ["DOCKER_HOST"] | go | 1 | 0 | |
.github/scripts/process_commit.py | #!/usr/bin/env python3
"""
Given a commit SHA, this script finds the PR creator responsible for labeling the associated PR. It is used by the
workflow in '.github/workflows/pr-labels.yml'. If no PR is associated with the commit, or the PR is already properly
labeled, this script is a no-op.
Note: we ping the user only, not the reviewers, as the reviewers can sometimes be external to pytorch
with no labeling responsibility, so we don't want to bother them.
This script is based on: https://github.com/pytorch/vision/blob/main/.github/process_commit.py
"""
import sys
from typing import Any, Set, Tuple, List
import re
import os
import json
import requests
# For a PR to be properly labeled it should have release notes label and one topic label
PULL_REQUEST_EXP = "Pull Request resolved:.*pull/(.*)"
PRIMARY_LABEL_FILTER = "release notes:"
SECONDARY_LABELS = {
"topic: bc_breaking",
"topic: deprecation",
"topic: new feature",
"topic: improvements",
"topic: bug fixes",
"topic: performance",
"topic: documentation",
"topic: developer feature",
"topic: non-user visible",
}
PYTORCH_REPO = "https://api.github.com/repos/pytorch/pytorch"
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
REQUEST_HEADERS = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {GITHUB_TOKEN}'}
def query_pytorch(cmd: str) -> Any:
response = requests.get(f"{PYTORCH_REPO}/{cmd}", headers=REQUEST_HEADERS)
return response.json()
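# Extract the PR number from the "Pull Request resolved: .../pull/<number>" line of the commit message, if present.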
def get_pr_number(commit_hash: str) -> Any:
data = query_pytorch(f"commits/{commit_hash}")
if not data or (not data["commit"]["message"]):
return None
message = data["commit"]["message"]
p = re.compile(PULL_REQUEST_EXP)
result = p.search(message)
if not result:
return None
return result.group(1)
def get_pr_author_and_labels(pr_number: int) -> Tuple[str, Set[str]]:
# See https://docs.github.com/en/rest/reference/pulls#get-a-pull-request
data = query_pytorch(f"pulls/{pr_number}")
user = data["user"]["login"]
labels = {label["name"] for label in data["labels"]}
return user, labels
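# Collect all label names defined in the repository, paging through the label API 100 entries at a time.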
def get_repo_labels() -> List[str]:
collected_labels: List[str] = list()
for page in range(0, 10):
response = query_pytorch(f"labels?per_page=100&page={page}")
page_labels = list(map(lambda x: str(x["name"]), response))
if not page_labels:
break
collected_labels += page_labels
return collected_labels
def post_pytorch_comment(pr_number: int, merger: str) -> Any:
message = {'body' : f"Hey {merger}." + """
You've committed this PR, but it does not have both a 'release notes: ...' and 'topics: ...' label.
Please add one of each to the PR. The 'release notes: ...' label should represent the part of
PyTorch that this PR changes (fx, autograd, distributed, etc) and the 'topics: ...' label should
represent the kind of PR it is (non-user visible, new feature, bug fix, perf improvement, etc).
The list of valid labels can be found [here](https://github.com/pytorch/pytorch/labels?q=release+notes)
for the 'release notes: ...' and [here](https://github.com/pytorch/pytorch/labels?q=topics) for the
'topics: ...'."""}
response = requests.post(
f"{PYTORCH_REPO}/issues/{pr_number}/comments",
json.dumps(message),
headers=REQUEST_HEADERS)
return response.json()
if __name__ == "__main__":
commit_hash = sys.argv[1]
pr_number = get_pr_number(commit_hash)
if not pr_number:
sys.exit(0)
user, labels = get_pr_author_and_labels(pr_number)
repo_labels = get_repo_labels()
primary_labels = set(filter(lambda x: x.startswith(PRIMARY_LABEL_FILTER), repo_labels))
is_properly_labeled = bool(primary_labels.intersection(labels) and SECONDARY_LABELS.intersection(labels))
if not is_properly_labeled:
post_pytorch_comment(pr_number, user)
| []
| []
| [
"GITHUB_TOKEN"
]
| [] | ["GITHUB_TOKEN"] | python | 1 | 0 | |
tests/test_fgs_imaging.py | """System test of mirage/FGS that creates simulations based on a .yaml file.
Authors
-------
- Johannes Sahlmann
Use
---
>>> pytest -s test_fgs_imaging.py
"""
import os
import pytest
from mirage import imaging_simulator as im
os.environ['TEST_FGS_DATA'] = os.path.join(os.path.dirname(__file__), 'test_data/FGS')
# Determine if tests are being run on Github Actions CI
ON_GITHUB = '/home/runner' in os.path.expanduser('~')
@pytest.mark.skipif(ON_GITHUB,
reason="Cannot access mirage data in the central storage directory from Githun Actions CI.")
def test_fgs_imaging():
m = im.ImgSim(offline=True)
m.paramfile = os.path.join(os.path.dirname(__file__), 'test_data/FGS/fgs_imaging_example.yaml')
m.create()
| []
| []
| [
"TEST_FGS_DATA"
]
| [] | ["TEST_FGS_DATA"] | python | 1 | 0 | |
examples/main.go | package main
import (
"fmt"
"html/template"
"net/http"
"os"
"sort"
"log"
"github.com/gorilla/pat"
"github.com/viddsee/goth"
"github.com/viddsee/goth/gothic"
"github.com/viddsee/goth/providers/amazon"
"github.com/viddsee/goth/providers/auth0"
"github.com/viddsee/goth/providers/azuread"
"github.com/viddsee/goth/providers/battlenet"
"github.com/viddsee/goth/providers/bitbucket"
"github.com/viddsee/goth/providers/box"
"github.com/viddsee/goth/providers/dailymotion"
"github.com/viddsee/goth/providers/deezer"
"github.com/viddsee/goth/providers/digitalocean"
"github.com/viddsee/goth/providers/discord"
"github.com/viddsee/goth/providers/dropbox"
"github.com/viddsee/goth/providers/eveonline"
"github.com/viddsee/goth/providers/facebook"
"github.com/viddsee/goth/providers/fitbit"
"github.com/viddsee/goth/providers/github"
"github.com/viddsee/goth/providers/gitlab"
"github.com/viddsee/goth/providers/gplus"
"github.com/viddsee/goth/providers/heroku"
"github.com/viddsee/goth/providers/instagram"
"github.com/viddsee/goth/providers/intercom"
"github.com/viddsee/goth/providers/lastfm"
"github.com/viddsee/goth/providers/linkedin"
"github.com/viddsee/goth/providers/meetup"
"github.com/viddsee/goth/providers/microsoftonline"
"github.com/viddsee/goth/providers/naver"
"github.com/viddsee/goth/providers/onedrive"
"github.com/viddsee/goth/providers/openidConnect"
"github.com/viddsee/goth/providers/paypal"
"github.com/viddsee/goth/providers/salesforce"
"github.com/viddsee/goth/providers/slack"
"github.com/viddsee/goth/providers/soundcloud"
"github.com/viddsee/goth/providers/spotify"
"github.com/viddsee/goth/providers/steam"
"github.com/viddsee/goth/providers/stripe"
"github.com/viddsee/goth/providers/twitch"
"github.com/viddsee/goth/providers/twitter"
"github.com/viddsee/goth/providers/uber"
"github.com/viddsee/goth/providers/vk"
"github.com/viddsee/goth/providers/wepay"
"github.com/viddsee/goth/providers/xero"
"github.com/viddsee/goth/providers/yahoo"
"github.com/viddsee/goth/providers/yammer"
)
func main() {
goth.UseProviders(
twitter.New(os.Getenv("TWITTER_KEY"), os.Getenv("TWITTER_SECRET"), "http://localhost:3000/auth/twitter/callback"),
// If you'd like to use authenticate instead of authorize in Twitter provider, use this instead.
// twitter.NewAuthenticate(os.Getenv("TWITTER_KEY"), os.Getenv("TWITTER_SECRET"), "http://localhost:3000/auth/twitter/callback"),
facebook.New(os.Getenv("FACEBOOK_KEY"), os.Getenv("FACEBOOK_SECRET"), "http://localhost:3000/auth/facebook/callback"),
fitbit.New(os.Getenv("FITBIT_KEY"), os.Getenv("FITBIT_SECRET"), "http://localhost:3000/auth/fitbit/callback"),
gplus.New(os.Getenv("GPLUS_KEY"), os.Getenv("GPLUS_SECRET"), "http://localhost:3000/auth/gplus/callback"),
github.New(os.Getenv("GITHUB_KEY"), os.Getenv("GITHUB_SECRET"), "http://localhost:3000/auth/github/callback"),
spotify.New(os.Getenv("SPOTIFY_KEY"), os.Getenv("SPOTIFY_SECRET"), "http://localhost:3000/auth/spotify/callback"),
linkedin.New(os.Getenv("LINKEDIN_KEY"), os.Getenv("LINKEDIN_SECRET"), "http://localhost:3000/auth/linkedin/callback"),
lastfm.New(os.Getenv("LASTFM_KEY"), os.Getenv("LASTFM_SECRET"), "http://localhost:3000/auth/lastfm/callback"),
twitch.New(os.Getenv("TWITCH_KEY"), os.Getenv("TWITCH_SECRET"), "http://localhost:3000/auth/twitch/callback"),
dropbox.New(os.Getenv("DROPBOX_KEY"), os.Getenv("DROPBOX_SECRET"), "http://localhost:3000/auth/dropbox/callback"),
digitalocean.New(os.Getenv("DIGITALOCEAN_KEY"), os.Getenv("DIGITALOCEAN_SECRET"), "http://localhost:3000/auth/digitalocean/callback", "read"),
bitbucket.New(os.Getenv("BITBUCKET_KEY"), os.Getenv("BITBUCKET_SECRET"), "http://localhost:3000/auth/bitbucket/callback"),
instagram.New(os.Getenv("INSTAGRAM_KEY"), os.Getenv("INSTAGRAM_SECRET"), "http://localhost:3000/auth/instagram/callback"),
intercom.New(os.Getenv("INTERCOM_KEY"), os.Getenv("INTERCOM_SECRET"), "http://localhost:3000/auth/intercom/callback"),
box.New(os.Getenv("BOX_KEY"), os.Getenv("BOX_SECRET"), "http://localhost:3000/auth/box/callback"),
salesforce.New(os.Getenv("SALESFORCE_KEY"), os.Getenv("SALESFORCE_SECRET"), "http://localhost:3000/auth/salesforce/callback"),
amazon.New(os.Getenv("AMAZON_KEY"), os.Getenv("AMAZON_SECRET"), "http://localhost:3000/auth/amazon/callback"),
yammer.New(os.Getenv("YAMMER_KEY"), os.Getenv("YAMMER_SECRET"), "http://localhost:3000/auth/yammer/callback"),
onedrive.New(os.Getenv("ONEDRIVE_KEY"), os.Getenv("ONEDRIVE_SECRET"), "http://localhost:3000/auth/onedrive/callback"),
azuread.New(os.Getenv("AZUREAD_KEY"), os.Getenv("AZUREAD_SECRET"), "http://localhost:3000/auth/azuread/callback", nil),
microsoftonline.New(os.Getenv("MICROSOFTONLINE_KEY"), os.Getenv("MICROSOFTONLINE_SECRET"), "http://localhost:3000/auth/microsoftonline/callback"),
battlenet.New(os.Getenv("BATTLENET_KEY"), os.Getenv("BATTLENET_SECRET"), "http://localhost:3000/auth/battlenet/callback"),
eveonline.New(os.Getenv("EVEONLINE_KEY"), os.Getenv("EVEONLINE_SECRET"), "http://localhost:3000/auth/eveonline/callback"),
//Pointed localhost.com to http://localhost:3000/auth/yahoo/callback through a proxy, because Yahoo
// does not allow custom ports in the redirect URI
yahoo.New(os.Getenv("YAHOO_KEY"), os.Getenv("YAHOO_SECRET"), "http://localhost.com"),
slack.New(os.Getenv("SLACK_KEY"), os.Getenv("SLACK_SECRET"), "http://localhost:3000/auth/slack/callback"),
stripe.New(os.Getenv("STRIPE_KEY"), os.Getenv("STRIPE_SECRET"), "http://localhost:3000/auth/stripe/callback"),
wepay.New(os.Getenv("WEPAY_KEY"), os.Getenv("WEPAY_SECRET"), "http://localhost:3000/auth/wepay/callback", "view_user"),
//By default the PayPal production auth URLs are used; set the PAYPAL_ENV=sandbox environment variable to test
//in the sandbox environment
paypal.New(os.Getenv("PAYPAL_KEY"), os.Getenv("PAYPAL_SECRET"), "http://localhost:3000/auth/paypal/callback"),
steam.New(os.Getenv("STEAM_KEY"), "http://localhost:3000/auth/steam/callback"),
heroku.New(os.Getenv("HEROKU_KEY"), os.Getenv("HEROKU_SECRET"), "http://localhost:3000/auth/heroku/callback"),
uber.New(os.Getenv("UBER_KEY"), os.Getenv("UBER_SECRET"), "http://localhost:3000/auth/uber/callback"),
soundcloud.New(os.Getenv("SOUNDCLOUD_KEY"), os.Getenv("SOUNDCLOUD_SECRET"), "http://localhost:3000/auth/soundcloud/callback"),
gitlab.New(os.Getenv("GITLAB_KEY"), os.Getenv("GITLAB_SECRET"), "http://localhost:3000/auth/gitlab/callback"),
dailymotion.New(os.Getenv("DAILYMOTION_KEY"), os.Getenv("DAILYMOTION_SECRET"), "http://localhost:3000/auth/dailymotion/callback", "email"),
deezer.New(os.Getenv("DEEZER_KEY"), os.Getenv("DEEZER_SECRET"), "http://localhost:3000/auth/deezer/callback", "email"),
discord.New(os.Getenv("DISCORD_KEY"), os.Getenv("DISCORD_SECRET"), "http://localhost:3000/auth/discord/callback", discord.ScopeIdentify, discord.ScopeEmail),
meetup.New(os.Getenv("MEETUP_KEY"), os.Getenv("MEETUP_SECRET"), "http://localhost:3000/auth/meetup/callback"),
//Auth0 allocates a domain per customer, so a domain must be provided for auth0 to work
auth0.New(os.Getenv("AUTH0_KEY"), os.Getenv("AUTH0_SECRET"), "http://localhost:3000/auth/auth0/callback", os.Getenv("AUTH0_DOMAIN")),
xero.New(os.Getenv("XERO_KEY"), os.Getenv("XERO_SECRET"), "http://localhost:3000/auth/xero/callback"),
vk.New(os.Getenv("VK_KEY"), os.Getenv("VK_SECRET"), "http://localhost:3000/auth/vk/callback"),
naver.New(os.Getenv("NAVER_KEY"), os.Getenv("NAVER_SECRET"), "http://localhost:3000/auth/naver/callback"),
)
// OpenID Connect is based on the OpenID Connect Auto Discovery URL (https://openid.net/specs/openid-connect-discovery-1_0-17.html)
// because the OpenID Connect provider initializes itself in New(), it can return an error which should be handled or ignored
// ignore the error for now
openidConnect, _ := openidConnect.New(os.Getenv("OPENID_CONNECT_KEY"), os.Getenv("OPENID_CONNECT_SECRET"), "http://localhost:3000/auth/openid-connect/callback", os.Getenv("OPENID_CONNECT_DISCOVERY_URL"))
if openidConnect != nil {
goth.UseProviders(openidConnect)
}
m := make(map[string]string)
m["amazon"] = "Amazon"
m["bitbucket"] = "Bitbucket"
m["box"] = "Box"
m["dailymotion"] = "Dailymotion"
m["deezer"] = "Deezer"
m["digitalocean"] = "Digital Ocean"
m["discord"] = "Discord"
m["dropbox"] = "Dropbox"
m["eveonline"] = "Eve Online"
m["facebook"] = "Facebook"
m["fitbit"] = "Fitbit"
m["github"] = "Github"
m["gitlab"] = "Gitlab"
m["soundcloud"] = "SoundCloud"
m["spotify"] = "Spotify"
m["steam"] = "Steam"
m["stripe"] = "Stripe"
m["twitch"] = "Twitch"
m["uber"] = "Uber"
m["wepay"] = "Wepay"
m["yahoo"] = "Yahoo"
m["yammer"] = "Yammer"
m["gplus"] = "Google Plus"
m["heroku"] = "Heroku"
m["instagram"] = "Instagram"
m["intercom"] = "Intercom"
m["lastfm"] = "Last FM"
m["linkedin"] = "Linkedin"
m["onedrive"] = "Onedrive"
m["azuread"] = "Azure AD"
m["microsoftonline"] = "Microsoft Online"
m["battlenet"] = "Battlenet"
m["paypal"] = "Paypal"
m["twitter"] = "Twitter"
m["salesforce"] = "Salesforce"
m["slack"] = "Slack"
m["meetup"] = "Meetup.com"
m["auth0"] = "Auth0"
m["openid-connect"] = "OpenID Connect"
m["xero"] = "Xero"
m["vk"] = "VK"
m["naver"] = "Naver"
var keys []string
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
providerIndex := &ProviderIndex{Providers: keys, ProvidersMap: m}
p := pat.New()
p.Get("/auth/{provider}/callback", func(res http.ResponseWriter, req *http.Request) {
user, err := gothic.CompleteUserAuth(res, req)
if err != nil {
fmt.Fprintln(res, err)
return
}
t, _ := template.New("foo").Parse(userTemplate)
t.Execute(res, user)
})
p.Get("/logout/{provider}", func(res http.ResponseWriter, req *http.Request) {
gothic.Logout(res, req)
res.Header().Set("Location", "/")
res.WriteHeader(http.StatusTemporaryRedirect)
})
p.Get("/auth/{provider}", func(res http.ResponseWriter, req *http.Request) {
// try to get the user without re-authenticating
if gothUser, err := gothic.CompleteUserAuth(res, req); err == nil {
t, _ := template.New("foo").Parse(userTemplate)
t.Execute(res, gothUser)
} else {
gothic.BeginAuthHandler(res, req)
}
})
p.Get("/", func(res http.ResponseWriter, req *http.Request) {
t, _ := template.New("foo").Parse(indexTemplate)
t.Execute(res, providerIndex)
})
log.Fatal(http.ListenAndServe(":3000", p))
}
type ProviderIndex struct {
Providers []string
ProvidersMap map[string]string
}
var indexTemplate = `{{range $key,$value:=.Providers}}
<p><a href="/auth/{{$value}}">Log in with {{index $.ProvidersMap $value}}</a></p>
{{end}}`
var userTemplate = `
<p><a href="/logout/{{.Provider}}">logout</a></p>
<p>Name: {{.Name}} [{{.LastName}}, {{.FirstName}}]</p>
<p>Email: {{.Email}}</p>
<p>NickName: {{.NickName}}</p>
<p>Location: {{.Location}}</p>
<p>AvatarURL: {{.AvatarURL}} <img src="{{.AvatarURL}}"></p>
<p>Description: {{.Description}}</p>
<p>UserID: {{.UserID}}</p>
<p>AccessToken: {{.AccessToken}}</p>
<p>ExpiresAt: {{.ExpiresAt}}</p>
<p>RefreshToken: {{.RefreshToken}}</p>
`
| [
"\"TWITTER_KEY\"",
"\"TWITTER_SECRET\"",
"\"TWITTER_KEY\"",
"\"TWITTER_SECRET\"",
"\"FACEBOOK_KEY\"",
"\"FACEBOOK_SECRET\"",
"\"FITBIT_KEY\"",
"\"FITBIT_SECRET\"",
"\"GPLUS_KEY\"",
"\"GPLUS_SECRET\"",
"\"GITHUB_KEY\"",
"\"GITHUB_SECRET\"",
"\"SPOTIFY_KEY\"",
"\"SPOTIFY_SECRET\"",
"\"LINKEDIN_KEY\"",
"\"LINKEDIN_SECRET\"",
"\"LASTFM_KEY\"",
"\"LASTFM_SECRET\"",
"\"TWITCH_KEY\"",
"\"TWITCH_SECRET\"",
"\"DROPBOX_KEY\"",
"\"DROPBOX_SECRET\"",
"\"DIGITALOCEAN_KEY\"",
"\"DIGITALOCEAN_SECRET\"",
"\"BITBUCKET_KEY\"",
"\"BITBUCKET_SECRET\"",
"\"INSTAGRAM_KEY\"",
"\"INSTAGRAM_SECRET\"",
"\"INTERCOM_KEY\"",
"\"INTERCOM_SECRET\"",
"\"BOX_KEY\"",
"\"BOX_SECRET\"",
"\"SALESFORCE_KEY\"",
"\"SALESFORCE_SECRET\"",
"\"AMAZON_KEY\"",
"\"AMAZON_SECRET\"",
"\"YAMMER_KEY\"",
"\"YAMMER_SECRET\"",
"\"ONEDRIVE_KEY\"",
"\"ONEDRIVE_SECRET\"",
"\"AZUREAD_KEY\"",
"\"AZUREAD_SECRET\"",
"\"MICROSOFTONLINE_KEY\"",
"\"MICROSOFTONLINE_SECRET\"",
"\"BATTLENET_KEY\"",
"\"BATTLENET_SECRET\"",
"\"EVEONLINE_KEY\"",
"\"EVEONLINE_SECRET\"",
"\"YAHOO_KEY\"",
"\"YAHOO_SECRET\"",
"\"SLACK_KEY\"",
"\"SLACK_SECRET\"",
"\"STRIPE_KEY\"",
"\"STRIPE_SECRET\"",
"\"WEPAY_KEY\"",
"\"WEPAY_SECRET\"",
"\"PAYPAL_KEY\"",
"\"PAYPAL_SECRET\"",
"\"STEAM_KEY\"",
"\"HEROKU_KEY\"",
"\"HEROKU_SECRET\"",
"\"UBER_KEY\"",
"\"UBER_SECRET\"",
"\"SOUNDCLOUD_KEY\"",
"\"SOUNDCLOUD_SECRET\"",
"\"GITLAB_KEY\"",
"\"GITLAB_SECRET\"",
"\"DAILYMOTION_KEY\"",
"\"DAILYMOTION_SECRET\"",
"\"DEEZER_KEY\"",
"\"DEEZER_SECRET\"",
"\"DISCORD_KEY\"",
"\"DISCORD_SECRET\"",
"\"MEETUP_KEY\"",
"\"MEETUP_SECRET\"",
"\"AUTH0_KEY\"",
"\"AUTH0_SECRET\"",
"\"AUTH0_DOMAIN\"",
"\"XERO_KEY\"",
"\"XERO_SECRET\"",
"\"VK_KEY\"",
"\"VK_SECRET\"",
"\"NAVER_KEY\"",
"\"NAVER_SECRET\"",
"\"OPENID_CONNECT_KEY\"",
"\"OPENID_CONNECT_SECRET\"",
"\"OPENID_CONNECT_DISCOVERY_URL\""
]
| []
| [
"MEETUP_KEY",
"MICROSOFTONLINE_SECRET",
"FITBIT_KEY",
"INTERCOM_KEY",
"SPOTIFY_KEY",
"WEPAY_KEY",
"EVEONLINE_SECRET",
"YAHOO_SECRET",
"YAMMER_KEY",
"VK_SECRET",
"DAILYMOTION_KEY",
"SALESFORCE_KEY",
"ONEDRIVE_SECRET",
"BITBUCKET_SECRET",
"SOUNDCLOUD_SECRET",
"UBER_SECRET",
"BOX_KEY",
"SOUNDCLOUD_KEY",
"DEEZER_SECRET",
"LASTFM_KEY",
"YAMMER_SECRET",
"YAHOO_KEY",
"STEAM_KEY",
"GITLAB_SECRET",
"SALESFORCE_SECRET",
"AZUREAD_KEY",
"GPLUS_SECRET",
"SLACK_SECRET",
"AMAZON_KEY",
"INTERCOM_SECRET",
"MEETUP_SECRET",
"GPLUS_KEY",
"BATTLENET_KEY",
"VK_KEY",
"BITBUCKET_KEY",
"FACEBOOK_SECRET",
"FITBIT_SECRET",
"STRIPE_KEY",
"TWITTER_SECRET",
"BATTLENET_SECRET",
"AMAZON_SECRET",
"DIGITALOCEAN_SECRET",
"DISCORD_KEY",
"TWITCH_SECRET",
"UBER_KEY",
"SLACK_KEY",
"WEPAY_SECRET",
"STRIPE_SECRET",
"GITHUB_SECRET",
"MICROSOFTONLINE_KEY",
"LASTFM_SECRET",
"INSTAGRAM_KEY",
"EVEONLINE_KEY",
"DROPBOX_SECRET",
"AUTH0_SECRET",
"DROPBOX_KEY",
"OPENID_CONNECT_DISCOVERY_URL",
"FACEBOOK_KEY",
"DISCORD_SECRET",
"TWITCH_KEY",
"OPENID_CONNECT_KEY",
"NAVER_SECRET",
"AUTH0_KEY",
"AZUREAD_SECRET",
"XERO_KEY",
"GITLAB_KEY",
"NAVER_KEY",
"XERO_SECRET",
"INSTAGRAM_SECRET",
"LINKEDIN_SECRET",
"DAILYMOTION_SECRET",
"SPOTIFY_SECRET",
"DEEZER_KEY",
"GITHUB_KEY",
"ONEDRIVE_KEY",
"HEROKU_SECRET",
"OPENID_CONNECT_SECRET",
"BOX_SECRET",
"PAYPAL_SECRET",
"PAYPAL_KEY",
"HEROKU_KEY",
"DIGITALOCEAN_KEY",
"AUTH0_DOMAIN",
"TWITTER_KEY",
"LINKEDIN_KEY"
]
| [] | ["MEETUP_KEY", "MICROSOFTONLINE_SECRET", "FITBIT_KEY", "INTERCOM_KEY", "SPOTIFY_KEY", "WEPAY_KEY", "EVEONLINE_SECRET", "YAHOO_SECRET", "YAMMER_KEY", "VK_SECRET", "DAILYMOTION_KEY", "SALESFORCE_KEY", "ONEDRIVE_SECRET", "BITBUCKET_SECRET", "SOUNDCLOUD_SECRET", "UBER_SECRET", "BOX_KEY", "SOUNDCLOUD_KEY", "DEEZER_SECRET", "LASTFM_KEY", "YAMMER_SECRET", "YAHOO_KEY", "STEAM_KEY", "GITLAB_SECRET", "SALESFORCE_SECRET", "AZUREAD_KEY", "GPLUS_SECRET", "SLACK_SECRET", "AMAZON_KEY", "INTERCOM_SECRET", "MEETUP_SECRET", "GPLUS_KEY", "BATTLENET_KEY", "VK_KEY", "BITBUCKET_KEY", "FACEBOOK_SECRET", "FITBIT_SECRET", "STRIPE_KEY", "TWITTER_SECRET", "BATTLENET_SECRET", "AMAZON_SECRET", "DIGITALOCEAN_SECRET", "DISCORD_KEY", "TWITCH_SECRET", "UBER_KEY", "SLACK_KEY", "WEPAY_SECRET", "STRIPE_SECRET", "GITHUB_SECRET", "MICROSOFTONLINE_KEY", "LASTFM_SECRET", "INSTAGRAM_KEY", "EVEONLINE_KEY", "DROPBOX_SECRET", "AUTH0_SECRET", "DROPBOX_KEY", "OPENID_CONNECT_DISCOVERY_URL", "FACEBOOK_KEY", "DISCORD_SECRET", "TWITCH_KEY", "OPENID_CONNECT_KEY", "NAVER_SECRET", "AUTH0_KEY", "AZUREAD_SECRET", "XERO_KEY", "GITLAB_KEY", "NAVER_KEY", "XERO_SECRET", "INSTAGRAM_SECRET", "LINKEDIN_SECRET", "DAILYMOTION_SECRET", "SPOTIFY_SECRET", "DEEZER_KEY", "GITHUB_KEY", "ONEDRIVE_KEY", "HEROKU_SECRET", "OPENID_CONNECT_SECRET", "BOX_SECRET", "PAYPAL_SECRET", "PAYPAL_KEY", "HEROKU_KEY", "DIGITALOCEAN_KEY", "AUTH0_DOMAIN", "TWITTER_KEY", "LINKEDIN_KEY"] | go | 85 | 0 | |
cmd/mavenBuild_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type mavenBuildOptions struct {
PomPath string `json:"pomPath,omitempty"`
Flatten bool `json:"flatten,omitempty"`
Verify bool `json:"verify,omitempty"`
ProjectSettingsFile string `json:"projectSettingsFile,omitempty"`
GlobalSettingsFile string `json:"globalSettingsFile,omitempty"`
M2Path string `json:"m2Path,omitempty"`
LogSuccessfulMavenTransfers bool `json:"logSuccessfulMavenTransfers,omitempty"`
CreateBOM bool `json:"createBOM,omitempty"`
AltDeploymentRepositoryPassword string `json:"altDeploymentRepositoryPassword,omitempty"`
AltDeploymentRepositoryUser string `json:"altDeploymentRepositoryUser,omitempty"`
AltDeploymentRepositoryURL string `json:"altDeploymentRepositoryUrl,omitempty"`
AltDeploymentRepositoryID string `json:"altDeploymentRepositoryID,omitempty"`
CustomTLSCertificateLinks []string `json:"customTlsCertificateLinks,omitempty"`
Publish bool `json:"publish,omitempty"`
}
// MavenBuildCommand This step will install the maven project into the local maven repository.
func MavenBuildCommand() *cobra.Command {
const STEP_NAME = "mavenBuild"
metadata := mavenBuildMetadata()
var stepConfig mavenBuildOptions
var startTime time.Time
var logCollector *log.CollectorHook
var createMavenBuildCmd = &cobra.Command{
Use: STEP_NAME,
Short: "This step will install the maven project into the local maven repository.",
Long: `This step will install the maven project into the local maven repository.
It will also prepare jacoco to record the code coverage and
supports ci friendly versioning by flattening the pom before installing.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.AltDeploymentRepositoryPassword)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Send(&telemetryData, logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
mavenBuild(stepConfig, &telemetryData)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addMavenBuildFlags(createMavenBuildCmd, &stepConfig)
return createMavenBuildCmd
}
func addMavenBuildFlags(cmd *cobra.Command, stepConfig *mavenBuildOptions) {
cmd.Flags().StringVar(&stepConfig.PomPath, "pomPath", `pom.xml`, "Path to the pom file which should be installed including all children.")
cmd.Flags().BoolVar(&stepConfig.Flatten, "flatten", true, "Defines if the pom files should be flattened to support ci friendly maven versioning.")
cmd.Flags().BoolVar(&stepConfig.Verify, "verify", false, "Instead of installing the artifact only the verify lifecycle phase is executed.")
cmd.Flags().StringVar(&stepConfig.ProjectSettingsFile, "projectSettingsFile", os.Getenv("PIPER_projectSettingsFile"), "Path to the mvn settings file that should be used as project settings file.")
cmd.Flags().StringVar(&stepConfig.GlobalSettingsFile, "globalSettingsFile", os.Getenv("PIPER_globalSettingsFile"), "Path to the mvn settings file that should be used as global settings file.")
cmd.Flags().StringVar(&stepConfig.M2Path, "m2Path", os.Getenv("PIPER_m2Path"), "Path to the location of the local repository that should be used.")
cmd.Flags().BoolVar(&stepConfig.LogSuccessfulMavenTransfers, "logSuccessfulMavenTransfers", false, "Configures maven to log successful downloads. This is set to `false` by default to reduce the noise in build logs.")
cmd.Flags().BoolVar(&stepConfig.CreateBOM, "createBOM", false, "Creates the bill of materials (BOM) using CycloneDX Maven plugin.")
cmd.Flags().StringVar(&stepConfig.AltDeploymentRepositoryPassword, "altDeploymentRepositoryPassword", os.Getenv("PIPER_altDeploymentRepositoryPassword"), "Password for the alternative deployment repository to which the project artifacts should be deployed ( other than those specified in <distributionManagement> ). This password will be updated in settings.xml . When no settings.xml is provided a new one is created corresponding with <servers> tag")
cmd.Flags().StringVar(&stepConfig.AltDeploymentRepositoryUser, "altDeploymentRepositoryUser", os.Getenv("PIPER_altDeploymentRepositoryUser"), "User for the alternative deployment repository to which the project artifacts should be deployed ( other than those specified in <distributionManagement> ). This user will be updated in settings.xml . When no settings.xml is provided a new one is created corresponding with <servers> tag")
cmd.Flags().StringVar(&stepConfig.AltDeploymentRepositoryURL, "altDeploymentRepositoryUrl", os.Getenv("PIPER_altDeploymentRepositoryUrl"), "Url for the alternative deployment repository to which the project artifacts should be deployed ( other than those specified in <distributionManagement> ). This Url will be updated in settings.xml . When no settings.xml is provided a new one is created corresponding with <servers> tag")
cmd.Flags().StringVar(&stepConfig.AltDeploymentRepositoryID, "altDeploymentRepositoryID", os.Getenv("PIPER_altDeploymentRepositoryID"), "Id for the alternative deployment repository to which the project artifacts should be deployed ( other than those specified in <distributionManagement> ). This id will be updated in settings.xml and will be used as a flag with DaltDeploymentRepository along with mavenAltDeploymentRepositoryUrl during maven deploy . When no settings.xml is provided a new one is created corresponding with <servers> tag")
cmd.Flags().StringSliceVar(&stepConfig.CustomTLSCertificateLinks, "customTlsCertificateLinks", []string{}, "List of download links to custom TLS certificates. This is required to ensure trusted connections to instances with repositories (like nexus) when publish flag is set to true.")
cmd.Flags().BoolVar(&stepConfig.Publish, "publish", false, "Configures maven to run the deploy plugin to publish artifacts to a repository.")
}
// retrieve step metadata
func mavenBuildMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "mavenBuild",
Aliases: []config.Alias{{Name: "mavenExecute", Deprecated: false}},
Description: "This step will install the maven project into the local maven repository.",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "altDeploymentRepositoryPasswordId", Description: "Jenkins credentials ID containing the artifact deployment repository password.", Type: "jenkins"},
},
Parameters: []config.StepParameters{
{
Name: "pomPath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: `pom.xml`,
},
{
Name: "flatten",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
Default: true,
},
{
Name: "verify",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
Default: false,
},
{
Name: "projectSettingsFile",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/projectSettingsFile"}},
Default: os.Getenv("PIPER_projectSettingsFile"),
},
{
Name: "globalSettingsFile",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "custom/mavenGlobalSettingsFile",
},
},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/globalSettingsFile"}},
Default: os.Getenv("PIPER_globalSettingsFile"),
},
{
Name: "m2Path",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/m2Path"}},
Default: os.Getenv("PIPER_m2Path"),
},
{
Name: "logSuccessfulMavenTransfers",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/logSuccessfulMavenTransfers"}},
Default: false,
},
{
Name: "createBOM",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/createBOM"}},
Default: false,
},
{
Name: "altDeploymentRepositoryPassword",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "custom/repositoryPassword",
},
{
Name: "altDeploymentRepositoryPasswordId",
Type: "secret",
},
{
Name: "",
Paths: []string{"$(vaultPath)/alt-deployment-repository-passowrd", "$(vaultBasePath)/$(vaultPipelineName)/alt-deployment-repository-passowrd", "$(vaultBasePath)/GROUP-SECRETS/alt-deployment-repository-passowrd"},
Type: "vaultSecretFile",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_altDeploymentRepositoryPassword"),
},
{
Name: "altDeploymentRepositoryUser",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "custom/repositoryUsername",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_altDeploymentRepositoryUser"),
},
{
Name: "altDeploymentRepositoryUrl",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "custom/repositoryUrl",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_altDeploymentRepositoryUrl"),
},
{
Name: "altDeploymentRepositoryID",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "custom/repositoryId",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_altDeploymentRepositoryID"),
},
{
Name: "customTlsCertificateLinks",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
Default: []string{},
},
{
Name: "publish",
ResourceRef: []config.ResourceReference{},
Scope: []string{"STEPS", "STAGES", "PARAMETERS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/publish"}},
Default: false,
},
},
},
Containers: []config.Container{
{Name: "mvn", Image: "maven:3.6-jdk-8"},
},
},
}
return theMetaData
}
| [
"\"PIPER_projectSettingsFile\"",
"\"PIPER_globalSettingsFile\"",
"\"PIPER_m2Path\"",
"\"PIPER_altDeploymentRepositoryPassword\"",
"\"PIPER_altDeploymentRepositoryUser\"",
"\"PIPER_altDeploymentRepositoryUrl\"",
"\"PIPER_altDeploymentRepositoryID\"",
"\"PIPER_projectSettingsFile\"",
"\"PIPER_globalSettingsFile\"",
"\"PIPER_m2Path\"",
"\"PIPER_altDeploymentRepositoryPassword\"",
"\"PIPER_altDeploymentRepositoryUser\"",
"\"PIPER_altDeploymentRepositoryUrl\"",
"\"PIPER_altDeploymentRepositoryID\""
]
| []
| [
"PIPER_altDeploymentRepositoryID",
"PIPER_globalSettingsFile",
"PIPER_altDeploymentRepositoryUser",
"PIPER_altDeploymentRepositoryPassword",
"PIPER_m2Path",
"PIPER_altDeploymentRepositoryUrl",
"PIPER_projectSettingsFile"
]
| [] | ["PIPER_altDeploymentRepositoryID", "PIPER_globalSettingsFile", "PIPER_altDeploymentRepositoryUser", "PIPER_altDeploymentRepositoryPassword", "PIPER_m2Path", "PIPER_altDeploymentRepositoryUrl", "PIPER_projectSettingsFile"] | go | 7 | 0 | |
setup.py | #!/usr/bin/env python
import os
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable,
'-DBUILD_FOR_PYTHON=ON',
'-Wno-dev']
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
# Pile all .so in one place and use $ORIGIN as RPATH
cmake_args += ["-DCMAKE_BUILD_WITH_INSTALL_RPATH=TRUE"]
cmake_args += ["-DCMAKE_INSTALL_RPATH={}".format("$ORIGIN")]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2 ** 32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j1']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.', '--target', os.path.basename(ext.name)] + build_args,
cwd=self.build_temp)
setup(
name='SymSpellCppPy',
version='0.0.14',
author='Arjun Variar & Mohit Tare',
author_email='[email protected]',
description='A Fast SymSpell port for python written in C++ using pybind11.',
long_description=long_description,
long_description_content_type='text/markdown',
test_suite="tests/SymSpellCppPyTest.py",
ext_modules=[CMakeExtension('SymSpellCppPy')],
cmdclass=dict(build_ext=CMakeBuild),
python_requires=">=3.4",
zip_safe=False,
url="https://github.com/viig99/SymSpellCppPy",
classifiers=[
"Intended Audience :: Developers",
"Natural Language :: English",
"Development Status :: 2 - Pre-Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9"
]
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
example/bucket/deleteCORS.go | package main
import (
"context"
"net/url"
"os"
"net/http"
"github.com/huanght1997/cos-go-sdk-v5"
"github.com/huanght1997/cos-go-sdk-v5/debug"
)
func main() {
u, _ := url.Parse("https://test-1253846586.cos.ap-guangzhou.myqcloud.com")
b := &cos.BaseURL{
BucketURL: u,
}
c := cos.NewClient(b, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: os.Getenv("COS_SECRETID"),
SecretKey: os.Getenv("COS_SECRETKEY"),
Transport: &debug.DebugRequestTransport{
RequestHeader: true,
RequestBody: true,
ResponseHeader: true,
ResponseBody: true,
},
},
})
_, err := c.Bucket.DeleteCORS(context.Background())
if err != nil {
panic(err)
}
}
| [
"\"COS_SECRETID\"",
"\"COS_SECRETKEY\""
]
| []
| [
"COS_SECRETKEY",
"COS_SECRETID"
]
| [] | ["COS_SECRETKEY", "COS_SECRETID"] | go | 2 | 0 | |
pkg/nas/utils.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nas
import (
"context"
"fmt"
"github.com/kubernetes-sigs/alibaba-cloud-csi-driver/pkg/cnfs/v1beta1"
"io/ioutil"
"k8s.io/client-go/dynamic"
"net/http"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"errors"
aliyunep "github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
aliNas "github.com/aliyun/alibaba-cloud-sdk-go/services/nas"
"github.com/kubernetes-sigs/alibaba-cloud-csi-driver/pkg/utils"
log "github.com/sirupsen/logrus"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// MetadataURL is metadata url
MetadataURL = "http://100.100.100.200/latest/meta-data/"
// RegionTag is region id
RegionTag = "region-id"
// NsenterCmd is nsenter mount command
NsenterCmd = "/nsenter --mount=/proc/1/ns/mnt"
// LoopLockFile lock file for nas loopsetup
LoopLockFile = "loopsetup.nas.csi.alibabacloud.com.lck"
// LoopImgFile image file for nas loopsetup
LoopImgFile = "loopsetup.nas.csi.alibabacloud.com.img"
// Resize2fsFailedFilename ...
Resize2fsFailedFilename = "resize2fs_failed.txt"
// Resize2fsFailedFixCmd ...
Resize2fsFailedFixCmd = "%s fsck -a %s"
)
var (
// VERSION should be updated by hand at each release
VERSION = "v1.14.8"
// GITCOMMIT will be overwritten automatically by the build system
GITCOMMIT = "HEAD"
// KubernetesAlicloudIdentity is the system identity for ecs client request
KubernetesAlicloudIdentity = fmt.Sprintf("Kubernetes.Alicloud/CsiProvision.Nas-%s", VERSION)
)
// RoleAuth define STS Token Response
type RoleAuth struct {
AccessKeyID string
AccessKeySecret string
Expiration time.Time
SecurityToken string
LastUpdated time.Time
Code string
}
//DoNfsMount execute the mount command for nas dir
func DoNfsMount(nfsServer, nfsPath, nfsVers, mountOptions, mountPoint, volumeID string) error {
if !utils.IsFileExisting(mountPoint) {
CreateDest(mountPoint)
}
if CheckNfsPathMounted(mountPoint, nfsServer, nfsPath) {
log.Infof("DoNfsMount: nfs server already mounted: %s, %s", nfsServer, nfsPath)
return nil
}
mntCmd := fmt.Sprintf("mount -t nfs -o vers=%s %s:%s %s", nfsVers, nfsServer, nfsPath, mountPoint)
if mountOptions != "" {
mntCmd = fmt.Sprintf("mount -t nfs -o vers=%s,%s %s:%s %s", nfsVers, mountOptions, nfsServer, nfsPath, mountPoint)
}
_, err := utils.Run(mntCmd)
if err != nil && nfsPath != "/" {
if strings.Contains(err.Error(), "reason given by server: No such file or directory") || strings.Contains(err.Error(), "access denied by server while mounting") {
if err := createNasSubDir(nfsServer, nfsPath, nfsVers, mountOptions, volumeID); err != nil {
log.Errorf("DoNfsMount: Create SubPath error: %s", err.Error())
return err
}
if _, err := utils.Run(mntCmd); err != nil {
log.Errorf("DoNfsMount, Mount Nfs sub directory fail: %s", err.Error())
return err
}
} else {
return err
}
} else if err != nil {
return err
}
log.Infof("DoNfsMount: mount nfs successful with command: %s", mntCmd)
return nil
}
//CheckNfsPathMounted check whether the given nfs path was mounted
func CheckNfsPathMounted(mountpoint, server, path string) bool {
// mntCmd := fmt.Sprintf("findmnt %s | grep %s | grep %s | grep -v grep | wc -l", mountpoint, server, path)
mntCmd := fmt.Sprintf("cat /proc/mounts | grep %s | grep %s | grep -v grep | wc -l", mountpoint, path)
// mntCmd := fmt.Sprintf("grep -E -- '%s.*%s' /proc/mounts", mountpoint, path)
if out, err := utils.Run(mntCmd); err == nil && strings.TrimSpace(out) != "0" {
return true
}
return false
}
//CreateDest create the target
func CreateDest(dest string) error {
fi, err := os.Lstat(dest)
if os.IsNotExist(err) {
if err := os.MkdirAll(dest, 0777); err != nil {
return err
}
} else if err != nil {
return err
}
if fi != nil && !fi.IsDir() {
return fmt.Errorf("%v already exist but it's not a directory", dest)
}
return nil
}
// GetNfsDetails get nfs server's details
func GetNfsDetails(nfsServersString string) (string, string) {
nfsServer, nfsPath := "", ""
nfsServerList := strings.Split(nfsServersString, ",")
serverNum := len(nfsServerList)
if _, ok := storageClassServerPos[nfsServersString]; !ok {
storageClassServerPos[nfsServersString] = 0
}
zoneIndex := storageClassServerPos[nfsServersString] % serverNum
selectedServer := nfsServerList[zoneIndex]
storageClassServerPos[nfsServersString]++
serverParts := strings.Split(selectedServer, ":")
if len(serverParts) == 1 {
nfsServer = serverParts[0]
nfsPath = "/"
} else if len(serverParts) == 2 {
nfsServer = serverParts[0]
nfsPath = serverParts[1]
if nfsPath == "" {
nfsPath = "/"
}
} else {
nfsServer = ""
nfsPath = ""
}
// remove / if path end with /;
if nfsPath != "/" && strings.HasSuffix(nfsPath, "/") {
nfsPath = nfsPath[0 : len(nfsPath)-1]
}
return nfsServer, nfsPath
}
// GetMetaData get host regionid, zoneid
func GetMetaData(resource string) string {
resp, err := http.Get(MetadataURL + resource)
if err != nil {
return ""
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return ""
}
return string(body)
}
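// updateNasClient refreshes the NAS client with STS credentials when a security token is available and sets the CSI user agent.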
func updateNasClient(client *aliNas.Client, regionID string) *aliNas.Client {
accessKeyID, accessSecret, accessToken := utils.GetDefaultAK()
if accessToken != "" {
client = newNasClient(accessKeyID, accessSecret, accessToken, regionID)
}
if client.Client.GetConfig() != nil {
client.Client.GetConfig().UserAgent = KubernetesAlicloudIdentity
}
return client
}
func newNasClient(accessKeyID, accessKeySecret, accessToken, regionID string) (nasClient *aliNas.Client) {
var err error
if regionID == "" {
regionID = GetMetaData(RegionTag)
}
if accessToken == "" {
nasClient, err = aliNas.NewClientWithAccessKey(regionID, accessKeyID, accessKeySecret)
if err != nil {
return nil
}
} else {
nasClient, err = aliNas.NewClientWithStsToken(regionID, accessKeyID, accessKeySecret, accessToken)
if err != nil {
return nil
}
}
// Set Nas Endpoint
SetNasEndPoint(regionID)
return
}
// SetNasEndPoint sets the endpoint for NAS
func SetNasEndPoint(regionID string) {
// use the unitized region endpoint (nas-vpc) for the regions listed below.
// 23 regions in total
unitizedRegions := []string{"cn-hangzhou", "cn-zhangjiakou", "cn-huhehaote", "cn-shenzhen", "ap-southeast-1", "ap-southeast-2",
"ap-southeast-3", "ap-southeast-5", "eu-central-1", "us-east-1", "ap-northeast-1", "ap-south-1",
"us-west-1", "eu-west-1", "cn-chengdu", "cn-north-2-gov-1", "cn-beijing", "cn-shanghai", "cn-hongkong",
"cn-shenzhen-finance-1", "cn-shanghai-finance-1", "cn-hangzhou-finance", "cn-qingdao"}
for _, tmpRegion := range unitizedRegions {
if regionID == tmpRegion {
aliyunep.AddEndpointMapping(regionID, "Nas", "nas-vpc."+regionID+".aliyuncs.com")
break
}
}
// use environment endpoint setting first;
if ep := os.Getenv("NAS_ENDPOINT"); ep != "" {
aliyunep.AddEndpointMapping(regionID, "Nas", ep)
}
}
func waitTimeout(wg *sync.WaitGroup, timeout int) bool {
c := make(chan struct{})
go func() {
defer close(c)
wg.Wait()
}()
select {
case <-c:
return false
case <-time.After(time.Duration(timeout) * time.Second):
return true
}
}
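// Usage sketch (illustrative, not from the original source): waitTimeout reports whether the
// timeout fired before the WaitGroup finished, e.g.
//
//	var wg sync.WaitGroup
//	wg.Add(1)
//	go func() { defer wg.Done(); doWork() }() // doWork is hypothetical
//	if waitTimeout(&wg, 10) {
//		log.Warnf("workers did not finish within 10 seconds")
//	}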
func createNasSubDir(nfsServer, nfsPath, nfsVers, nfsOptions string, volumeID string) error {
// step 1: create mount path
nasTmpPath := filepath.Join(NasTempMntPath, volumeID)
if err := utils.CreateDest(nasTmpPath); err != nil {
log.Infof("Create Nas temp Directory err: " + err.Error())
return err
}
if utils.IsMounted(nasTmpPath) {
utils.Umount(nasTmpPath)
}
// step 2: mount the NAS root and create the subpath (with special handling for extreme NAS)
if nfsOptions != "" {
nfsVers = nfsVers + "," + nfsOptions
}
usePath := nfsPath
rootPath := "/"
// the server is an extreme NAS instance
if strings.Contains(nfsServer, "extreme.nas.aliyuncs.com") {
//1. No need to handle the case where nfsPath is only /share or /share/
//2. No need to handle the case where nfsPath does not begin with /share or /share/
//3. The case where nfsPath is /share/subpath is handled below
if strings.HasPrefix(nfsPath, "/share/") && len(nfsPath) > len("/share/") {
rootPath = "/share/"
usePath = nfsPath[6:]
}
}
mntCmdRootPath := fmt.Sprintf("mount -t nfs -o vers=%s %s:%s %s", nfsVers, nfsServer, rootPath, nasTmpPath)
_, err := utils.Run(mntCmdRootPath)
if err != nil {
log.Errorf("Nas, mount directory rootPath fail, rootPath:%s, err:%s", rootPath, err.Error())
return err
}
subPath := path.Join(nasTmpPath, usePath)
if err := utils.CreateDest(subPath); err != nil {
log.Infof("Nas, Create Sub Directory fail, subPath:%s, err: " + err.Error())
return err
}
// step 3: umount after create
utils.Umount(nasTmpPath)
log.Infof("Create Sub Directory successful, nfsPath:%s, subPath:%s", nfsPath, subPath)
return nil
}
func setNasVolumeCapacity(nfsServer, nfsPath string, volSizeBytes int64) error {
if nfsPath == "" || nfsPath == "/" {
return fmt.Errorf("Volume %s:%s not support set quota to root path ", nfsServer, nfsPath)
}
pvSizeGB := volSizeBytes / (1024 * 1024 * 1024)
nasClient := updateNasClient(GlobalConfigVar.NasClient, GetMetaData(RegionTag))
fsList := strings.Split(nfsServer, "-")
if len(fsList) < 1 {
return fmt.Errorf("volume error nas server(%s) ", nfsServer)
}
quotaRequest := aliNas.CreateSetDirQuotaRequest()
quotaRequest.FileSystemId = fsList[0]
quotaRequest.Path = nfsPath
quotaRequest.UserType = "AllUsers"
quotaRequest.QuotaType = "Enforcement"
pvSizeGBStr := strconv.FormatInt(pvSizeGB, 10)
quotaRequest.SizeLimit = requests.Integer(pvSizeGBStr)
quotaRequest.RegionId = GetMetaData(RegionTag)
_, err := nasClient.SetDirQuota(quotaRequest)
if err != nil {
if strings.Contains(err.Error(), "The specified FileSystem does not exist.") {
return fmt.Errorf("extreme did not support quota, please change %s to General Purpose NAS", nfsServer)
}
return fmt.Errorf("volume set nas quota with error: %s", err.Error())
}
return nil
}
func setNasVolumeCapacityWithID(pvObj *v1.PersistentVolume, crdClient dynamic.Interface, volSizeBytes int64) error {
if pvObj.Spec.CSI == nil {
return fmt.Errorf("Volume %s is not CSI type %v ", pvObj.Name, pvObj)
}
// Check Pv volume parameters
if value, ok := pvObj.Spec.CSI.VolumeAttributes["volumeCapacity"]; ok && value == "false" {
return fmt.Errorf("Volume %s not contain volumeCapacity parameters, not support expand, PV: %v ", pvObj.Name, pvObj)
}
nfsServer, nfsPath := "", ""
if value, ok := pvObj.Spec.CSI.VolumeAttributes["server"]; ok {
nfsServer = value
} else {
if value, ok := pvObj.Spec.CSI.VolumeAttributes["containerNetworkFileSystem"]; ok {
server, err := v1beta1.GetContainerNetworkFileSystemServer(crdClient, value)
if err != nil {
return err
}
nfsServer = server
}
}
if value, ok := pvObj.Spec.CSI.VolumeAttributes["path"]; ok {
nfsPath = value
}
return setNasVolumeCapacity(nfsServer, nfsPath, volSizeBytes)
}
// checkSystemNasConfig checks the sunrpc system config and,
// if tcp_slot_table_entries is not already set to 128, applies that setting.
func checkSystemNasConfig() {
updateNasConfig := false
sunRPCFile := "/etc/modprobe.d/sunrpc.conf"
if !utils.IsFileExisting(sunRPCFile) {
updateNasConfig = true
} else {
chkCmd := fmt.Sprintf("cat %s | grep tcp_slot_table_entries | grep 128 | grep -v grep | wc -l", sunRPCFile)
out, err := utils.Run(chkCmd)
if err != nil {
log.Warnf("Update Nas system config check error: %s", err.Error())
return
}
if strings.TrimSpace(out) == "0" {
updateNasConfig = true
}
}
if updateNasConfig {
upCmd := fmt.Sprintf("echo \"options sunrpc tcp_slot_table_entries=128\" >> %s && echo \"options sunrpc tcp_max_slot_table_entries=128\" >> %s && sysctl -w sunrpc.tcp_slot_table_entries=128", sunRPCFile, sunRPCFile)
_, err := utils.Run(upCmd)
if err != nil {
log.Warnf("Update Nas system config error: %s", err.Error())
return
}
log.Warnf("Successful update Nas system config")
}
}
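// After the update command above runs, /etc/modprobe.d/sunrpc.conf is expected to contain
// (illustrative of the echo commands, not a guaranteed file layout):
//
//	options sunrpc tcp_slot_table_entries=128
//	options sunrpc tcp_max_slot_table_entries=128
//
// and sunrpc.tcp_slot_table_entries is also set to 128 for the running kernel via sysctl.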
// ParseMountFlags parse mountOptions
func ParseMountFlags(mntOptions []string) (string, string) {
if len(mntOptions) > 0 {
mntOptionsStr := strings.Join(mntOptions, ",")
// mntOptions should re-split, as some like ["a,b,c", "d"]
mntOptionsList := strings.Split(mntOptionsStr, ",")
tmpOptionsList := []string{}
if strings.Contains(mntOptionsStr, "vers=3.0") {
for _, tmpOptions := range mntOptionsList {
if tmpOptions != "vers=3.0" {
tmpOptionsList = append(tmpOptionsList, tmpOptions)
}
}
return "3", strings.Join(tmpOptionsList, ",")
} else if strings.Contains(mntOptionsStr, "vers=3") {
for _, tmpOptions := range mntOptionsList {
if tmpOptions != "vers=3" {
tmpOptionsList = append(tmpOptionsList, tmpOptions)
}
}
return "3", strings.Join(tmpOptionsList, ",")
} else if strings.Contains(mntOptionsStr, "vers=4.0") {
for _, tmpOptions := range mntOptionsList {
if tmpOptions != "vers=4.0" {
tmpOptionsList = append(tmpOptionsList, tmpOptions)
}
}
return "4.0", strings.Join(tmpOptionsList, ",")
} else if strings.Contains(mntOptionsStr, "vers=4.1") {
for _, tmpOptions := range mntOptionsList {
if tmpOptions != "vers=4.1" {
tmpOptionsList = append(tmpOptionsList, tmpOptions)
}
}
return "4.1", strings.Join(tmpOptionsList, ",")
} else {
return "", strings.Join(mntOptions, ",")
}
}
return "", ""
}
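// Illustrative example (hypothetical input): ParseMountFlags([]string{"nolock,tcp,noresvport", "vers=3"})
// joins and re-splits the options, strips the version entry, and returns ("3", "nolock,tcp,noresvport").
// When no vers= option is present it returns "" together with the options joined unchanged.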
func createLosetupPv(fullPath string, volSizeBytes int64) error {
blockNum := volSizeBytes / (4 * 1024)
fileName := filepath.Join(fullPath, LoopImgFile)
if utils.IsFileExisting(fileName) {
log.Infof("createLosetupPv: image file is exist, just skip: %s", fileName)
return nil
}
imgCmd := fmt.Sprintf("dd if=/dev/zero of=%s bs=4k seek=%d count=0", fileName, blockNum)
_, err := utils.Run(imgCmd)
if err != nil {
return err
}
formatCmd := fmt.Sprintf("mkfs.ext4 -F -m0 %s", fileName)
_, err = utils.Run(formatCmd)
if err != nil {
return err
}
return nil
}
// /var/lib/kubelet/pods/5e03c7f7-2946-4ee1-ad77-2efbc4fdb16c/volumes/kubernetes.io~csi/nas-f5308354-725a-4fd3-b613-0f5b384bd00e/mount
func mountLosetupPv(mountPoint string, opt *Options, volumeID string) error {
pathList := strings.Split(mountPoint, "/")
if len(pathList) != 10 {
return fmt.Errorf("mountLosetupPv: mountPoint format error, %s", mountPoint)
}
podID := pathList[5]
pvName := pathList[8]
// /mnt/nasplugin.alibabacloud.com/6c690876-74aa-46f6-a301-da7f4353665d/pv-losetup/
nfsPath := filepath.Join(NasMntPoint, podID, pvName)
if err := utils.CreateDest(nfsPath); err != nil {
return fmt.Errorf("mountLosetupPv: create nfs mountPath error %s ", err.Error())
}
err := DoNfsMount(opt.Server, opt.Path, opt.Vers, opt.Options, nfsPath, volumeID)
if err != nil {
return fmt.Errorf("mountLosetupPv: mount losetup volume failed: %s", err.Error())
}
lockFile := filepath.Join(nfsPath, LoopLockFile)
if opt.LoopLock == "true" && isLosetupUsed(lockFile, opt, volumeID) {
return fmt.Errorf("mountLosetupPv: nfs losetup file is used by others %s", lockFile)
}
imgFile := filepath.Join(nfsPath, LoopImgFile)
failedFile := filepath.Join(nfsPath, Resize2fsFailedFilename)
if utils.IsFileExisting(failedFile) {
// path/to/whatever does not exist
cmd := fmt.Sprintf(Resize2fsFailedFixCmd, NsenterCmd, imgFile)
_, err = utils.Run(cmd)
if err != nil {
return fmt.Errorf("mountLosetupPv: mount nfs losetup error %s", err.Error())
}
err = os.Remove(failedFile)
if err != nil {
log.Errorf("mountLosetupPv: failed to remove failed file: %v", err)
}
}
mountCmd := fmt.Sprintf("%s mount -o loop %s %s", NsenterCmd, imgFile, mountPoint)
_, err = utils.Run(mountCmd)
if err != nil {
return fmt.Errorf("mountLosetupPv: mount nfs losetup error %s", err.Error())
}
lockContent := GlobalConfigVar.NodeID + ":" + GlobalConfigVar.NodeIP
if err := ioutil.WriteFile(lockFile, ([]byte)(lockContent), 0644); err != nil {
return err
}
return nil
}
func isLosetupUsed(lockFile string, opt *Options, volumeID string) bool {
if !utils.IsFileExisting(lockFile) {
return false
}
fileContent := utils.GetFileContent(lockFile)
contentParts := strings.Split(fileContent, ":")
if len(contentParts) != 2 || contentParts[0] == "" || contentParts[1] == "" {
return true
}
oldNodeID := contentParts[0]
oldNodeIP := contentParts[1]
if GlobalConfigVar.NodeID == oldNodeID {
if !isLosetupMount(volumeID) {
log.Warnf("Lockfile(%s) exist, but Losetup image not mounted %s.", lockFile, opt.Path)
return false
}
log.Warnf("Lockfile(%s) exist, but Losetup image mounted %s.", lockFile, opt.Path)
return true
}
stat, err := utils.Ping(oldNodeIP)
if err != nil {
log.Warnf("Ping node %s, but get error: %s, consider as volume used", oldNodeIP, err.Error())
return true
}
if stat.PacketLoss == 100 {
log.Warnf("Cannot connect to node %s, consider the node as shutdown(%s).", oldNodeIP, lockFile)
return false
}
return true
}
func checkLosetupUnmount(mountPoint string) error {
pathList := strings.Split(mountPoint, "/")
if len(pathList) != 10 {
log.Infof("MountPoint not format as losetup type: %s", mountPoint)
return nil
}
podID := pathList[5]
pvName := pathList[8]
nfsPath := filepath.Join(NasMntPoint, podID, pvName)
imgFile := filepath.Join(nfsPath, LoopImgFile)
lockFile := filepath.Join(nfsPath, LoopLockFile)
if utils.IsFileExisting(imgFile) {
if err := os.Remove(lockFile); err != nil {
return fmt.Errorf("checkLosetupUnmount: remove lock file error %v", err)
}
if err := utils.Umount(nfsPath); err != nil {
return fmt.Errorf("checkLosetupUnmount: umount nfs path error %v", err)
}
log.Infof("Losetup Unmount successful %s", mountPoint)
} else {
log.Infof("Losetup Unmount, image file not exist, skipping %s", mountPoint)
}
return nil
}
func isLosetupMount(volumeID string) bool {
keyWord := volumeID + "/" + LoopImgFile
cmd := fmt.Sprintf("mount | grep %s |grep -v grep |wc -l", keyWord)
out, err := utils.Run(cmd)
if err != nil {
log.Infof("isLosetupMount: exec error: %s, %s", cmd, err.Error())
return false
}
if strings.TrimSpace(out) == "0" {
return false
}
return true
}
func getPvObj(volumeID string) (*v1.PersistentVolume, error) {
return GlobalConfigVar.KubeClient.CoreV1().PersistentVolumes().Get(context.Background(), volumeID, metav1.GetOptions{})
}
func isValidCnfsParameter(server string, cnfsName string) error {
if len(server) == 0 && len(cnfsName) == 0 {
msg := fmt.Sprintf("Server and ContainerNetworkFileSystem need to be configured at least one.")
log.Errorf(msg)
return errors.New(msg)
}
if len(server) != 0 && len(cnfsName) != 0 {
msg := fmt.Sprintf("Server and ContainerNetworkFileSystem can only be configured to use one.")
log.Errorf(msg)
return errors.New(msg)
}
return nil
}
| [
"\"NAS_ENDPOINT\""
]
| []
| [
"NAS_ENDPOINT"
]
| [] | ["NAS_ENDPOINT"] | go | 1 | 0 | |
backend/auth/auth.go | // Package auth provides functions useful for using authentication in this API.
package auth
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
"github.com/soumitradev/Dwitter/backend/common"
"github.com/soumitradev/Dwitter/backend/prisma/db"
"github.com/golang-jwt/jwt/v4"
"github.com/golang/gddo/httputil/header"
)
// A tokenType stores an access and refresh token
type tokenType struct {
AccessToken string `json:"accessToken"`
RefreshToken string `json:"refreshToken"`
}
// A loginResponse stores the response to authentication
type loginResponse struct {
AccessToken string `json:"accessToken"`
JID string `json:"jid"`
}
// A loginType stores login info
type loginType struct {
Username string `json:"username"`
Password string `json:"password"`
}
// Split "Bearer XXXXXXXXXXXX" and return the token part
func SplitAuthToken(headerString string) string {
tokenArr := strings.Split(headerString, " ")
tokenString := ""
if len(tokenArr) == 2 {
tokenString = tokenArr[1]
}
return tokenString
}
// Split "xyz=AAAAAAA" and return the AAAAAAA part
func splitCookie(cookieString string) string {
arr := strings.Split(cookieString, "=")
val := ""
if len(arr) == 2 {
val = arr[1]
}
return val
}
// Generate an Access Token
func generateAccessToken(username string) (string, error) {
// Check if user exists
_, err := common.Client.User.FindUnique(
db.User.Username.Equals(username),
).Exec(common.BaseCtx)
if err == db.ErrNotFound {
return "", errors.New("user doesn't exist")
}
// Save data in claims and generate token
tokenClaims := jwt.MapClaims{}
tokenClaims["authorized"] = true
tokenClaims["username"] = username
tokenClaims["exp"] = time.Now().Add(time.Minute * 15).Unix()
accessToken := jwt.NewWithClaims(jwt.SigningMethodHS256, tokenClaims)
token, err := accessToken.SignedString([]byte(os.Getenv("ACCESS_SECRET")))
if err != nil {
return "", err
}
return token, nil
}
// Generate a Refresh Token
func generateRefreshToken(username string) (string, error) {
// Check if user exists
userDB, err := common.Client.User.FindUnique(
db.User.Username.Equals(username),
).Exec(common.BaseCtx)
if err == db.ErrNotFound {
return "", errors.New("user doesn't exist")
}
// Save data in claims and generate token
tokenClaims := jwt.MapClaims{}
tokenClaims["authorized"] = true
tokenClaims["username"] = username
tokenClaims["token_version"] = userDB.TokenVersion
tokenClaims["exp"] = time.Now().Add(time.Hour * 24 * 7).Unix()
accessToken := jwt.NewWithClaims(jwt.SigningMethodHS256, tokenClaims)
token, err := accessToken.SignedString([]byte(os.Getenv("REFRESH_SECRET")))
if err != nil {
return "", err
}
return token, nil
}
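// For reference, the refresh token built above carries claims of roughly this shape
// (illustrative values; the token is HS256-signed with REFRESH_SECRET):
//
//	{"authorized": true, "username": "alice", "token_version": 2, "exp": <now + 7 days>}
//
// The access token from generateAccessToken is the same except that it omits token_version,
// expires after 15 minutes, and is signed with ACCESS_SECRET instead.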
// Authorize user and return tokens
func generateTokens(username string, password string) (tokenType, error) {
authenticated, authErr := common.CheckCreds(username, password)
if authenticated {
JWT, err := generateAccessToken(username)
if err != nil {
return tokenType{}, err
}
refTok, err := generateRefreshToken(username)
if err != nil {
return tokenType{}, err
}
return tokenType{
AccessToken: JWT,
RefreshToken: refTok,
}, err
}
return tokenType{}, authErr
}
// Verify an Access Token
func VerifyAccessToken(tokenString string) (jwt.MapClaims, bool, error) {
// Handle empty token string
if tokenString == "" {
return jwt.MapClaims{}, false, nil
}
// Validate token
token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
// Make sure that the token's signing method conforms to SigningMethodHMAC
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("ACCESS_SECRET")), nil
})
if err != nil {
return jwt.MapClaims{}, false, fmt.Errorf("authentication error: %v", err)
}
// Extract metadata from token
claims, ok := token.Claims.(jwt.MapClaims)
if ok && token.Valid {
// Check for username field
_, ok := claims["username"].(string)
if !ok {
return jwt.MapClaims{}, false, errors.New("field username not found in access token")
}
_, err = common.Client.User.FindUnique(
db.User.Username.Equals(claims["username"].(string)),
).Exec(common.BaseCtx)
if err == db.ErrNotFound {
return jwt.MapClaims{}, false, errors.New("user doesn't exist")
}
return claims, true, nil
} else {
return jwt.MapClaims{}, false, nil
}
}
// Verify a Refresh Token
func verifyRefreshToken(tokenString string) (jwt.MapClaims, bool, error) {
// Validate token
token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
// Make sure that the token's signing method conforms to SigningMethodHMAC
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("REFRESH_SECRET")), nil
})
if err != nil {
return jwt.MapClaims{}, false, fmt.Errorf("authentication error: %v", err)
}
// Extract metadata from token
claims, ok := token.Claims.(jwt.MapClaims)
if ok && token.Valid {
// Check for username field
username, ok := claims["username"].(string)
if !ok {
return jwt.MapClaims{}, false, errors.New("field username not found in refresh token")
}
// Check for token_version field
tokenV, ok := claims["token_version"].(float64)
if !ok {
return jwt.MapClaims{}, false, errors.New("field token_version not found in refresh token")
}
userDB, err := common.Client.User.FindUnique(
db.User.Username.Equals(username),
).Exec(common.BaseCtx)
if err == db.ErrNotFound {
return jwt.MapClaims{}, false, errors.New("user doesn't exist")
}
if userDB.TokenVersion != int(tokenV) {
return jwt.MapClaims{}, false, errors.New("invalid token version")
}
return claims, true, nil
} else {
return jwt.MapClaims{}, false, errors.New("unauthorized")
}
}
// Handles login requests
func LoginHandler(w http.ResponseWriter, r *http.Request) {
// Check if content type is "application/json"
if r.Header.Get("Content-Type") != "" {
value, _ := header.ParseValueAndParams(r.Header, "Content-Type")
if value != "application/json" {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: "Content-Type header is not application/json",
})
return
}
}
// Read a maximum of 1MB from body
r.Body = http.MaxBytesReader(w, r.Body, 1048576)
// Create a JSON decoder and decode the request JSON
decoder := json.NewDecoder(r.Body)
decoder.DisallowUnknownFields()
var loginData loginType
err := decoder.Decode(&loginData)
// If any error occurred during the decoding, send an appropriate response
if err != nil {
var syntaxError *json.SyntaxError
var unmarshalTypeError *json.UnmarshalTypeError
// Return errors based on what error JSON parser returned
switch {
case errors.As(err, &syntaxError):
msg := fmt.Sprintf("Request body contains badly-formed JSON (at position %d)", syntaxError.Offset)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
case errors.Is(err, io.ErrUnexpectedEOF):
msg := "Request body contains badly-formed JSON"
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
case errors.As(err, &unmarshalTypeError):
msg := fmt.Sprintf("Request body contains an invalid value for the %q field (at position %d)", unmarshalTypeError.Field, unmarshalTypeError.Offset)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
case strings.HasPrefix(err.Error(), "json: unknown field "):
fieldName := strings.TrimPrefix(err.Error(), "json: unknown field ")
msg := fmt.Sprintf("Request body contains unknown field %s", fieldName)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
case errors.Is(err, io.EOF):
msg := "Request body must not be empty"
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
case err.Error() == "http: request body too large":
msg := "Request body must not be larger than 1MB"
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusRequestEntityTooLarge)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
default:
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusInternalServerError)
json.NewEncoder(w).Encode(common.HTTPError{
Error: err.Error(),
})
}
return
}
// Decode again to verify the request body contains only a single JSON object
err = decoder.Decode(&struct{}{})
if err != io.EOF {
msg := "Request body must only contain a single JSON object"
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
return
}
// After checking for any errors, log the user in, and generate tokens
tokenData, err := generateTokens(loginData.Username, loginData.Password)
if err != nil {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(common.HTTPError{
Error: err.Error(),
})
return
}
// Send the refresh token in a HTTPOnly cookie
c := http.Cookie{
Name: "jid",
Value: tokenData.RefreshToken,
HttpOnly: true,
Secure: true,
Path: "/api/refresh_token",
}
http.SetCookie(w, &c)
// Set the response headers
w.Header().Set("Content-Type", "application/json")
// Send the access token in JSON
json.NewEncoder(w).Encode(loginResponse{
AccessToken: tokenData.AccessToken,
JID: tokenData.RefreshToken,
})
}
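// Illustrative request/response for LoginHandler (values are hypothetical; the route depends
// on how the handler is mounted elsewhere):
//
//	POST <login route>  {"username": "alice", "password": "hunter2"}
//	200 OK              {"accessToken": "<access JWT>", "jid": "<refresh JWT>"}
//	Set-Cookie:         jid=<refresh JWT>; Path=/api/refresh_token; HttpOnly; Secure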
// Handle refresh-token requests
func RefreshHandler(w http.ResponseWriter, r *http.Request) {
// Check if content type is "application/json"
if r.Header.Get("Content-Type") != "" {
value, _ := header.ParseValueAndParams(r.Header, "Content-Type")
if value != "application/json" {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: "Content-Type header is not application/json",
})
return
}
}
// Read a maximum of 1MB from body
r.Body = http.MaxBytesReader(w, r.Body, 1048576)
// Create a JSON decoder and decode the request JSON
decoder := json.NewDecoder(r.Body)
decoder.DisallowUnknownFields()
var loginData loginResponse
err := decoder.Decode(&loginData)
// If any error occurred during the decoding, send an appropriate response
if err != nil {
var syntaxError *json.SyntaxError
var unmarshalTypeError *json.UnmarshalTypeError
// Return errors based on what error JSON parser returned
switch {
case errors.As(err, &syntaxError):
msg := fmt.Sprintf("Request body contains badly-formed JSON (at position %d)", syntaxError.Offset)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
case errors.Is(err, io.ErrUnexpectedEOF):
msg := "Request body contains badly-formed JSON"
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
case errors.As(err, &unmarshalTypeError):
msg := fmt.Sprintf("Request body contains an invalid value for the %q field (at position %d)", unmarshalTypeError.Field, unmarshalTypeError.Offset)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
case strings.HasPrefix(err.Error(), "json: unknown field "):
fieldName := strings.TrimPrefix(err.Error(), "json: unknown field ")
msg := fmt.Sprintf("Request body contains unknown field %s", fieldName)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
case err.Error() == "http: request body too large":
msg := "Request body must not be larger than 1MB"
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusRequestEntityTooLarge)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
default:
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusInternalServerError)
json.NewEncoder(w).Encode(common.HTTPError{
Error: err.Error(),
})
}
return
}
// Decode again to verify the request body contains only a single JSON object
err = decoder.Decode(&struct{}{})
if err != io.EOF {
msg := "Request body must only contain a single JSON object"
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
return
}
cookieString, err := r.Cookie("jid")
if err != nil && loginData.JID == "" {
msg := "Refresh Token not present"
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
return
}
var token string
if err == nil && cookieString.String() != "" {
token = splitCookie(cookieString.String())
} else {
token = loginData.JID
}
claims, verified, err := verifyRefreshToken(token)
if (err != nil) || (!verified) {
msg := fmt.Sprintf("Unauthorized: %v", err)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
return
}
userID, ok := claims["username"].(string)
if !ok {
msg := "Invalid refresh token"
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
return
}
refTok, err := generateRefreshToken(userID)
if err != nil {
msg := "Invalid refresh token"
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
return
}
// Send the refresh token in a HTTPOnly cookie
c := http.Cookie{
Name: "jid",
Value: refTok,
HttpOnly: true,
Secure: true,
Path: "/api/refresh_token",
}
http.SetCookie(w, &c)
accessTok, err := generateAccessToken(userID)
if err != nil {
msg := "Invalid refresh token"
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(common.HTTPError{
Error: msg,
})
return
}
// Set the response headers
w.Header().Set("Content-Type", "application/json")
// Send the access token in JSON
json.NewEncoder(w).Encode(loginResponse{
AccessToken: accessTok,
JID: refTok,
})
}
// Check header of request and authenticate
func Authenticate(authHeader string) (string, error) {
tokenString := SplitAuthToken(authHeader)
data, isAuth, err := VerifyAccessToken(tokenString)
if (err != nil) || !isAuth {
return "", errors.New("Unauthorized")
}
username := data["username"].(string)
return username, nil
}
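// Usage sketch (illustrative, not from this file): an HTTP handler or resolver can gate
// access with Authenticate, e.g.
//
//	username, err := auth.Authenticate(r.Header.Get("Authorization"))
//	if err != nil {
//		// respond with 401 Unauthorized
//	}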
| [
"\"ACCESS_SECRET\"",
"\"REFRESH_SECRET\"",
"\"ACCESS_SECRET\"",
"\"REFRESH_SECRET\""
]
| []
| [
"ACCESS_SECRET",
"REFRESH_SECRET"
]
| [] | ["ACCESS_SECRET", "REFRESH_SECRET"] | go | 2 | 0 | |
pkg/lifecycle/shutdown/shutdown.go | // Copyright (c) 2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shutdown
import (
"os"
"time"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/projectcalico/node/pkg/lifecycle/utils"
)
// This file contains the main shutdown processing for the calico/node. This
// includes:
// - Save time stamp to shutdown file.
// - Set node condition to "networkUnavailable=true"
func Run() {
// Save shutdown timestamp immediately.
// Depending on how the termination grace period is configured,
// the shutdown process can be killed at any time.
if err := utils.SaveShutdownTimestamp(); err != nil {
log.WithError(err).Errorf("Unable to save shutdown timestamp")
}
// Determine the name for this node.
nodeName := utils.DetermineNodeName()
log.Infof("Shutting down node %s", nodeName)
var clientset *kubernetes.Clientset
// If running under kubernetes with secrets to call k8s API
if config, err := rest.InClusterConfig(); err == nil {
// default timeout is 30 seconds, which isn't appropriate for this kind of
// shutdown action because network services, like kube-proxy might not be
// running and we don't want to block the full 30 seconds if they are just
// a few seconds behind.
config.Timeout = 2 * time.Second
// Create the k8s clientset.
clientset, err = kubernetes.NewForConfig(config)
if err != nil {
log.WithError(err).Error("Failed to create clientset")
return
}
}
// If Calico is running in policy only mode we don't need to set node conditions.
if os.Getenv("CALICO_NETWORKING_BACKEND") != "none" {
if clientset != nil {
// Determine the Kubernetes node name. Default to the Calico node name unless an explicit
// value is provided.
k8sNodeName := nodeName
if nodeRef := os.Getenv("CALICO_K8S_NODE_REF"); nodeRef != "" {
k8sNodeName = nodeRef
}
hundredYears := 876600 * time.Hour
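// 876,600 hours = 24 h/day * 365.25 days/year * 100 years, i.e. roughly one hundred years.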
// Set node condition with a big timeout value (100 years).
// The maximum execution time for the shutdown process is defined by terminationGracePeriod of calico-node.
// Depending on how terminationGracePeriod is configured (currently 5 seconds with the operator install),
// this operation may not succeed if updating the node condition takes too long.
err := utils.SetNodeNetworkUnavailableCondition(*clientset, k8sNodeName, true, hundredYears)
if err != nil {
log.WithError(err).Error("Unable to set NetworkUnavailable to true")
return
}
}
}
}
| [
"\"CALICO_NETWORKING_BACKEND\"",
"\"CALICO_K8S_NODE_REF\""
]
| []
| [
"CALICO_K8S_NODE_REF",
"CALICO_NETWORKING_BACKEND"
]
| [] | ["CALICO_K8S_NODE_REF", "CALICO_NETWORKING_BACKEND"] | go | 2 | 0 | |
pkg/store/secret_service_test.go | // +build linux
package store_test
import (
"os"
"testing"
"github.com/99designs/keyring"
"github.com/joemiller/vault-token-helper/pkg/store"
"github.com/stretchr/testify/assert"
)
func TestSecretServiceStore(t *testing.T) {
// TODO: get this working in CI. The current blocker is the need for a dbus prompter service that
// can be driven automatically and headlessly.
if os.Getenv("CI") != "" {
t.Skip("Skipping testing in CI environment")
}
st, err := store.New(keyring.Config{
ServiceName: "test",
KeychainTrustApplication: true,
LibSecretCollectionName: "test",
AllowedBackends: []keyring.BackendType{keyring.SecretServiceBackend},
})
assert.Nil(t, err)
assert.NotNil(t, st)
// should be empty
tokens, err := st.List()
assert.Nil(t, err)
assert.Empty(t, tokens)
// Get of a missing item should not return an error
_, err = st.Get("https://localhost:8200")
assert.Nil(t, err)
// Store a token
token1 := store.Token{
VaultAddr: "https://localhost:8200",
Token: "token-foo",
}
err = st.Store(token1)
assert.Nil(t, err)
// GetAll tokens
tokens, err = st.List()
assert.Nil(t, err)
assert.NotEmpty(t, tokens)
// Get a token by addr. Mixed case addr should be normalized for a successful lookup
v1, err := st.Get("httpS://LOCALhost:8200/")
assert.Nil(t, err)
assert.Equal(t, token1, v1)
// Erase
err = st.Erase("https://localhost:8200")
assert.Nil(t, err)
// empty token store
tokens, err = st.List()
assert.Nil(t, err)
assert.Empty(t, tokens)
}
| [
"\"CI\""
]
| []
| [
"CI"
]
| [] | ["CI"] | go | 1 | 0 | |
flow/envs/base_env.py | """Base environment class. This is the parent of all other environments."""
from copy import deepcopy
import os
import atexit
import time
import traceback
import numpy as np
import random
from flow.renderer.pyglet_renderer import PygletRenderer as Renderer
import gym
from gym.spaces import Box
from traci.exceptions import FatalTraCIError
from traci.exceptions import TraCIException
import sumolib
try:
# Import serializable if rllab is installed
from rllab.core.serializable import Serializable
serializable_flag = True
except ImportError:
serializable_flag = False
from flow.core.util import ensure_dir
from flow.core.kernel import Kernel
from flow.utils.exceptions import FatalFlowError
# pick out the correct class definition
if serializable_flag:
classdef = (gym.Env, Serializable)
else:
classdef = (gym.Env,)
class Env(*classdef):
"""Base environment class.
Provides the interface for controlling a SUMO simulation. Using this
class, you can start sumo, provide a scenario to specify a
configuration and controllers, perform simulation steps, and reset the
simulation to an initial configuration.
Env is Serializable to allow for pickling and replaying of the policy.
This class cannot be used as is: you must extend it to implement an
action applicator method, and properties to define the MDP if you
choose to use it with an rl library (e.g. RLlib). This can be done by
overloading the following functions in a child class:
* action_space
* observation_space
* _apply_rl_actions
* get_state
* compute_reward
Attributes
----------
env_params : flow.core.params.EnvParams
see flow/core/params.py
sim_params : flow.core.params.SimParams
see flow/core/params.py
scenario : flow.scenarios.Scenario
see flow/scenarios/base_scenario.py
simulator : str
the simulator used, one of {'traci', 'aimsun'}. Defaults to 'traci'
"""
def __init__(self, env_params, sim_params, scenario, simulator='traci'):
"""Initialize the environment class.
Parameters
----------
env_params : flow.core.params.EnvParams
see flow/core/params.py
sim_params : flow.core.params.SimParams
see flow/core/params.py
scenario : flow.scenarios.Scenario
see flow/scenarios/base_scenario.py
simulator : str
the simulator used, one of {'traci', 'aimsun'}. Defaults to 'traci'
Raises
------
flow.utils.exceptions.FatalFlowError
if the render mode is not set to a valid value
"""
# Invoke serializable if using rllab
if serializable_flag:
Serializable.quick_init(self, locals())
self.env_params = env_params
self.scenario = scenario
self.sim_params = sim_params
time_stamp = ''.join(str(time.time()).split('.'))
if os.environ.get("TEST_FLAG", 0):
# 1.0 works with stress_test_start 10k times
time.sleep(1.0 * int(time_stamp[-6:]) / 1e6)
# FIXME: this is sumo-specific
self.sim_params.port = sumolib.miscutils.getFreeSocketPort()
# time_counter: number of steps taken since the start of a rollout
self.time_counter = 0
# step_counter: number of total steps taken
self.step_counter = 0
# initial_state:
# Key = Vehicle ID,
# Entry = (type_id, route_id, lane_index, lane_pos, speed, pos)
self.initial_state = {}
self.state = None
self.obs_var_labels = []
# simulation step size
self.sim_step = sim_params.sim_step
# the simulator used by this environment
self.simulator = simulator
# create the Flow kernel
self.k = Kernel(simulator=self.simulator,
sim_params=sim_params)
# use the scenario class's network parameters to generate the necessary
# scenario components within the scenario kernel
self.k.scenario.generate_network(scenario)
# initialize the vehicles kernel using the VehicleParams object
self.k.vehicle.initialize(deepcopy(scenario.vehicles))
# initialize the simulation using the simulation kernel. This will use
# the scenario kernel as an input in order to determine what network
# needs to be simulated.
kernel_api = self.k.simulation.start_simulation(
scenario=self.k.scenario, sim_params=sim_params)
# pass the kernel api to the kernel and it's subclasses
self.k.pass_api(kernel_api)
# the available_routes variable contains a dictionary of routes
# vehicles can traverse; to be used when routes need to be chosen
# dynamically
self.available_routes = self.k.scenario.rts
# store the initial vehicle ids
self.initial_ids = deepcopy(scenario.vehicles.ids)
# store the initial state of the vehicles kernel (needed for restarting
# the simulation)
self.k.vehicle.kernel_api = None
self.k.vehicle.master_kernel = None
self.initial_vehicles = deepcopy(self.k.vehicle)
self.k.vehicle.kernel_api = self.k.kernel_api
self.k.vehicle.master_kernel = self.k
self.setup_initial_state()
# use pyglet to render the simulation
if self.sim_params.render in ['gray', 'dgray', 'rgb', 'drgb']:
save_render = self.sim_params.save_render
sight_radius = self.sim_params.sight_radius
pxpm = self.sim_params.pxpm
show_radius = self.sim_params.show_radius
# get network polygons
network = []
# FIXME: add to scenario kernel instead of hack
for lane_id in self.k.kernel_api.lane.getIDList():
_lane_poly = self.k.kernel_api.lane.getShape(lane_id)
lane_poly = [i for pt in _lane_poly for i in pt]
network.append(lane_poly)
# instantiate a pyglet renderer
self.renderer = Renderer(
network,
self.sim_params.render,
save_render,
sight_radius=sight_radius,
pxpm=pxpm,
show_radius=show_radius)
# render a frame
self.render(reset=True)
elif self.sim_params.render in [True, False]:
pass # default to sumo-gui (if True) or sumo (if False)
else:
raise FatalFlowError(
'Mode %s is not supported!' % self.sim_params.render)
atexit.register(self.terminate)
def restart_simulation(self, sim_params, render=None):
"""Restart an already initialized simulation instance.
This is used when visualizing a rollout, in order to update the
rendering with potentially a gui and export emission data from sumo.
This is also used to handle cases when the runtime of an experiment is
too long, in which case the sumo instance is restarted.
Parameters
----------
sim_params : flow.core.params.SimParams
simulation-specific parameters
render : bool, optional
specifies whether to use the gui
"""
self.k.close()
# kill the sumo process if using sumo/TraCI
if self.simulator == 'traci':
self.k.simulation.sumo_proc.kill()
if render is not None:
self.sim_params.render = render
if sim_params.emission_path is not None:
ensure_dir(sim_params.emission_path)
self.sim_params.emission_path = sim_params.emission_path
self.k.scenario.generate_network(self.scenario)
self.k.vehicle.initialize(deepcopy(self.scenario.vehicles))
kernel_api = self.k.simulation.start_simulation(
scenario=self.k.scenario, sim_params=self.sim_params)
self.k.pass_api(kernel_api)
self.setup_initial_state()
def setup_initial_state(self):
"""Store information on the initial state of vehicles in the network.
This information is to be used upon reset. This method also adds this
information to the self.vehicles class and starts a subscription with
sumo to collect state information each step.
"""
# determine whether to shuffle the vehicles
if self.scenario.initial_config.shuffle:
random.shuffle(self.initial_ids)
# generate starting position for vehicles in the network
start_pos, start_lanes = self.k.scenario.generate_starting_positions(
initial_config=self.scenario.initial_config,
num_vehicles=len(self.initial_ids))
# save the initial state. This is used in the _reset function
for i, veh_id in enumerate(self.initial_ids):
type_id = self.scenario.vehicles.get_type(veh_id)
pos = start_pos[i][1]
lane = start_lanes[i]
speed = self.scenario.vehicles.get_initial_speed(veh_id)
edge = start_pos[i][0]
self.initial_state[veh_id] = (type_id, edge, lane, pos, speed)
def step(self, rl_actions):
"""Advance the environment by one step.
Assigns actions to autonomous and human-driven agents (i.e. vehicles,
traffic lights, etc...). Actions that are not assigned are left to the
control of the simulator. The actions are then used to advance the
simulator by the number of time steps requested per environment step.
Results from the simulations are processed through various classes,
such as the Vehicle and TrafficLight kernels, to produce standardized
methods for identifying specific network state features. Finally,
results from the simulator are used to generate appropriate
observations.
Parameters
----------
rl_actions : array_like
an list of actions provided by the rl algorithm
Returns
-------
observation : array_like
agent's observation of the current environment
reward : float
amount of reward associated with the previous state/action pair
done : bool
indicates whether the episode has ended
info : dict
contains other diagnostic information from the previous action
"""
for _ in range(self.env_params.sims_per_step):
self.time_counter += 1
self.step_counter += 1
# perform acceleration actions for controlled human-driven vehicles
if len(self.k.vehicle.get_controlled_ids()) > 0:
accel = []
for veh_id in self.k.vehicle.get_controlled_ids():
action = self.k.vehicle.get_acc_controller(
veh_id).get_action(self)
accel.append(action)
self.k.vehicle.apply_acceleration(
self.k.vehicle.get_controlled_ids(), accel)
# perform lane change actions for controlled human-driven vehicles
if len(self.k.vehicle.get_controlled_lc_ids()) > 0:
direction = []
for veh_id in self.k.vehicle.get_controlled_lc_ids():
target_lane = self.k.vehicle.get_lane_changing_controller(
veh_id).get_action(self)
direction.append(target_lane)
self.k.vehicle.apply_lane_change(
self.k.vehicle.get_controlled_lc_ids(),
direction=direction)
# perform (optionally) routing actions for all vehicles in the
# network, including RL and SUMO-controlled vehicles
routing_ids = []
routing_actions = []
for veh_id in self.k.vehicle.get_ids():
if self.k.vehicle.get_routing_controller(veh_id) \
is not None:
routing_ids.append(veh_id)
route_contr = self.k.vehicle.get_routing_controller(
veh_id)
routing_actions.append(route_contr.choose_route(self))
self.k.vehicle.choose_routes(routing_ids, routing_actions)
self.apply_rl_actions(rl_actions)
self.additional_command()
# advance the simulation in the simulator by one step
self.k.simulation.simulation_step()
# store new observations in the vehicles and traffic lights class
self.k.update(reset=False)
# update the colors of vehicles
if self.sim_params.render:
self.k.vehicle.update_vehicle_colors()
# crash encodes whether the simulator experienced a collision
crash = self.k.simulation.check_collision()
# stop collecting new simulation steps if there is a collision
if crash:
break
# render a frame
self.render()
states = self.get_state()
# collect information of the state of the network based on the
# environment class used
self.state = np.asarray(states).T
# collect the new observation associated with the action
next_observation = np.copy(states)
# test if the environment should terminate due to a collision or the
# time horizon being met
done = crash or (self.time_counter >= self.env_params.warmup_steps
+ self.env_params.horizon)
# compute the info for each agent
infos = {}
# compute the reward
rl_clipped = self.clip_actions(rl_actions)
reward = self.compute_reward(rl_clipped, fail=crash)
return next_observation, reward, done, infos
def reset(self):
"""Reset the environment.
This method is performed in between rollouts. It resets the state of
the environment, and re-initializes the vehicles in their starting
positions.
If "shuffle" is set to True in InitialConfig, the initial positions of
vehicles is recalculated and the vehicles are shuffled.
Returns
-------
observation : array_like
the initial observation of the space. The initial reward is assumed
to be zero.
"""
# reset the time counter
self.time_counter = 0
# warn about not using restart_instance when using inflows
if len(self.scenario.net_params.inflows.get()) > 0 and \
not self.sim_params.restart_instance:
print(
"**********************************************************\n"
"**********************************************************\n"
"**********************************************************\n"
"WARNING: Inflows will cause computational performance to\n"
"significantly decrease after large number of rollouts. In \n"
"order to avoid this, set SumoParams(restart_instance=True).\n"
"**********************************************************\n"
"**********************************************************\n"
"**********************************************************"
)
if self.sim_params.restart_instance or \
(self.step_counter > 2e6 and self.simulator != 'aimsun'):
self.step_counter = 0
# issue a random seed to induce randomness into the next rollout
self.sim_params.seed = random.randint(0, int(1e5))
self.k.vehicle = deepcopy(self.initial_vehicles)
self.k.vehicle.master_kernel = self.k
# restart the sumo instance
self.restart_simulation(self.sim_params)
# perform shuffling (if requested)
elif self.scenario.initial_config.shuffle:
self.setup_initial_state()
# clear all vehicles from the network and the vehicles class
if self.simulator == 'traci':
for veh_id in self.k.kernel_api.vehicle.getIDList(): # FIXME: hack
try:
self.k.vehicle.remove(veh_id)
except (FatalTraCIError, TraCIException):
pass
# clear all vehicles from the network and the vehicles class
# FIXME (ev, ak) this is weird and shouldn't be necessary
for veh_id in list(self.k.vehicle.get_ids()):
# do not try to remove the vehicles from the network in the first
# step after initializing the network, as there will be no vehicles
if self.step_counter == 0:
continue
try:
self.k.vehicle.remove(veh_id)
except (FatalTraCIError, TraCIException):
print("Error during start: {}".format(traceback.format_exc()))
# reintroduce the initial vehicles to the network
for veh_id in self.initial_ids:
type_id, edge, lane_index, pos, speed = \
self.initial_state[veh_id]
try:
self.k.vehicle.add(
veh_id=veh_id,
type_id=type_id,
edge=edge,
lane=lane_index,
pos=pos,
speed=speed)
except (FatalTraCIError, TraCIException):
# if a vehicle was not removed in the first attempt, remove it
# now and then reintroduce it
self.k.vehicle.remove(veh_id)
if self.simulator == 'traci':
self.k.kernel_api.vehicle.remove(veh_id) # FIXME: hack
self.k.vehicle.add(
veh_id=veh_id,
type_id=type_id,
edge=edge,
lane=lane_index,
pos=pos,
speed=speed)
# advance the simulation in the simulator by one step
self.k.simulation.simulation_step()
# update the information in each kernel to match the current state
self.k.update(reset=True)
# update the colors of vehicles
if self.sim_params.render:
self.k.vehicle.update_vehicle_colors()
# check to make sure all vehicles have been spawned
if len(self.initial_ids) > self.k.vehicle.num_vehicles:
missing_vehicles = list(
set(self.initial_ids) - set(self.k.vehicle.get_ids()))
msg = '\nNot enough vehicles have spawned! Bad start?\n' \
'Missing vehicles / initial state:\n'
for veh_id in missing_vehicles:
msg += '- {}: {}\n'.format(veh_id, self.initial_state[veh_id])
raise FatalFlowError(msg=msg)
states = self.get_state()
# collect information of the state of the network based on the
# environment class used
self.state = np.asarray(states).T
# observation associated with the reset (no warm-up steps)
observation = np.copy(states)
# perform (optional) warm-up steps before training
for _ in range(self.env_params.warmup_steps):
observation, _, _, _ = self.step(rl_actions=None)
# render a frame
self.render(reset=True)
return observation
def additional_command(self):
"""Additional commands that may be performed by the step method."""
pass
def clip_actions(self, rl_actions=None):
"""Clip the actions passed from the RL agent.
Parameters
----------
rl_actions : array_like
list of actions provided by the RL algorithm
Returns
-------
array_like
The rl_actions clipped according to the box
"""
# ignore if no actions are issued
if rl_actions is None:
return None
# clip according to the action space requirements
if isinstance(self.action_space, Box):
rl_actions = np.clip(
rl_actions,
a_min=self.action_space.low,
a_max=self.action_space.high)
return rl_actions
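# For example (illustrative): with an action space of Box(low=-1, high=1, shape=(2,)),
# rl_actions = [1.5, -0.3] is clipped to [1.0, -0.3] before being applied.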
def apply_rl_actions(self, rl_actions=None):
"""Specify the actions to be performed by the rl agent(s).
If no actions are provided at any given step, the rl agents default to
performing actions specified by SUMO.
Parameters
----------
rl_actions : array_like
list of actions provided by the RL algorithm
"""
# ignore if no actions are issued
if rl_actions is None:
return
rl_clipped = self.clip_actions(rl_actions)
self._apply_rl_actions(rl_clipped)
def _apply_rl_actions(self, rl_actions):
raise NotImplementedError
def get_state(self):
"""Return the state of the simulation as perceived by the RL agent.
MUST BE implemented in new environments.
Returns
-------
state : array_like
information on the state of the vehicles, which is provided to the
agent
"""
raise NotImplementedError
@property
def action_space(self):
"""Identify the dimensions and bounds of the action space.
MUST BE implemented in new environments.
Returns
-------
gym Box or Tuple type
a bounded box depicting the shape and bounds of the action space
"""
raise NotImplementedError
@property
def observation_space(self):
"""Identify the dimensions and bounds of the observation space.
MUST BE implemented in new environments.
Returns
-------
gym Box or Tuple type
a bounded box depicting the shape and bounds of the observation
space
"""
raise NotImplementedError
def compute_reward(self, rl_actions, **kwargs):
"""Reward function for the RL agent(s).
MUST BE implemented in new environments.
Defaults to 0 for non-implemented environments.
Parameters
----------
rl_actions : array_like
actions performed by rl vehicles
kwargs : dict
other parameters of interest. Contains a "fail" element, which
is True if a vehicle crashed, and False otherwise
Returns
-------
reward : float or list of float
"""
return 0
def terminate(self):
"""Close the TraCI I/O connection.
Should be done at end of every experiment. Must be in Env because the
environment opens the TraCI connection.
"""
try:
print(
"Closing connection to TraCI and stopping simulation.\n"
"Note, this may print an error message when it closes."
)
self.k.close()
# close pyglet renderer
if self.sim_params.render in ['gray', 'dgray', 'rgb', 'drgb']:
self.renderer.close()
except FileNotFoundError:
print("Skip automatic termination. "
"Connection is probably already closed.")
def render(self, reset=False, buffer_length=5):
"""Render a frame.
Parameters
----------
reset : bool
set to True to reset the buffer
buffer_length : int
length of the buffer
"""
if self.sim_params.render in ['gray', 'dgray', 'rgb', 'drgb']:
# render a frame
self.pyglet_render()
# cache rendering
if reset:
self.frame_buffer = [self.frame.copy() for _ in range(5)]
self.sights_buffer = [self.sights.copy() for _ in range(5)]
else:
if self.step_counter % int(1/self.sim_step) == 0:
self.frame_buffer.append(self.frame.copy())
self.sights_buffer.append(self.sights.copy())
if len(self.frame_buffer) > buffer_length:
self.frame_buffer.pop(0)
self.sights_buffer.pop(0)
def pyglet_render(self):
"""Render a frame using pyglet."""
# get human and RL simulation status
human_idlist = self.k.vehicle.get_human_ids()
machine_idlist = self.k.vehicle.get_rl_ids()
human_logs = []
human_orientations = []
human_dynamics = []
machine_logs = []
machine_orientations = []
machine_dynamics = []
max_speed = self.k.scenario.max_speed()
for id in human_idlist:
# Force tracking human vehicles by adding "track" in vehicle id.
# The tracked human vehicles will be treated as machine vehicles.
if 'track' in id:
machine_logs.append(
[self.k.vehicle.get_timestep(id),
self.k.vehicle.get_timedelta(id),
id])
machine_orientations.append(
self.k.vehicle.get_orientation(id))
machine_dynamics.append(
self.k.vehicle.get_speed(id)/max_speed)
else:
human_logs.append(
[self.k.vehicle.get_timestep(id),
self.k.vehicle.get_timedelta(id),
id])
human_orientations.append(
self.k.vehicle.get_orientation(id))
human_dynamics.append(
self.k.vehicle.get_speed(id)/max_speed)
for id in machine_idlist:
machine_logs.append(
[self.k.vehicle.get_timestep(id),
self.k.vehicle.get_timedelta(id),
id])
machine_orientations.append(
self.k.vehicle.get_orientation(id))
machine_dynamics.append(
self.k.vehicle.get_speed(id)/max_speed)
# step the renderer
self.frame = self.renderer.render(human_orientations,
machine_orientations,
human_dynamics,
machine_dynamics,
human_logs,
machine_logs)
# get local observation of RL vehicles
self.sights = []
for id in human_idlist:
# Force tracking human vehicles by adding "track" in vehicle id.
# The tracked human vehicles will be treated as machine vehicles.
if "track" in id:
orientation = self.k.vehicle.get_orientation(id)
sight = self.renderer.get_sight(
orientation, id)
self.sights.append(sight)
for id in machine_idlist:
orientation = self.k.vehicle.get_orientation(id)
sight = self.renderer.get_sight(
orientation, id)
self.sights.append(sight)
| []
| []
| [
"TEST_FLAG"
]
| [] | ["TEST_FLAG"] | python | 1 | 0 | |
vendor/cloud.google.com/go/spanner/client.go | /*
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spanner
import (
"context"
"fmt"
"os"
"regexp"
"sync/atomic"
"time"
"cloud.google.com/go/internal/trace"
vkit "cloud.google.com/go/spanner/apiv1"
"cloud.google.com/go/spanner/internal/backoff"
"google.golang.org/api/option"
sppb "google.golang.org/genproto/googleapis/spanner/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
const (
endpoint = "spanner.googleapis.com:443"
// resourcePrefixHeader is the name of the metadata header used to indicate
// the resource being operated on.
resourcePrefixHeader = "google-cloud-resource-prefix"
)
const (
// Scope is the scope for Cloud Spanner Data API.
Scope = "https://www.googleapis.com/auth/spanner.data"
// AdminScope is the scope for Cloud Spanner Admin APIs.
AdminScope = "https://www.googleapis.com/auth/spanner.admin"
)
var (
validDBPattern = regexp.MustCompile("^projects/[^/]+/instances/[^/]+/databases/[^/]+$")
)
func validDatabaseName(db string) error {
if matched := validDBPattern.MatchString(db); !matched {
return fmt.Errorf("database name %q should conform to pattern %q",
db, validDBPattern.String())
}
return nil
}
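// For example, "projects/my-project/instances/my-instance/databases/my-db" (hypothetical IDs)
// matches validDBPattern, while a bare database ID such as "my-db" would be rejected.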
// Client is a client for reading and writing data to a Cloud Spanner database.
// A client is safe to use concurrently, except for its Close method.
type Client struct {
// rr must be accessed through atomic operations.
rr uint32
clients []*vkit.Client
database string
// Metadata to be sent with each request.
md metadata.MD
idleSessions *sessionPool
// sessionLabels for the sessions created by this client.
sessionLabels map[string]string
}
// ClientConfig has configurations for the client.
type ClientConfig struct {
// NumChannels is the number of gRPC channels.
// If zero, a reasonable default is used based on the execution environment.
NumChannels int
// SessionPoolConfig is the configuration for session pool.
SessionPoolConfig
// SessionLabels for the sessions created by this client.
// See https://cloud.google.com/spanner/docs/reference/rpc/google.spanner.v1#session
// for more info.
SessionLabels map[string]string
}
// errDial returns error for dialing to Cloud Spanner.
func errDial(ci int, err error) error {
e := toSpannerError(err).(*Error)
e.decorate(fmt.Sprintf("dialing fails for channel[%v]", ci))
return e
}
func contextWithOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context {
existing, ok := metadata.FromOutgoingContext(ctx)
if ok {
md = metadata.Join(existing, md)
}
return metadata.NewOutgoingContext(ctx, md)
}
// NewClient creates a client to a database. A valid database name has the
// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID. It uses
// a default configuration.
func NewClient(ctx context.Context, database string, opts ...option.ClientOption) (*Client, error) {
return NewClientWithConfig(ctx, database, ClientConfig{}, opts...)
}
// NewClientWithConfig creates a client to a database. A valid database name has
// the form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID.
func NewClientWithConfig(ctx context.Context, database string, config ClientConfig, opts ...option.ClientOption) (c *Client, err error) {
c = &Client{
database: database,
md: metadata.Pairs(resourcePrefixHeader, database),
}
// Make a copy of labels.
c.sessionLabels = make(map[string]string)
for k, v := range config.SessionLabels {
c.sessionLabels[k] = v
}
// Prepare gRPC channels.
if config.NumChannels == 0 {
config.NumChannels = numChannels
}
// Default configs for session pool.
if config.MaxOpened == 0 {
config.MaxOpened = uint64(config.NumChannels * 100)
}
if config.MaxBurst == 0 {
config.MaxBurst = 10
}
// Validate database path.
if err := validDatabaseName(database); err != nil {
return nil, err
}
ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.NewClient")
defer func() { trace.EndSpan(ctx, err) }()
// Append emulator options if SPANNER_EMULATOR_HOST has been set.
if emulatorAddr := os.Getenv("SPANNER_EMULATOR_HOST"); emulatorAddr != "" {
emulatorOpts := []option.ClientOption{
option.WithEndpoint(emulatorAddr),
option.WithGRPCDialOption(grpc.WithInsecure()),
option.WithoutAuthentication(),
}
opts = append(opts, emulatorOpts...)
}
// gRPC options.
allOpts := []option.ClientOption{
option.WithEndpoint(endpoint),
option.WithScopes(Scope),
option.WithGRPCDialOption(
grpc.WithDefaultCallOptions(
grpc.MaxCallSendMsgSize(100<<20),
grpc.MaxCallRecvMsgSize(100<<20),
),
),
}
allOpts = append(allOpts, opts...)
// TODO(deklerk): This should be replaced with a balancer with
// config.NumChannels connections, instead of config.NumChannels
// clients.
for i := 0; i < config.NumChannels; i++ {
client, err := vkit.NewClient(ctx, allOpts...)
if err != nil {
return nil, errDial(i, err)
}
c.clients = append(c.clients, client)
}
// Prepare session pool.
// TODO: support more loadbalancing options.
config.SessionPoolConfig.getRPCClient = func() (*vkit.Client, error) {
return c.rrNext(), nil
}
config.SessionPoolConfig.sessionLabels = c.sessionLabels
sp, err := newSessionPool(database, config.SessionPoolConfig, c.md)
if err != nil {
c.Close()
return nil, err
}
c.idleSessions = sp
return c, nil
}
// rrNext returns the next available vkit Cloud Spanner RPC client in a
// round-robin manner.
func (c *Client) rrNext() *vkit.Client {
return c.clients[atomic.AddUint32(&c.rr, 1)%uint32(len(c.clients))]
}
// Close closes the client.
func (c *Client) Close() {
if c.idleSessions != nil {
c.idleSessions.close()
}
for _, gpc := range c.clients {
gpc.Close()
}
}
// Single provides a read-only snapshot transaction optimized for the case
// where only a single read or query is needed. This is more efficient than
// using ReadOnlyTransaction() for a single read or query.
//
// Single will use a strong TimestampBound by default. Use
// ReadOnlyTransaction.WithTimestampBound to specify a different
// TimestampBound. A non-strong bound can be used to reduce latency, or
// "time-travel" to prior versions of the database, see the documentation of
// TimestampBound for details.
func (c *Client) Single() *ReadOnlyTransaction {
t := &ReadOnlyTransaction{singleUse: true, sp: c.idleSessions}
t.txReadOnly.txReadEnv = t
return t
}
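// Illustrative sketch (editor's addition): the single-use, read-only pattern
// this method is optimized for. Table, key and column names are placeholders.
//
//	row, err := client.Single().ReadRow(ctx, "Accounts", Key{"alice"}, []string{"Balance"})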
// ReadOnlyTransaction returns a ReadOnlyTransaction that can be used for
// multiple reads from the database. You must call Close() when the
// ReadOnlyTransaction is no longer needed to release resources on the server.
//
// ReadOnlyTransaction will use a strong TimestampBound by default. Use
// ReadOnlyTransaction.WithTimestampBound to specify a different
// TimestampBound. A non-strong bound can be used to reduce latency, or
// "time-travel" to prior versions of the database, see the documentation of
// TimestampBound for details.
func (c *Client) ReadOnlyTransaction() *ReadOnlyTransaction {
t := &ReadOnlyTransaction{
singleUse: false,
sp: c.idleSessions,
txReadyOrClosed: make(chan struct{}),
}
t.txReadOnly.txReadEnv = t
return t
}
// BatchReadOnlyTransaction returns a BatchReadOnlyTransaction that can be used
// for partitioned reads or queries from a snapshot of the database. This is
// useful in batch processing pipelines where one wants to divide the work of
// reading from the database across multiple machines.
//
// Note: This transaction does not use the underlying session pool but creates a
// new session each time, and the session is reused across clients.
//
// You should call Close() once the txn is no longer needed on the local
// client, and call Cleanup() once the txn is finished for all clients, to free
// the session.
func (c *Client) BatchReadOnlyTransaction(ctx context.Context, tb TimestampBound) (*BatchReadOnlyTransaction, error) {
var (
tx transactionID
rts time.Time
s *session
sh *sessionHandle
err error
)
defer func() {
if err != nil && sh != nil {
s.delete(ctx)
}
}()
// Create session.
sc := c.rrNext()
s, err = createSession(ctx, sc, c.database, c.sessionLabels, c.md)
if err != nil {
return nil, err
}
sh = &sessionHandle{session: s}
// Begin transaction.
res, err := sh.getClient().BeginTransaction(contextWithOutgoingMetadata(ctx, sh.getMetadata()), &sppb.BeginTransactionRequest{
Session: sh.getID(),
Options: &sppb.TransactionOptions{
Mode: &sppb.TransactionOptions_ReadOnly_{
ReadOnly: buildTransactionOptionsReadOnly(tb, true),
},
},
})
if err != nil {
return nil, toSpannerError(err)
}
tx = res.Id
if res.ReadTimestamp != nil {
rts = time.Unix(res.ReadTimestamp.Seconds, int64(res.ReadTimestamp.Nanos))
}
t := &BatchReadOnlyTransaction{
ReadOnlyTransaction: ReadOnlyTransaction{
tx: tx,
txReadyOrClosed: make(chan struct{}),
state: txActive,
sh: sh,
rts: rts,
},
ID: BatchReadOnlyTransactionID{
tid: tx,
sid: sh.getID(),
rts: rts,
},
}
t.txReadOnly.txReadEnv = t
return t, nil
}
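// Illustrative sketch (editor's addition): partitioning a read so the work can
// be fanned out across workers. Table, key set and column names are
// placeholders; error handling is elided.
//
//	txn, err := client.BatchReadOnlyTransaction(ctx, StrongRead())
//	defer txn.Close()
//	partitions, err := txn.PartitionRead(ctx, "Accounts", AllKeys(), []string{"Name"}, PartitionOptions{})
//	for _, p := range partitions {
//		iter := txn.Execute(ctx, p) // each partition can be consumed on a different machine
//		_ = iter
//	}
//	// call txn.Cleanup(ctx) once every partition has been processed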
// BatchReadOnlyTransactionFromID reconstructs a BatchReadOnlyTransaction from
// a BatchReadOnlyTransactionID.
func (c *Client) BatchReadOnlyTransactionFromID(tid BatchReadOnlyTransactionID) *BatchReadOnlyTransaction {
sc := c.rrNext()
s := &session{valid: true, client: sc, id: tid.sid, createTime: time.Now(), md: c.md}
sh := &sessionHandle{session: s}
t := &BatchReadOnlyTransaction{
ReadOnlyTransaction: ReadOnlyTransaction{
tx: tid.tid,
txReadyOrClosed: make(chan struct{}),
state: txActive,
sh: sh,
rts: tid.rts,
},
ID: tid,
}
t.txReadOnly.txReadEnv = t
return t
}
type transactionInProgressKey struct{}
func checkNestedTxn(ctx context.Context) error {
if ctx.Value(transactionInProgressKey{}) != nil {
return spannerErrorf(codes.FailedPrecondition, "Cloud Spanner does not support nested transactions")
}
return nil
}
// ReadWriteTransaction executes a read-write transaction, with retries as
// necessary.
//
// The function f will be called one or more times. It must not maintain
// any state between calls.
//
// If the transaction cannot be committed or if f returns an ABORTED error,
// ReadWriteTransaction will call f again. It will continue to call f until the
// transaction can be committed or the Context times out or is cancelled. If f
// returns an error other than ABORTED, ReadWriteTransaction will abort the
// transaction and return the error.
//
// To limit the number of retries, set a deadline on the Context rather than
// using a fixed limit on the number of attempts. ReadWriteTransaction will
// retry as needed until that deadline is met.
//
// See https://godoc.org/cloud.google.com/go/spanner#ReadWriteTransaction for
// more details.
func (c *Client) ReadWriteTransaction(ctx context.Context, f func(context.Context, *ReadWriteTransaction) error) (commitTimestamp time.Time, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.ReadWriteTransaction")
defer func() { trace.EndSpan(ctx, err) }()
if err := checkNestedTxn(ctx); err != nil {
return time.Time{}, err
}
var (
ts time.Time
sh *sessionHandle
)
err = runWithRetryOnAborted(ctx, func(ctx context.Context) error {
var (
err error
t *ReadWriteTransaction
)
if sh == nil || sh.getID() == "" || sh.getClient() == nil {
// Session handle hasn't been allocated or has been destroyed.
sh, err = c.idleSessions.takeWriteSession(ctx)
if err != nil {
// If session retrieval fails, just fail the transaction.
return err
}
t = &ReadWriteTransaction{
sh: sh,
tx: sh.getTransactionID(),
}
} else {
t = &ReadWriteTransaction{
sh: sh,
}
}
t.txReadOnly.txReadEnv = t
trace.TracePrintf(ctx, map[string]interface{}{"transactionID": string(sh.getTransactionID())},
"Starting transaction attempt")
if err = t.begin(ctx); err != nil {
return err
}
ts, err = t.runInTransaction(ctx, f)
return err
})
if sh != nil {
sh.recycle()
}
return ts, err
}
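// Illustrative sketch (editor's addition): because the retry loop above may
// re-invoke f after an ABORTED error, f must not keep state between attempts.
// Table and column names are placeholders.
//
//	_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *ReadWriteTransaction) error {
//		row, err := txn.ReadRow(ctx, "Accounts", Key{"alice"}, []string{"Balance"})
//		if err != nil {
//			return err
//		}
//		var balance int64
//		if err := row.Column(0, &balance); err != nil {
//			return err
//		}
//		return txn.BufferWrite([]*Mutation{
//			Update("Accounts", []string{"Name", "Balance"}, []interface{}{"alice", balance + 10}),
//		})
//	})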
func runWithRetryOnAborted(ctx context.Context, f func(context.Context) error) error {
var funcErr error
retryCount := 0
for {
select {
case <-ctx.Done():
// Do the context check here so that even if f() failed to do so (for
// example, due to a gRPC implementation bug), the loop still has a
// chance to exit as expected.
return errContextCanceled(ctx, funcErr)
default:
}
funcErr = f(ctx)
if funcErr == nil {
return nil
}
// Only retry on ABORTED.
if isAbortErr(funcErr) {
// Aborted, do exponential backoff and continue.
b, ok := extractRetryDelay(funcErr)
if !ok {
b = backoff.DefaultBackoff.Delay(retryCount)
}
trace.TracePrintf(ctx, nil, "Backing off after ABORTED for %s, then retrying", b)
select {
case <-ctx.Done():
return errContextCanceled(ctx, funcErr)
case <-time.After(b):
}
retryCount++
continue
}
// Error isn't ABORTED / no error, return immediately.
return funcErr
}
}
// applyOption controls the behavior of Client.Apply.
type applyOption struct {
// If atLeastOnce == true, Client.Apply will execute the mutations on Cloud
// Spanner at least once.
atLeastOnce bool
}
// An ApplyOption is an optional argument to Apply.
type ApplyOption func(*applyOption)
// ApplyAtLeastOnce returns an ApplyOption that removes replay protection.
//
// With this option, Apply may attempt to apply mutations more than once; if
// the mutations are not idempotent, this may lead to a failure being reported
// when the mutation was applied more than once. For example, an insert may
// fail with ALREADY_EXISTS even though the row did not exist before Apply was
// called. For this reason, most users of the library will prefer not to use
// this option. However, ApplyAtLeastOnce requires only a single RPC, whereas
// Apply's default replay protection may require an additional RPC. So this
// option may be appropriate for latency sensitive and/or high throughput blind
// writing.
func ApplyAtLeastOnce() ApplyOption {
return func(ao *applyOption) {
ao.atLeastOnce = true
}
}
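// Illustrative sketch (editor's addition): a blind write that trades replay
// protection for a single RPC. The mutation shown is idempotent, which is what
// makes the at-least-once semantics acceptable here; names are placeholders.
//
//	_, err := client.Apply(ctx, []*Mutation{
//		InsertOrUpdate("Accounts", []string{"Name", "Balance"}, []interface{}{"alice", int64(100)}),
//	}, ApplyAtLeastOnce())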
// Apply applies a list of mutations atomically to the database.
func (c *Client) Apply(ctx context.Context, ms []*Mutation, opts ...ApplyOption) (commitTimestamp time.Time, err error) {
ao := &applyOption{}
for _, opt := range opts {
opt(ao)
}
if !ao.atLeastOnce {
return c.ReadWriteTransaction(ctx, func(ctx context.Context, t *ReadWriteTransaction) error {
return t.BufferWrite(ms)
})
}
ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.Apply")
defer func() { trace.EndSpan(ctx, err) }()
t := &writeOnlyTransaction{c.idleSessions}
return t.applyAtLeastOnce(ctx, ms...)
}
| [
"\"SPANNER_EMULATOR_HOST\""
]
| []
| [
"SPANNER_EMULATOR_HOST"
]
| [] | ["SPANNER_EMULATOR_HOST"] | go | 1 | 0 | |
dict/dict_test.go | package dict
import (
"os"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
)
func dictFile() string {
p := os.Getenv("EDICT")
if len(p) == 0 {
p = os.ExpandEnv("$HOME/Development/Other/ECDICT/ecdict.csv")
}
return p
}
func TestSimpleDict_Match(t *testing.T) {
dict, err := NewSimpleDict(dictFile())
if err != nil {
t.Fatal(err)
}
r, err := dict.Match("aesthete")
if err != nil {
t.Fatal("SimpleDict match error: ", err)
}
assert.Equal(t, "aesthete", r.Word)
assert.Equal(t, "'i:sθi:t", r.Phonetic)
assert.Equal(t, "n one who professes great sensitivity to the beauty of art and nature", r.Definition)
assert.Equal(t, "n. 审美家, 唯美主义者", r.Translation)
assert.Equal(t, "gre", r.Tag)
assert.Equal(t, 34101, r.Bnc)
assert.Equal(t, 29682, r.Frq)
assert.Equal(t, "s:aesthetes", r.Exchange)
keys := []string{
"aburamycin", "aburamycin", "zymophosphate", "zymophyte", "zymoplasm", "zymoplastic", "wilfully", "wilfulness",
"wilga", "wilgus", "wilhelm", "vertin", "vertiplane", "vertiport", "vertisol", "vertisols", "vertisporin",
"unspotted", "unsprung", "unsqueeze", "unsqueezing", "two-bath chrome tannage", "two-beam", "two-bedroom", "two-bin system",
"two-bit", "two-blade propeller", "nyack", "nyad", "nyaff", "nyah", "nyah-nyah", "nyala", "nyam", "nyama", "nyamps",
"Nyamuragira", "Nyamwezi", "nyang",
}
for _, k := range keys {
r, err := dict.Match(k)
assert.NoError(t, err)
assert.Equal(t, k, r.Word)
}
}
func BenchmarkSimpleDict_Match(b *testing.B) {
dict, err := NewSimpleDict(dictFile())
if err != nil {
b.Fatal(err)
}
keys := []string{
"aburamycin", "aburamycin", "zymophosphate", "zymophyte", "zymoplasm", "zymoplastic", "wilfully", "wilfulness",
"wilga", "wilgus", "wilhelm", "vertin", "vertiplane", "vertiport", "vertisol", "vertisols", "vertisporin",
"unspotted", "unsprung", "unsqueeze", "unsqueezing", "two-bath chrome tannage", "two-beam", "two-bedroom", "two-bin system",
"two-bit", "two-blade propeller", "nyack", "nyad", "nyaff", "nyah", "nyah-nyah", "nyala", "nyam", "nyama", "nyamps",
"Nyamuragira", "Nyamwezi", "nyang",
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
k := keys[i%len(keys)]
dict.Match(k)
}
}
func TestSimpleDict_Like(t *testing.T) {
dict, err := NewSimpleDict(dictFile())
if err != nil {
t.Fatal(err)
}
type args struct {
k string
}
tests := []struct {
name string
dict *SimpleDict
args args
want []*Record
wantErr bool
}{
{
name: "LikeShouldReturnsItems",
args: args{
k: "unspr",
},
dict: dict,
want: []*Record{
{
Word: "unsprung",
Phonetic: "'ʌn'sprʌŋ",
Translation: "a. 没有安装弹簧的",
Exchange: "p:unsprung/0:unsprung/1:p",
},
},
},
{
name: "ReturnsErrorIfNotLikeAnyRecords",
args: args{
k: "thereisnowordslikethis",
},
dict: dict,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
d := tt.dict
got, err := d.Like(tt.args.k)
if (err != nil) != tt.wantErr {
t.Errorf("SimpleDict.Like() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("SimpleDict.Like() = %v, want %v", got, tt.want)
}
})
}
}
func BenchmarkSimpleDict_Like_4Characters(b *testing.B) {
dict, err := NewSimpleDict(dictFile())
if err != nil {
b.Fatal(err)
}
keys := []string{
"aburamycin", "aburamycin", "zymophosphate", "zymophyte", "zymoplasm", "zymoplastic", "wilfully", "wilfulness",
"wilga", "wilgus", "wilhelm", "vertin", "vertiplane", "vertiport", "vertisol", "vertisols", "vertisporin",
"unspotted", "unsprung", "unsqueeze", "unsqueezing", "two-bath chrome tannage", "two-beam", "two-bedroom", "two-bin system",
"two-bit", "two-blade propeller", "nyack", "nyad", "nyaff", "nyah", "nyah-nyah", "nyala", "nyam", "nyama", "nyamps",
"Nyamuragira", "Nyamwezi", "nyang",
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
dict.Like(keys[i%len(keys)][:4])
}
}
| [
"\"EDICT\""
]
| []
| [
"EDICT"
]
| [] | ["EDICT"] | go | 1 | 0 | |
snappass/main.py | import os
import sys
import uuid
import redis
from cryptography.fernet import Fernet
from flask import abort, Flask, render_template, request
from redis.exceptions import ConnectionError
from werkzeug.urls import url_quote_plus
from werkzeug.urls import url_unquote_plus
from distutils.util import strtobool
NO_SSL = bool(strtobool(os.environ.get('NO_SSL', 'False')))
URL_PREFIX = os.environ.get('URL_PREFIX', None)
TOKEN_SEPARATOR = '~'
# Initialize Flask Application
app = Flask(__name__)
if os.environ.get('DEBUG'):
app.debug = True
app.secret_key = os.environ.get('SECRET_KEY', 'Secret Key')
app.config.update(
dict(STATIC_URL=os.environ.get('STATIC_URL', 'static')))
# Initialize Redis
if os.environ.get('MOCK_REDIS'):
from fakeredis import FakeStrictRedis
redis_client = FakeStrictRedis()
elif os.environ.get('REDIS_URL'):
redis_client = redis.StrictRedis.from_url(os.environ.get('REDIS_URL'))
else:
redis_host = os.environ.get('REDIS_HOST', 'localhost')
redis_port = os.environ.get('REDIS_PORT', 6379)
redis_db = os.environ.get('SNAPPASS_REDIS_DB', 0)
redis_client = redis.StrictRedis(
host=redis_host, port=redis_port, db=redis_db)
REDIS_PREFIX = os.environ.get('REDIS_PREFIX', 'snappass')
TIME_CONVERSION = {'two weeks': 1209600, 'week': 604800, 'day': 86400, 'hour': 3600}
def check_redis_alive(fn):
def inner(*args, **kwargs):
try:
if fn.__name__ == 'main':
redis_client.ping()
return fn(*args, **kwargs)
except ConnectionError as e:
print('Failed to connect to redis! %s' % e)  # exceptions have no .message attribute in Python 3
if fn.__name__ == 'main':
sys.exit(0)
else:
return abort(500)
return inner
def encrypt(password):
"""
Take a password string, encrypt it with Fernet symmetric encryption,
and return the result (bytes), with the decryption key (bytes)
"""
encryption_key = Fernet.generate_key()
fernet = Fernet(encryption_key)
encrypted_password = fernet.encrypt(password.encode('utf-8'))
return encrypted_password, encryption_key
def decrypt(password, decryption_key):
"""
Decrypt a password (bytes) using the provided key (bytes),
and return the plain-text password (bytes).
"""
fernet = Fernet(decryption_key)
return fernet.decrypt(password)
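# Illustrative round trip (editor's sketch, not part of the original module):
# encrypt() returns the ciphertext together with a freshly generated Fernet
# key, and decrypt() reverses it with that key.
#
#   ciphertext, key = encrypt("s3cret")
#   assert decrypt(ciphertext, key) == b"s3cret"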
def parse_token(token):
token_fragments = token.split(TOKEN_SEPARATOR, 1) # Split once, not more.
storage_key = token_fragments[0]
try:
decryption_key = token_fragments[1].encode('utf-8')
except IndexError:
decryption_key = None
return storage_key, decryption_key
@check_redis_alive
def set_password(password, ttl):
"""
Encrypt and store the password for the specified lifetime.
Returns a token comprised of the key where the encrypted password
is stored, and the decryption key.
"""
storage_key = REDIS_PREFIX + uuid.uuid4().hex
encrypted_password, encryption_key = encrypt(password)
redis_client.setex(storage_key, ttl, encrypted_password)
encryption_key = encryption_key.decode('utf-8')
token = TOKEN_SEPARATOR.join([storage_key, encryption_key])
return token
@check_redis_alive
def get_password(token):
"""
From a given token, return the initial password.
If the token is tilde-separated, we decrypt the password fetched from Redis.
If not, the password is simply returned as is.
"""
storage_key, decryption_key = parse_token(token)
password = redis_client.get(storage_key)
redis_client.delete(storage_key)
if password is not None:
if decryption_key is not None:
password = decrypt(password, decryption_key)
return password.decode('utf-8')
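# Illustrative token lifecycle (editor's sketch; assumes a reachable Redis):
#
#   token = set_password("s3cret", TIME_CONVERSION["hour"])  # "<storage_key>~<fernet_key>"
#   get_password(token)  # -> "s3cret"; the Redis key is deleted, so a second call returns None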
@check_redis_alive
def password_exists(token):
storage_key, decryption_key = parse_token(token)
return redis_client.exists(storage_key)
def empty(value):
if not value:
return True
def clean_input():
"""
Make sure we're not getting bad data from the front end,
format data to be machine readable
"""
if empty(request.form.get('password', '')):
abort(400)
if empty(request.form.get('ttl', '')):
abort(400)
time_period = request.form['ttl'].lower()
if time_period not in TIME_CONVERSION:
abort(400)
return TIME_CONVERSION[time_period], request.form['password']
@app.route('/', methods=['GET'])
def index():
return render_template('set_password.html')
@app.route('/', methods=['POST'])
def handle_password():
ttl, password = clean_input()
token = set_password(password, ttl)
if NO_SSL:
base_url = request.url_root
else:
base_url = request.url_root.replace("http://", "https://")
if URL_PREFIX:
base_url = base_url + URL_PREFIX.strip("/") + "/"
link = base_url + url_quote_plus(token)
return render_template('confirm.html', password_link=link)
@app.route('/<password_key>', methods=['GET'])
def preview_password(password_key):
password_key = url_unquote_plus(password_key)
if not password_exists(password_key):
return render_template('expired.html')
return render_template('preview.html')
@app.route('/<password_key>', methods=['POST'])
def show_password(password_key):
password_key = url_unquote_plus(password_key)
password = get_password(password_key)
if not password:
abort(404)
return render_template('password.html', password=password)
@check_redis_alive
def main():
app.run(host='0.0.0.0')
if __name__ == '__main__':
main()
| []
| []
| [
"REDIS_PORT",
"SNAPPASS_REDIS_DB",
"STATIC_URL",
"NO_SSL",
"REDIS_HOST",
"URL_PREFIX",
"SECRET_KEY",
"DEBUG",
"REDIS_URL",
"MOCK_REDIS",
"REDIS_PREFIX"
]
| [] | ["REDIS_PORT", "SNAPPASS_REDIS_DB", "STATIC_URL", "NO_SSL", "REDIS_HOST", "URL_PREFIX", "SECRET_KEY", "DEBUG", "REDIS_URL", "MOCK_REDIS", "REDIS_PREFIX"] | python | 11 | 0 | |
cmd/main.go | package main
import (
"encoding/json"
"log"
"net/http"
"os"
"github.com/julienschmidt/httprouter"
"github.com/rumyantseva/lowest-common-ancestor/pkg/handlers"
"github.com/rumyantseva/lowest-common-ancestor/pkg/lca"
)
func main() {
port := os.Getenv("PORT")
if len(port) == 0 {
port = "8888"
}
config := os.Getenv("CONFIG_FILE")
if len(config) == 0 {
config = "./default_config.json"
}
log.Printf("Read data from file: %s", config)
file, err := os.Open(config)
if err != nil {
log.Fatalf("Couldn't open config file: %s", err.Error())
}
var bureau lca.Node
err = json.NewDecoder(file).Decode(&bureau)
if err != nil {
log.Fatalf("Couldn't parse data from file: %s", err.Error())
}
log.Printf("Data loaded. The CEO is %s.", bureau.Key)
tarjan := lca.NewTarjan(&bureau)
router := httprouter.New()
router.GET("/api/v1/closest-common-manager", handlers.ClosestCommonManager(tarjan))
log.Fatal(http.ListenAndServe(":"+port, router))
}
| [
"\"PORT\"",
"\"CONFIG_FILE\""
]
| []
| [
"PORT",
"CONFIG_FILE"
]
| [] | ["PORT", "CONFIG_FILE"] | go | 2 | 0 | |
tools/universe/package_publisher.py | #!/usr/bin/env python3
import base64
import difflib
import http.client
import json
import logging
import os
import shutil
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
# By default, publish GA releases at 0, 100, 200, 300, ... to allow hotfixes within releases.
ga_index_multiplier = 100
# Don't bother with skipping indexes for beta releases, just go sequentially.
beta_index_multiplier = 1
class UniversePackagePublisher(object):
'''Creates a PR for a release against the Universe repository at http://github.com/mesosphere/universe.
'''
def __init__(self, package_name, package_version, commit_desc, beta_release, dry_run=False):
self._pkg_name = package_name
self._pkg_version = package_version
self._pr_title = 'Release {} {} (automated commit)\n\n'.format(self._pkg_name, self._pkg_version)
self._commit_desc = commit_desc
self._beta_release = beta_release
self._dry_run = dry_run
# Optional configuration via envvars:
self._release_branch = os.environ.get('RELEASE_BRANCH', 'version-3.x')
self._release_index = int(os.environ.get('RELEASE_INDEX', -1))
self._github_user = os.environ.get('GITHUB_USER', 'mesosphere-ci')
self._github_token = os.environ.get('GITHUB_TOKEN', None)
if self._github_token is None:
if self._dry_run:
self._github_token = 'DRY_RUN'
else:
raise Exception('GITHUB_TOKEN is required: Credential to create a PR against Universe')
self._enc_github_token = base64.encodebytes(self._github_token.encode('utf-8')).decode('utf-8').rstrip('\n')
self._release_universe_repo = os.environ.get('RELEASE_UNIVERSE_REPO', 'mesosphere/universe')
def _find_release_index(self, repo_pkg_base):
'''Returns the correct number/id for this release in the universe tree, and the prior release to diff against.
Returns a tuple containing two ints: [prior_index (or -1 if N/A), this_index]'''
# Find the index to be used for this release:
last_index = -1
if self._release_index >= 0:
if os.path.exists(os.path.join(repo_pkg_base, str(self._release_index))):
raise Exception('Requested index {} is already occupied in directory {}: {}'.format(
self._release_index, repo_pkg_base, sorted(os.listdir(repo_pkg_base))))
this_index = self._release_index
# Search backwards from this_index to find a prior release to diff against:
for num in reversed(range(0, this_index)): # reversed([0, ..., this-1]) => [this-1, ..., 0]
if os.path.isdir(os.path.join(repo_pkg_base, str(num))):
last_index = num
break
else:
# Iterate over the directory contents and find the largest integer
for filename in os.listdir(repo_pkg_base):
if not os.path.isdir(os.path.join(repo_pkg_base, filename)):
continue
try:
filenum = int(filename)
except ValueError:
continue
if filenum > last_index:
last_index = filenum
# Find the next index >last_index that fits index_multiplier (0*n, 1*n, 2*n, 3*n, ...)
if last_index == -1:
# Special case: The below math would round up to 1*n
this_index = 0
else:
index_multiplier = beta_index_multiplier if self._beta_release else ga_index_multiplier
this_index = index_multiplier * (int(last_index / index_multiplier) + 1)
return (last_index, this_index)
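# Editor's note (worked example of the arithmetic above): with the GA
# multiplier of 100, a largest existing index of 203 yields
# 100 * (203 // 100 + 1) == 300 for the next release, while a beta release
# (multiplier 1) would simply land at 204.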
def _create_universe_branch(self, scratchdir, pkgdir):
branch = 'automated/release_{}_{}_{}'.format(
self._pkg_name,
self._pkg_version,
base64.b64encode(os.urandom(4)).decode('utf-8').rstrip('='))
# check out the repo, create a new local branch:
ret = os.system(' && '.join([
'cd {}'.format(scratchdir),
'git clone --depth 1 --branch {} https://{}:{}@github.com/{} universe'.format(
self._release_branch, self._github_user, self._github_token, self._release_universe_repo),
'cd universe',
'git config --local user.email [email protected]',
'git config --local user.name release_builder.py',
'git checkout -b {}'.format(branch)]))
if ret != 0:
raise Exception('Failed to create local Universe git branch {}.'.format(branch))
universe_repo = os.path.join(scratchdir, 'universe')
repo_pkg_base = os.path.join(
universe_repo,
'repo',
'packages',
self._pkg_name[0].upper(),
self._pkg_name)
if not os.path.exists(repo_pkg_base):
os.makedirs(repo_pkg_base)
# find the prior and desired release number:
(last_index, this_index) = self._find_release_index(repo_pkg_base)
# copy the stub universe contents into a new release number, while calculating diffs:
last_dir = os.path.join(repo_pkg_base, str(last_index))
this_dir = os.path.join(repo_pkg_base, str(this_index))
shutil.copytree(pkgdir, this_dir)
# create a user-friendly diff for use in the commit message:
result_lines = self._compute_changes(last_dir, this_dir, last_index, this_index)
commitmsg_path = os.path.join(scratchdir, 'commitmsg.txt')
with open(commitmsg_path, 'w') as commitmsg_file:
commitmsg_file.write(self._pr_title)
commitmsg_file.writelines(result_lines)
# commit the change and push the branch:
cmds = ['cd {}'.format(os.path.join(scratchdir, 'universe')),
'git add .',
'git commit -q -F {}'.format(commitmsg_path)]
if self._dry_run:
# ensure the debug goes to stderr...:
cmds.append('git show -q HEAD 1>&2')
else:
cmds.append('git push origin {}'.format(branch))
ret = os.system(' && '.join(cmds))
if ret != 0:
raise Exception('Failed to push git branch {} to Universe.'.format(branch))
return (branch, commitmsg_path)
def _compute_changes(self, last_dir, this_dir, last_index, this_index):
if os.path.exists(last_dir):
last_dir_files = set(os.listdir(last_dir))
this_dir_files = set(os.listdir(this_dir))
removed_files = last_dir_files - this_dir_files
added_files = this_dir_files - last_dir_files
filediffs = {}
shared_files = last_dir_files & this_dir_files
for filename in shared_files:
# file exists in both new and old: calculate diff
last_filename = os.path.join(last_dir, filename)
this_filename = os.path.join(this_dir, filename)
with open(last_filename, 'r') as last_file, open(this_filename, 'r') as this_file:
filediff = ''.join(difflib.unified_diff(
last_file.readlines(), this_file.readlines(),
fromfile='{}/{}'.format(last_index, filename),
tofile='{}/{}'.format(this_index, filename)))
if filediff:
filediffs[filename] = filediff
else:
filediffs = {}
removed_files = {}
added_files = os.listdir(this_dir)
result_lines = [
'Changes between revisions {} => {}:\n'.format(last_index, this_index),
'{} files added: [{}]\n'.format(len(added_files), ', '.join(added_files)),
'{} files removed: [{}]\n'.format(len(removed_files), ', '.join(removed_files)),
'{} files changed:\n\n'.format(len(filediffs))]
if self._commit_desc:
result_lines.insert(0, 'Description:\n{}\n\n'.format(self._commit_desc))
# surround diff description with quotes to ensure formatting is preserved:
result_lines.append('```\n')
filediff_names = list(filediffs.keys())
filediff_names.sort()
for filename in filediff_names:
result_lines.append(filediffs[filename])
result_lines.append('```\n')
return result_lines
def _create_universe_pr(self, branch, commitmsg_path):
if self._dry_run:
log.info('[DRY RUN] Skipping creation of PR against branch {}'.format(branch))
return None
headers = {
'User-Agent': 'release_builder.py',
'Content-Type': 'application/json',
'Authorization': 'Basic {}'.format(self._enc_github_token)}
with open(commitmsg_path) as commitmsg_file:
payload = {
'title': self._pr_title,
'head': branch,
'base': self._release_branch,
'body': commitmsg_file.read()}
conn = http.client.HTTPSConnection('api.github.com')
conn.request(
'POST',
'/repos/{}/pulls'.format(self._release_universe_repo),
body=json.dumps(payload).encode('utf-8'),
headers=headers)
return conn.getresponse()
def publish(self, scratchdir, pkgdir):
branch, commitmsg_path = self._create_universe_branch(scratchdir, pkgdir)
return self._create_universe_pr(branch, commitmsg_path)
| []
| []
| [
"RELEASE_BRANCH",
"GITHUB_USER",
"RELEASE_INDEX",
"RELEASE_UNIVERSE_REPO",
"GITHUB_TOKEN"
]
| [] | ["RELEASE_BRANCH", "GITHUB_USER", "RELEASE_INDEX", "RELEASE_UNIVERSE_REPO", "GITHUB_TOKEN"] | python | 5 | 0 | |
http/rebase.go | package http
import (
"crypto/hmac"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"io/ioutil"
"log"
"net/http"
"os"
"time"
"strings"
"github.com/pallavkothari/rebasebot/github"
"github.com/pallavkothari/rebasebot/integrations"
)
func Rebase(w http.ResponseWriter, r *http.Request) {
receivedAt := time.Now()
logRequest(r)
if r.Method != "POST" {
w.WriteHeader(http.StatusNotFound)
logResponse(r, http.StatusNotFound, receivedAt)
return
}
var event github.Event
var responseStatus = http.StatusCreated
body, err := ioutil.ReadAll(r.Body)
if err != nil {
responseStatus = http.StatusInternalServerError
}
if !isVerifiedRequest(r.Header, body) {
w.WriteHeader(http.StatusUnauthorized)
logResponse(r, http.StatusUnauthorized, receivedAt)
return
}
if err := json.Unmarshal(body, &event); err != nil {
log.Printf("http.request.body.parse_failed: %s\n", err.Error())
// write the 400 before returning so the client is not left with an implicit 200
w.WriteHeader(http.StatusBadRequest)
logResponse(r, http.StatusBadRequest, receivedAt)
return
}
go func() {
if !(strings.Compare(event.Action, "created") == 0 && github.WasMentioned(event.Comment) && strings.Contains(event.Comment.Body, "rebase")) {
return
}
log.Printf("bot.rebase.started, name: %s\n", event.Repository.FullName)
defer log.Printf("bot.rebase.finished: %s\n", event.Repository.FullName)
pullRequest, err := event.Repository.FindPR(event.Issue.Number)
if err == nil {
integrations.GitRebase(pullRequest)
}
}()
w.WriteHeader(responseStatus)
logResponse(r, responseStatus, receivedAt)
}
func isVerifiedRequest(header http.Header, body []byte) bool {
serverSignature := os.Getenv("SECRET")
requestSignature := header.Get("X-Hub-Signature")
// when not set up with a secret
if len(serverSignature) < 1 {
log.Println("http.request.signature.verification.skipped")
return true
}
log.Println("http.request.signature.verification.started")
if len(requestSignature) < 1 {
log.Println("http.request.signature.verification.failed", "missing X-Hub-Signature header")
return false
}
mac := hmac.New(sha1.New, []byte(serverSignature))
mac.Write(body)
expectedMAC := mac.Sum(nil)
expectedSignature := "sha1=" + hex.EncodeToString(expectedMAC)
signatureMatched := hmac.Equal([]byte(expectedSignature), []byte(requestSignature))
if signatureMatched {
log.Println("http.request.signature.verification.passed")
} else {
log.Println("http.request.signature.verification.failed")
}
return signatureMatched
}
| [
"\"SECRET\""
]
| []
| [
"SECRET"
]
| [] | ["SECRET"] | go | 1 | 0 | |
sdk/go/google/storage/v1/bucket.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v1
import (
"context"
"reflect"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Creates a new bucket.
type Bucket struct {
pulumi.CustomResourceState
// Access controls on the bucket.
Acl BucketAccessControlResponseArrayOutput `pulumi:"acl"`
// The bucket's Autoclass configuration.
Autoclass BucketAutoclassResponseOutput `pulumi:"autoclass"`
// The bucket's billing configuration.
Billing BucketBillingResponseOutput `pulumi:"billing"`
// The bucket's Cross-Origin Resource Sharing (CORS) configuration.
Cors BucketCorsItemResponseArrayOutput `pulumi:"cors"`
// The bucket's custom placement configuration for Custom Dual Regions.
CustomPlacementConfig BucketCustomPlacementConfigResponseOutput `pulumi:"customPlacementConfig"`
// The default value for event-based hold on newly created objects in this bucket. Event-based hold is a way to retain objects indefinitely until an event occurs, signified by the hold's release. After being released, such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false. Objects under event-based hold cannot be deleted, overwritten or archived until the hold is removed.
DefaultEventBasedHold pulumi.BoolOutput `pulumi:"defaultEventBasedHold"`
// Default access controls to apply to new objects when no ACL is provided.
DefaultObjectAcl ObjectAccessControlResponseArrayOutput `pulumi:"defaultObjectAcl"`
// Encryption configuration for a bucket.
Encryption BucketEncryptionResponseOutput `pulumi:"encryption"`
// HTTP 1.1 Entity tag for the bucket.
Etag pulumi.StringOutput `pulumi:"etag"`
// The bucket's IAM configuration.
IamConfiguration BucketIamConfigurationResponseOutput `pulumi:"iamConfiguration"`
// The kind of item this is. For buckets, this is always storage#bucket.
Kind pulumi.StringOutput `pulumi:"kind"`
// User-provided labels, in key/value pairs.
Labels pulumi.StringMapOutput `pulumi:"labels"`
// The bucket's lifecycle configuration. See lifecycle management for more information.
Lifecycle BucketLifecycleResponseOutput `pulumi:"lifecycle"`
// The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list.
Location pulumi.StringOutput `pulumi:"location"`
// The type of the bucket location.
LocationType pulumi.StringOutput `pulumi:"locationType"`
// The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.
Logging BucketLoggingResponseOutput `pulumi:"logging"`
// The metadata generation of this bucket.
Metageneration pulumi.StringOutput `pulumi:"metageneration"`
// The name of the bucket.
Name pulumi.StringOutput `pulumi:"name"`
// The owner of the bucket. This is always the project team's owner group.
Owner BucketOwnerResponseOutput `pulumi:"owner"`
// The project number of the project the bucket belongs to.
ProjectNumber pulumi.StringOutput `pulumi:"projectNumber"`
// The bucket's retention policy. The retention policy enforces a minimum retention time for all objects contained in the bucket, based on their creation time. Any attempt to overwrite or delete objects younger than the retention period will result in a PERMISSION_DENIED error. An unlocked retention policy can be modified or removed from the bucket via a storage.buckets.update operation. A locked retention policy cannot be removed or shortened in duration for the lifetime of the bucket. Attempting to remove or decrease period of a locked retention policy will result in a PERMISSION_DENIED error.
RetentionPolicy BucketRetentionPolicyResponseOutput `pulumi:"retentionPolicy"`
// The Recovery Point Objective (RPO) of this bucket. Set to ASYNC_TURBO to turn on Turbo Replication on a bucket.
Rpo pulumi.StringOutput `pulumi:"rpo"`
// Reserved for future use.
SatisfiesPZS pulumi.BoolOutput `pulumi:"satisfiesPZS"`
// The URI of this bucket.
SelfLink pulumi.StringOutput `pulumi:"selfLink"`
// The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.
StorageClass pulumi.StringOutput `pulumi:"storageClass"`
// The creation time of the bucket in RFC 3339 format.
TimeCreated pulumi.StringOutput `pulumi:"timeCreated"`
// The modification time of the bucket in RFC 3339 format.
Updated pulumi.StringOutput `pulumi:"updated"`
// The bucket's versioning configuration.
Versioning BucketVersioningResponseOutput `pulumi:"versioning"`
// The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.
Website BucketWebsiteResponseOutput `pulumi:"website"`
}
// NewBucket registers a new resource with the given unique name, arguments, and options.
func NewBucket(ctx *pulumi.Context,
name string, args *BucketArgs, opts ...pulumi.ResourceOption) (*Bucket, error) {
if args == nil {
args = &BucketArgs{}
}
var resource Bucket
err := ctx.RegisterResource("google-native:storage/v1:Bucket", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
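// Illustrative sketch (editor's addition, not part of the generated code):
// registering a bucket from a Pulumi program. The resource name, bucket name
// and location are placeholders.
//
//	_, err := NewBucket(ctx, "my-bucket", &BucketArgs{
//		Name:     pulumi.String("my-bucket"),
//		Location: pulumi.String("US"),
//	})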
// GetBucket gets an existing Bucket resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetBucket(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *BucketState, opts ...pulumi.ResourceOption) (*Bucket, error) {
var resource Bucket
err := ctx.ReadResource("google-native:storage/v1:Bucket", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering Bucket resources.
type bucketState struct {
}
type BucketState struct {
}
func (BucketState) ElementType() reflect.Type {
return reflect.TypeOf((*bucketState)(nil)).Elem()
}
type bucketArgs struct {
// Access controls on the bucket.
Acl []BucketAccessControlType `pulumi:"acl"`
// The bucket's Autoclass configuration.
Autoclass *BucketAutoclass `pulumi:"autoclass"`
// The bucket's billing configuration.
Billing *BucketBilling `pulumi:"billing"`
// The bucket's Cross-Origin Resource Sharing (CORS) configuration.
Cors []BucketCorsItem `pulumi:"cors"`
// The bucket's custom placement configuration for Custom Dual Regions.
CustomPlacementConfig *BucketCustomPlacementConfig `pulumi:"customPlacementConfig"`
// The default value for event-based hold on newly created objects in this bucket. Event-based hold is a way to retain objects indefinitely until an event occurs, signified by the hold's release. After being released, such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false. Objects under event-based hold cannot be deleted, overwritten or archived until the hold is removed.
DefaultEventBasedHold *bool `pulumi:"defaultEventBasedHold"`
// Default access controls to apply to new objects when no ACL is provided.
DefaultObjectAcl []ObjectAccessControlType `pulumi:"defaultObjectAcl"`
// Encryption configuration for a bucket.
Encryption *BucketEncryption `pulumi:"encryption"`
// HTTP 1.1 Entity tag for the bucket.
Etag *string `pulumi:"etag"`
// The bucket's IAM configuration.
IamConfiguration *BucketIamConfiguration `pulumi:"iamConfiguration"`
// The ID of the bucket. For buckets, the id and name properties are the same.
Id *string `pulumi:"id"`
// The kind of item this is. For buckets, this is always storage#bucket.
Kind *string `pulumi:"kind"`
// User-provided labels, in key/value pairs.
Labels map[string]string `pulumi:"labels"`
// The bucket's lifecycle configuration. See lifecycle management for more information.
Lifecycle *BucketLifecycle `pulumi:"lifecycle"`
// The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list.
Location *string `pulumi:"location"`
// The type of the bucket location.
LocationType *string `pulumi:"locationType"`
// The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.
Logging *BucketLogging `pulumi:"logging"`
// The metadata generation of this bucket.
Metageneration *string `pulumi:"metageneration"`
// The name of the bucket.
Name *string `pulumi:"name"`
// The owner of the bucket. This is always the project team's owner group.
Owner *BucketOwner `pulumi:"owner"`
PredefinedAcl *string `pulumi:"predefinedAcl"`
PredefinedDefaultObjectAcl *string `pulumi:"predefinedDefaultObjectAcl"`
Project *string `pulumi:"project"`
// The project number of the project the bucket belongs to.
ProjectNumber *string `pulumi:"projectNumber"`
Projection *string `pulumi:"projection"`
ProvisionalUserProject *string `pulumi:"provisionalUserProject"`
// The bucket's retention policy. The retention policy enforces a minimum retention time for all objects contained in the bucket, based on their creation time. Any attempt to overwrite or delete objects younger than the retention period will result in a PERMISSION_DENIED error. An unlocked retention policy can be modified or removed from the bucket via a storage.buckets.update operation. A locked retention policy cannot be removed or shortened in duration for the lifetime of the bucket. Attempting to remove or decrease period of a locked retention policy will result in a PERMISSION_DENIED error.
RetentionPolicy *BucketRetentionPolicy `pulumi:"retentionPolicy"`
// The Recovery Point Objective (RPO) of this bucket. Set to ASYNC_TURBO to turn on Turbo Replication on a bucket.
Rpo *string `pulumi:"rpo"`
// Reserved for future use.
SatisfiesPZS *bool `pulumi:"satisfiesPZS"`
// The URI of this bucket.
SelfLink *string `pulumi:"selfLink"`
// The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.
StorageClass *string `pulumi:"storageClass"`
// The creation time of the bucket in RFC 3339 format.
TimeCreated *string `pulumi:"timeCreated"`
// The modification time of the bucket in RFC 3339 format.
Updated *string `pulumi:"updated"`
UserProject *string `pulumi:"userProject"`
// The bucket's versioning configuration.
Versioning *BucketVersioning `pulumi:"versioning"`
// The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.
Website *BucketWebsite `pulumi:"website"`
}
// The set of arguments for constructing a Bucket resource.
type BucketArgs struct {
// Access controls on the bucket.
Acl BucketAccessControlTypeArrayInput
// The bucket's Autoclass configuration.
Autoclass BucketAutoclassPtrInput
// The bucket's billing configuration.
Billing BucketBillingPtrInput
// The bucket's Cross-Origin Resource Sharing (CORS) configuration.
Cors BucketCorsItemArrayInput
// The bucket's custom placement configuration for Custom Dual Regions.
CustomPlacementConfig BucketCustomPlacementConfigPtrInput
// The default value for event-based hold on newly created objects in this bucket. Event-based hold is a way to retain objects indefinitely until an event occurs, signified by the hold's release. After being released, such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false. Objects under event-based hold cannot be deleted, overwritten or archived until the hold is removed.
DefaultEventBasedHold pulumi.BoolPtrInput
// Default access controls to apply to new objects when no ACL is provided.
DefaultObjectAcl ObjectAccessControlTypeArrayInput
// Encryption configuration for a bucket.
Encryption BucketEncryptionPtrInput
// HTTP 1.1 Entity tag for the bucket.
Etag pulumi.StringPtrInput
// The bucket's IAM configuration.
IamConfiguration BucketIamConfigurationPtrInput
// The ID of the bucket. For buckets, the id and name properties are the same.
Id pulumi.StringPtrInput
// The kind of item this is. For buckets, this is always storage#bucket.
Kind pulumi.StringPtrInput
// User-provided labels, in key/value pairs.
Labels pulumi.StringMapInput
// The bucket's lifecycle configuration. See lifecycle management for more information.
Lifecycle BucketLifecyclePtrInput
// The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list.
Location pulumi.StringPtrInput
// The type of the bucket location.
LocationType pulumi.StringPtrInput
// The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.
Logging BucketLoggingPtrInput
// The metadata generation of this bucket.
Metageneration pulumi.StringPtrInput
// The name of the bucket.
Name pulumi.StringPtrInput
// The owner of the bucket. This is always the project team's owner group.
Owner BucketOwnerPtrInput
PredefinedAcl pulumi.StringPtrInput
PredefinedDefaultObjectAcl pulumi.StringPtrInput
Project pulumi.StringPtrInput
// The project number of the project the bucket belongs to.
ProjectNumber pulumi.StringPtrInput
Projection pulumi.StringPtrInput
ProvisionalUserProject pulumi.StringPtrInput
// The bucket's retention policy. The retention policy enforces a minimum retention time for all objects contained in the bucket, based on their creation time. Any attempt to overwrite or delete objects younger than the retention period will result in a PERMISSION_DENIED error. An unlocked retention policy can be modified or removed from the bucket via a storage.buckets.update operation. A locked retention policy cannot be removed or shortened in duration for the lifetime of the bucket. Attempting to remove or decrease period of a locked retention policy will result in a PERMISSION_DENIED error.
RetentionPolicy BucketRetentionPolicyPtrInput
// The Recovery Point Objective (RPO) of this bucket. Set to ASYNC_TURBO to turn on Turbo Replication on a bucket.
Rpo pulumi.StringPtrInput
// Reserved for future use.
SatisfiesPZS pulumi.BoolPtrInput
// The URI of this bucket.
SelfLink pulumi.StringPtrInput
// The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.
StorageClass pulumi.StringPtrInput
// The creation time of the bucket in RFC 3339 format.
TimeCreated pulumi.StringPtrInput
// The modification time of the bucket in RFC 3339 format.
Updated pulumi.StringPtrInput
UserProject pulumi.StringPtrInput
// The bucket's versioning configuration.
Versioning BucketVersioningPtrInput
// The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.
Website BucketWebsitePtrInput
}
func (BucketArgs) ElementType() reflect.Type {
return reflect.TypeOf((*bucketArgs)(nil)).Elem()
}
type BucketInput interface {
pulumi.Input
ToBucketOutput() BucketOutput
ToBucketOutputWithContext(ctx context.Context) BucketOutput
}
func (*Bucket) ElementType() reflect.Type {
return reflect.TypeOf((**Bucket)(nil)).Elem()
}
func (i *Bucket) ToBucketOutput() BucketOutput {
return i.ToBucketOutputWithContext(context.Background())
}
func (i *Bucket) ToBucketOutputWithContext(ctx context.Context) BucketOutput {
return pulumi.ToOutputWithContext(ctx, i).(BucketOutput)
}
type BucketOutput struct{ *pulumi.OutputState }
func (BucketOutput) ElementType() reflect.Type {
return reflect.TypeOf((**Bucket)(nil)).Elem()
}
func (o BucketOutput) ToBucketOutput() BucketOutput {
return o
}
func (o BucketOutput) ToBucketOutputWithContext(ctx context.Context) BucketOutput {
return o
}
func init() {
pulumi.RegisterInputType(reflect.TypeOf((*BucketInput)(nil)).Elem(), &Bucket{})
pulumi.RegisterOutputType(BucketOutput{})
}
| []
| []
| []
| [] | [] | go | null | null | null |
config/settings.py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from corsheaders.defaults import default_headers
import os
from dotenv import load_dotenv
from celery.schedules import crontab
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# ENV_PATH = BASE_DIR / 'envManager/.env'
ENV_PATH = BASE_DIR / 'envManager/envVariables.py'
load_dotenv(ENV_PATH)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY')
CRYPT_KEY = os.getenv('CRYPT_KEY')
# ROOT_SECRET = os.getenv('ROOT_SECRET')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
APPEND_SLASH = False
splittedHosts = os.getenv('ALLOWED_HOSTS').split(',')
ALLOWED_HOSTS = splittedHosts
# reset token time before expiry
DURATION = os.getenv('DURATION')
# Application definition
print('Starting app...')
SHARED_APPS = [
'envManager',
'channels',
'swagger_render',
]
TENANT_APPS = [
# The following Django contrib apps must be in TENANT_APPS
'django.contrib.contenttypes',
'corsheaders',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.staticfiles',
# your tenant-specific apps
]
INSTALLED_APPS = list(SHARED_APPS) + \
[app for app in TENANT_APPS if app not in SHARED_APPS]
# TENANT_MODEL = "tenants.Tenant" # app.Model
# TENANT_DOMAIN_MODEL = "tenants.Domain" # app.Model
MIDDLEWARE = [
# 'django_tenants.middleware.main.TenantMainMiddleware',
# 'django-tenants.middleware.SuspiciousTenantMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django_tenants.postgresql_backend',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('DB_NAME'),
'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWORD'),
'HOST': os.getenv('DB_HOST'),
'PORT': os.getenv('DB_PORT'),
}
}
# DATABASE_ROUTERS = (
# 'django_tenants.routers.TenantSyncRouter',
# )
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
ASGI_APPLICATION = 'config.routing.application'
APPEND_SLASH = True
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = False
CORS_ALLOW_HEADERS = list(default_headers) + [
'accessToken',
'secret'
]
# Celery config
CELERY_BROKER_URL = os.getenv('RABBIT_MQ_URL')
# Redis
REDIS_SERVER_NAME = os.getenv('REDIS_SERVER_NAME')
REDIS_PORT = os.getenv('REDIS_PORT')
# Email setup
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.getenv('SMTP_HOST')
EMAIL_HOST_USER = os.getenv('SMTP_HOST_USER')
EMAIL_HOST_PASSWORD = os.getenv('SMTP_HOST_PASSWORD')
EMAIL_PORT = os.getenv('SMTP_PORT')
EMAIL_USE_TLS = os.getenv('SMTP_USE_TLS')
SWAGGER_YAML_FILENAME = '/swagger_render/docs/lola-api-doc.yml'
# wasabi s3 bucket configuration
BUCKET_ACCESS_KEY_ID = os.getenv('BUCKET_ACCESS_KEY_ID')
BUCKET_SECRET_KEY = os.getenv('BUCKET_SECRET_KEY')
BUCKET_REGION_NAME = os.getenv('BUCKET_REGION_NAME')
BUCKET_NAME = os.getenv('BUCKET_NAME')
BUCKET_ENDPOINT_URL = os.getenv('BUCKET_ENDPOINT_URL')
PASSWORD_RESET_ENDPOINT = os.getenv('PASSWORD_RESET_ENDPOINT')
| []
| []
| [
"REDIS_PORT",
"DB_HOST",
"DB_PORT",
"DB_NAME",
"SECRET_KEY",
"BUCKET_SECRET_KEY",
"SMTP_HOST_USER",
"REDIS_SERVER_NAME",
"SMTP_HOST",
"BUCKET_REGION_NAME",
"ROOT_SECRET",
"BUCKET_ACCESS_KEY_ID",
"DB_USER",
"ALLOWED_HOSTS",
"CRYPT_KEY",
"DURATION",
"SMTP_USE_TLS",
"BUCKET_NAME",
"PASSWORD_RESET_ENDPOINT",
"RABBIT_MQ_URL",
"BUCKET_ENDPOINT_URL",
"DB_PASSWORD",
"SMTP_PORT",
"SMTP_HOST_PASSWORD"
]
| [] | ["REDIS_PORT", "DB_HOST", "DB_PORT", "DB_NAME", "SECRET_KEY", "BUCKET_SECRET_KEY", "SMTP_HOST_USER", "REDIS_SERVER_NAME", "SMTP_HOST", "BUCKET_REGION_NAME", "ROOT_SECRET", "BUCKET_ACCESS_KEY_ID", "DB_USER", "ALLOWED_HOSTS", "CRYPT_KEY", "DURATION", "SMTP_USE_TLS", "BUCKET_NAME", "PASSWORD_RESET_ENDPOINT", "RABBIT_MQ_URL", "BUCKET_ENDPOINT_URL", "DB_PASSWORD", "SMTP_PORT", "SMTP_HOST_PASSWORD"] | python | 24 | 0 | |
adapter.py | import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import math
# import time
import numpy as np
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import progressbar
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset import dataset_pb2 as open_dataset
############################Config###########################################
# path to waymo dataset "folder" (all .tfrecord files in that folder will be converted)
DATA_PATH = '/media/hy/DATA1/waymo/training/training_0006/'
# path to save kitti dataset
KITTI_PATH = '/media/hy/DATA1/z393/waymo_training_0006'
# time-of-day filter, use this to convert only frames captured at your preferred time of day
TIME_FILTER = True
TIME_NAME = ['Day']
# max indexing length
INDEX_LENGTH = 6
# image file format used when saving camera images
IMAGE_FORMAT = 'png'
# do not change
LABEL_PATH = KITTI_PATH + '/label_'
LABEL_ALL_PATH = KITTI_PATH + '/label_all'
LABEL_2D_PATH = KITTI_PATH + '/label_2d_'
IMAGE_PATH = KITTI_PATH + '/image_'
CALIB_PATH = KITTI_PATH + '/calib'
LIDAR_PATH = KITTI_PATH + '/velodyne'
###############################################################################
def isclose(x, y, rtol=1.e-5, atol=1.e-8):
return abs(x-y) <= atol + rtol * abs(y)
def euler_angles_from_rotation_matrix(R):
'''
From a paper by Gregory G. Slabaugh (undated),
"Computing Euler angles from a rotation matrix
'''
phi = 0.0
if isclose(R[2,0],-1.0):
theta = math.pi/2.0
psi = math.atan2(R[0,1],R[0,2])
elif isclose(R[2,0],1.0):
theta = -math.pi/2.0
psi = math.atan2(-R[0,1],-R[0,2])
else:
theta = -math.asin(R[2,0])
cos_theta = math.cos(theta)
psi = math.atan2(R[2,1]/cos_theta, R[2,2]/cos_theta)
phi = math.atan2(R[1,0]/cos_theta, R[0,0]/cos_theta)
return psi, theta, phi
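# Sanity-check example (added for clarity, not part of the original script): for
# the identity rotation R = np.eye(3), R[2,0] is 0, so theta = -asin(0) = 0 and
# psi = phi = atan2(0, 1) = 0, i.e. the function returns (0.0, 0.0, 0.0).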
def rotx(t):
""" 3D Rotation about the x-axis. """
c = np.cos(t)
s = np.sin(t)
return np.array([[1, 0, 0], [0, c, -s], [0, s, c]])
def roty(t):
""" Rotation about the y-axis. """
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
def rotz(t):
""" Rotation about the z-axis. """
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
def get_box_transformation_matrix(obj_loc, obj_size, ry):
"""Create a transformation matrix for a given label box pose."""
tx,ty,tz = obj_loc
c = math.cos(ry)
s = math.sin(ry)
sl, sh, sw = obj_size
return np.array([
[ sl*c,-sw*s, 0,tx],
[ sl*s, sw*c, 0,ty],
[ 0, 0, sh,tz],
[ 0, 0, 0, 1]])
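# Note (added for clarity): the matrix above composes a scale by the box size
# (sl, sw, sh), a rotation about z by the heading ry, and a translation to the
# box center (tx, ty, tz), so it maps unit-box corners into world coordinates.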
class Adapter:
def __init__(self):
self.__lidar_list = ['_FRONT', '_FRONT_RIGHT', '_FRONT_LEFT', '_SIDE_RIGHT', '_SIDE_LEFT']
self.__type_list = ['UNKNOWN', 'VEHICLE', 'PEDESTRIAN', 'SIGN', 'CYCLIST']
self.get_file_names()
self.create_folder()
def cvt(self):
""" convert dataset from Waymo to KITTI
Args:
return:
"""
bar = progressbar.ProgressBar(maxval=len(self.__file_names)+1,
widgets= [progressbar.Percentage(), ' ',
progressbar.Bar(marker='>',left='[',right=']'),' ',
progressbar.ETA()])
tf.enable_eager_execution()
file_num = 1
frame_num = 0
print("start converting ...")
bar.start()
for file_idx, file_name in enumerate(self.__file_names):
print('File {}/{}'.format(file_idx, len(self.__file_names)))
dataset = tf.data.TFRecordDataset(file_name, compression_type='')
for data in dataset:
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
                if TIME_FILTER and frame.context.stats.time_of_day not in TIME_NAME:
continue
# save the image:
# s1 = time.time()
self.save_image(frame, frame_num)
# e1 = time.time()
# parse the calib
# s2 = time.time()
self.save_calib(frame, frame_num)
# e2 = time.time()
# parse lidar
# s3 = time.time()
self.save_lidar(frame, frame_num)
# e3 = time.time()
# parse label
# s4 = time.time()
self.save_label(frame, frame_num)
# e4 = time.time()
#
self.save_2d_label(frame, frame_num)
frame_num += 1
bar.update(file_num)
file_num += 1
bar.finish()
print("\nfinished ...")
def save_image(self, frame, frame_num):
""" parse and save the images in png format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
for img in frame.images:
img_path = IMAGE_PATH + str(img.name - 1) + '/' + str(frame_num).zfill(INDEX_LENGTH) + '.' + IMAGE_FORMAT
img = cv2.imdecode(np.frombuffer(img.image, np.uint8), cv2.IMREAD_COLOR)
rgb_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
plt.imsave(img_path, rgb_img, format=IMAGE_FORMAT)
def save_calib(self, frame, frame_num, kitti_format=True):
""" parse and save the calibration data
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
fp_calib = open(CALIB_PATH + '/' + str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
waymo_cam_RT=np.array([0,-1,0,0, 0,0,-1,0, 1,0,0,0, 0 ,0 ,0 ,1]).reshape(4,4)
camera_calib = []
R0_rect = ["%e" % i for i in np.eye(3).flatten()]
Tr_velo_to_cam = []
calib_context = ''
for camera in frame.context.camera_calibrations:
tmp=np.array(camera.extrinsic.transform).reshape(4,4)
tmp=np.linalg.inv(tmp)
axes_transformation = np.array([[0,-1,0,0],
[0,0,-1,0],
[1,0,0,0],
[0,0,0,1]])
tmp = np.matmul(axes_transformation, tmp)
tmp = tmp.reshape((16,))
Tr_velo_to_cam.append(["%e" % i for i in tmp])
for cam in frame.context.camera_calibrations:
tmp=np.zeros((3,4))
tmp[0,0]=cam.intrinsic[0]
tmp[1,1]=cam.intrinsic[1]
tmp[0,2]=cam.intrinsic[2]
tmp[1,2]=cam.intrinsic[3]
tmp[2,2]=1
if not kitti_format:
tmp=(tmp @ waymo_cam_RT)
tmp=list(tmp.reshape(12))
tmp = ["%e" % i for i in tmp]
camera_calib.append(tmp)
for i in range(5):
calib_context += "P" + str(i) + ": " + " ".join(camera_calib[i]) + '\n'
calib_context += "R0_rect" + ": " + " ".join(R0_rect) + '\n'
for i in range(5):
calib_context += "Tr_velo_to_cam_" + str(i) + ": " + " ".join(Tr_velo_to_cam[i]) + '\n'
fp_calib.write(calib_context)
fp_calib.close()
def save_lidar(self, frame, frame_num):
""" parse and save the lidar data in psd format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
range_images, range_image_top_pose = self.parse_range_image_and_camera_projection(
frame)
points, intensity = self.convert_range_image_to_point_cloud(
frame,
range_images,
range_image_top_pose)
points_all = np.concatenate(points, axis=0)
intensity_all = np.concatenate(intensity, axis=0)
point_cloud = np.column_stack((points_all, intensity_all))
pc_path = LIDAR_PATH + '/' + str(frame_num).zfill(INDEX_LENGTH) + '.bin'
point_cloud.tofile(pc_path)
def save_label(self, frame, frame_num, kitti_format=True):
""" parse and save the label data in .txt format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
fp_label_all = open(LABEL_ALL_PATH + '/' + str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
# preprocess bounding box data
id_to_bbox = dict()
id_to_name = dict()
for labels in frame.projected_lidar_labels:
name = labels.name
for label in labels.labels:
bbox = [label.box.center_x - label.box.length / 2, label.box.center_y - label.box.width / 2,
label.box.center_x + label.box.length / 2, label.box.center_y + label.box.width / 2]
id_to_bbox[label.id] = bbox
id_to_name[label.id] = name - 1
Tr_velo_to_cam = []
if kitti_format:
for camera in frame.context.camera_calibrations:
tmp=np.array(camera.extrinsic.transform).reshape(4,4)
tmp=np.linalg.inv(tmp)
axes_transformation = np.array([[0,-1,0,0],
[0,0,-1,0],
[1,0,0,0],
[0,0,0,1]])
tmp = np.matmul(axes_transformation, tmp)
Tr_velo_to_cam.append(tmp)
for obj in frame.laser_labels:
            # calculate bounding box
bounding_box = None
name = None
id = obj.id
for lidar in self.__lidar_list:
if id + lidar in id_to_bbox:
bounding_box = id_to_bbox.get(id + lidar)
name = str(id_to_name.get(id + lidar))
break
            if bounding_box is None or name is None:
continue
my_type = self.__type_list[obj.type]
truncated = 0
occluded = 0
height = obj.box.height
width = obj.box.width
length = obj.box.length
x = obj.box.center_x
y = obj.box.center_y
z = obj.box.center_z
rotation_y = obj.box.heading
if kitti_format:
z -= height/2
transform_box_to_cam = Tr_velo_to_cam[int(name)] @ get_box_transformation_matrix((x, y, z),(length,height,width), rotation_y)
pt1 = np.array([-0.5, 0.5, 0 , 1.])
pt2 = np.array([0.5, 0.5, 0 , 1.])
pt1 = np.matmul(transform_box_to_cam, pt1)
pt2 = np.matmul(transform_box_to_cam, pt2)
new_ry = math.atan2(pt2[2]-pt1[2],pt2[0]-pt1[0])
rotation_y = -new_ry
new_loc = np.matmul(Tr_velo_to_cam[int(name)], np.array([x,y,z,1]).T)
x, y, z = new_loc[:3]
beta = math.atan2(x, z)
alpha = (rotation_y + beta - math.pi / 2) % (2 * math.pi)
# save the labels
line = my_type + ' {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format(round(truncated, 2),
occluded,
round(alpha, 2),
round(bounding_box[0], 2),
round(bounding_box[1], 2),
round(bounding_box[2], 2),
round(bounding_box[3], 2),
round(height, 2),
round(width, 2),
round(length, 2),
round(x, 2),
round(y, 2),
round(z, 2),
round(rotation_y, 2))
line_all = line[:-1] + ' ' + name + '\n'
# store the label
fp_label = open(LABEL_PATH + name + '/' + str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'a')
fp_label.write(line)
fp_label.close()
fp_label_all.write(line_all)
fp_label_all.close()
def save_2d_label(self, frame, frame_num, kitti_format=True):
""" parse and save the label data in .txt format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
# preprocess bounding box data
id_to_bbox = dict()
id_to_name = dict()
for labels in frame.camera_labels:
name = labels.name - 1
fp_label = open(LABEL_2D_PATH + str(name) + '/' + str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'a')
for label in labels.labels:
bounding_box = [label.box.center_x - label.box.length / 2, label.box.center_y - label.box.width / 2,
label.box.center_x + label.box.length / 2, label.box.center_y + label.box.width / 2]
my_type = self.__type_list[label.type]
truncated = 0
occluded = 0
alpha = 0
height = 0
width = 0
length = 0
x = 0
y = 0
z = 0
rotation_y = 0
line = my_type + ' {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format(round(truncated, 2),
occluded,
round(alpha, 2),
round(bounding_box[0], 2),
round(bounding_box[1], 2),
round(bounding_box[2], 2),
round(bounding_box[3], 2),
round(height, 2),
round(width, 2),
round(length, 2),
round(x, 2),
round(y, 2),
round(z, 2),
round(rotation_y, 2))
fp_label.write(line)
fp_label.close()
def get_file_names(self):
self.__file_names = []
for i in os.listdir(DATA_PATH):
if i.split('.')[-1] == 'tfrecord':
self.__file_names.append(DATA_PATH + '/' + i)
def create_folder(self):
if not os.path.exists(KITTI_PATH):
os.mkdir(KITTI_PATH)
if not os.path.exists(CALIB_PATH):
os.mkdir(CALIB_PATH)
if not os.path.exists(LIDAR_PATH):
os.mkdir(LIDAR_PATH)
if not os.path.exists(LABEL_ALL_PATH):
os.mkdir(LABEL_ALL_PATH)
if not os.path.exists(LABEL_2D_PATH):
os.mkdir(LABEL_2D_PATH)
for i in range(5):
if not os.path.exists(IMAGE_PATH + str(i)):
os.mkdir(IMAGE_PATH + str(i))
if not os.path.exists(LABEL_PATH + str(i)):
os.mkdir(LABEL_PATH + str(i))
if not os.path.exists(LABEL_2D_PATH + str(i)):
os.mkdir(LABEL_2D_PATH + str(i))
def extract_intensity(self, frame, range_images, lidar_num):
""" extract the intensity from the original range image
:param frame: open dataset frame proto
:param frame_num: the current frame number
:param lidar_num: the number of current lidar
:return:
"""
intensity_0 = np.array(range_images[lidar_num][0].data).reshape(-1,4)
intensity_0 = intensity_0[:,1]
intensity_1 = np.array(range_images[lidar_num][1].data).reshape(-1,4)[:,1]
return intensity_0, intensity_1
def image_show(self, data, name, layout, cmap=None):
"""Show an image."""
plt.subplot(*layout)
plt.imshow(tf.image.decode_jpeg(data), cmap=cmap)
plt.title(name)
plt.grid(False)
plt.axis('off')
def parse_range_image_and_camera_projection(self, frame):
"""Parse range images and camera projections given a frame.
Args:
frame: open dataset frame proto
Returns:
range_images: A dict of {laser_name,
[range_image_first_return, range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return,
camera_projection_from_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
"""
self.__range_images = {}
# camera_projections = {}
# range_image_top_pose = None
for laser in frame.lasers:
if len(laser.ri_return1.range_image_compressed) > 0:
range_image_str_tensor = tf.decode_compressed(
laser.ri_return1.range_image_compressed, 'ZLIB')
ri = open_dataset.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
self.__range_images[laser.name] = [ri]
if laser.name == open_dataset.LaserName.TOP:
range_image_top_pose_str_tensor = tf.decode_compressed(
laser.ri_return1.range_image_pose_compressed, 'ZLIB')
range_image_top_pose = open_dataset.MatrixFloat()
range_image_top_pose.ParseFromString(
bytearray(range_image_top_pose_str_tensor.numpy()))
# camera_projection_str_tensor = tf.decode_compressed(
# laser.ri_return1.camera_projection_compressed, 'ZLIB')
# cp = open_dataset.MatrixInt32()
# cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
# camera_projections[laser.name] = [cp]
if len(laser.ri_return2.range_image_compressed) > 0:
range_image_str_tensor = tf.decode_compressed(
laser.ri_return2.range_image_compressed, 'ZLIB')
ri = open_dataset.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
self.__range_images[laser.name].append(ri)
#
# camera_projection_str_tensor = tf.decode_compressed(
# laser.ri_return2.camera_projection_compressed, 'ZLIB')
# cp = open_dataset.MatrixInt32()
# cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
# camera_projections[laser.name].append(cp)
return self.__range_images, range_image_top_pose
def plot_range_image_helper(self, data, name, layout, vmin=0, vmax=1, cmap='gray'):
"""Plots range image.
Args:
data: range image data
name: the image title
layout: plt layout
vmin: minimum value of the passed data
vmax: maximum value of the passed data
cmap: color map
"""
plt.subplot(*layout)
plt.imshow(data, cmap=cmap, vmin=vmin, vmax=vmax)
plt.title(name)
plt.grid(False)
plt.axis('off')
def get_range_image(self, laser_name, return_index):
"""Returns range image given a laser name and its return index."""
return self.__range_images[laser_name][return_index]
def show_range_image(self, range_image, layout_index_start=1):
"""Shows range image.
Args:
range_image: the range image data from a given lidar of type MatrixFloat.
layout_index_start: layout offset
"""
range_image_tensor = tf.convert_to_tensor(range_image.data)
range_image_tensor = tf.reshape(range_image_tensor, range_image.shape.dims)
lidar_image_mask = tf.greater_equal(range_image_tensor, 0)
range_image_tensor = tf.where(lidar_image_mask, range_image_tensor,
tf.ones_like(range_image_tensor) * 1e10)
range_image_range = range_image_tensor[..., 0]
range_image_intensity = range_image_tensor[..., 1]
range_image_elongation = range_image_tensor[..., 2]
self.plot_range_image_helper(range_image_range.numpy(), 'range',
[8, 1, layout_index_start], vmax=75, cmap='gray')
self.plot_range_image_helper(range_image_intensity.numpy(), 'intensity',
[8, 1, layout_index_start + 1], vmax=1.5, cmap='gray')
self.plot_range_image_helper(range_image_elongation.numpy(), 'elongation',
[8, 1, layout_index_start + 2], vmax=1.5, cmap='gray')
def convert_range_image_to_point_cloud(self, frame, range_images, range_image_top_pose, ri_index=0):
"""Convert range images to point cloud.
Args:
frame: open dataset frame
range_images: A dict of {laser_name,
[range_image_first_return, range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return,
camera_projection_from_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
ri_index: 0 for the first return, 1 for the second return.
Returns:
points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
cp_points: {[N, 6]} list of camera projections of length 5
(number of lidars).
intensity: {[N, 1]} list of intensity of length 5 (number of lidars).
"""
calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name)
# lasers = sorted(frame.lasers, key=lambda laser: laser.name)
points = []
# cp_points = []
intensity = []
frame_pose = tf.convert_to_tensor(
np.reshape(np.array(frame.pose.transform), [4, 4]))
# [H, W, 6]
range_image_top_pose_tensor = tf.reshape(
tf.convert_to_tensor(range_image_top_pose.data),
range_image_top_pose.shape.dims)
# [H, W, 3, 3]
range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
range_image_top_pose_tensor[..., 0], range_image_top_pose_tensor[..., 1],
range_image_top_pose_tensor[..., 2])
range_image_top_pose_tensor_translation = range_image_top_pose_tensor[..., 3:]
range_image_top_pose_tensor = transform_utils.get_transform(
range_image_top_pose_tensor_rotation,
range_image_top_pose_tensor_translation)
for c in calibrations:
range_image = range_images[c.name][ri_index]
if len(c.beam_inclinations) == 0:
beam_inclinations = range_image_utils.compute_inclination(
tf.constant([c.beam_inclination_min, c.beam_inclination_max]),
height=range_image.shape.dims[0])
else:
beam_inclinations = tf.constant(c.beam_inclinations)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
range_image_tensor = tf.reshape(
tf.convert_to_tensor(range_image.data), range_image.shape.dims)
pixel_pose_local = None
frame_pose_local = None
if c.name == open_dataset.LaserName.TOP:
pixel_pose_local = range_image_top_pose_tensor
pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
frame_pose_local = tf.expand_dims(frame_pose, axis=0)
range_image_mask = range_image_tensor[..., 0] > 0
range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(range_image_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(tf.convert_to_tensor(beam_inclinations), axis=0),
pixel_pose=pixel_pose_local,
frame_pose=frame_pose_local)
range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
points_tensor = tf.gather_nd(range_image_cartesian,
tf.where(range_image_mask))
intensity_tensor = tf.gather_nd(range_image_tensor,
tf.where(range_image_mask))
# cp = camera_projections[c.name][0]
# cp_tensor = tf.reshape(tf.convert_to_tensor(cp.data), cp.shape.dims)
# cp_points_tensor = tf.gather_nd(cp_tensor, tf.where(range_image_mask))
points.append(points_tensor.numpy())
# cp_points.append(cp_points_tensor.numpy())
intensity.append(intensity_tensor.numpy()[:, 1])
return points, intensity
def rgba(self, r):
"""Generates a color based on range.
Args:
r: the range value of a given point.
Returns:
The color for a given range
"""
c = plt.get_cmap('jet')((r % 20.0) / 20.0)
c = list(c)
c[-1] = 0.5 # alpha
return c
def plot_image(self, camera_image):
"""Plot a cmaera image."""
plt.figure(figsize=(20, 12))
plt.imshow(tf.image.decode_jpeg(camera_image.image))
plt.grid("off")
def plot_points_on_image(self, projected_points, camera_image, rgba_func, point_size=5.0):
"""Plots points on a camera image.
Args:
projected_points: [N, 3] numpy array. The inner dims are
[camera_x, camera_y, range].
camera_image: jpeg encoded camera image.
rgba_func: a function that generates a color from a range value.
point_size: the point size.
"""
self.plot_image(camera_image)
xs = []
ys = []
colors = []
for point in projected_points:
xs.append(point[0]) # width, col
ys.append(point[1]) # height, row
colors.append(rgba_func(point[2]))
plt.scatter(xs, ys, c=colors, s=point_size, edgecolors="none")
if __name__ == '__main__':
adapter = Adapter()
adapter.cvt()
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
test-integration/src/main/java/org/odata4j/examples/consumer/CodePlexConsumerExample.java | package org.odata4j.examples.consumer;
import org.core4j.Enumerable;
import org.odata4j.consumer.ODataConsumer;
import org.odata4j.consumer.ODataConsumers;
import org.odata4j.consumer.behaviors.OClientBehaviors;
import org.odata4j.core.OEntity;
import org.odata4j.core.ORelatedEntitiesLink;
import org.odata4j.examples.AbstractExample;
public class CodePlexConsumerExample extends AbstractExample {
// for more info: https://codeplexodata.cloudapp.net/
private static final int MAX_LISTING = 5;
public static void main(String[] args) {
CodePlexConsumerExample example = new CodePlexConsumerExample();
example.run(args);
}
private void run(String[] args) {
ODataConsumer.dump.requestHeaders(true);
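    // Credentials come either from the command-line arguments or from the
    // CODEPLEX environment variable, which is expected to hold "user:password".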
String[] codeplexCreds = args.length > 0 ? args : System.getenv("CODEPLEX").split(":");
String codeplexUser = "snd\\" + codeplexCreds[0] + "_cp";
String codeplexPassword = codeplexCreds[1];
for (String collection : Enumerable.create("TFS03", "TFS05", "TFS09")) {
ODataConsumer c = ODataConsumers.newBuilder("https://codeplexodata.cloudapp.net/" + collection)
.setClientBehaviors(OClientBehaviors.basicAuth(codeplexUser, codeplexPassword))
.build();
for (OEntity p : c.getEntities("Projects").execute()) {
reportEntity("project:", p);
if (p.getProperty("Name", String.class).getValue().equals("s3"))
continue;
for (OEntity cs : listChildren(c, p, "Changesets")) {
reportEntity("changeset:", cs);
for (OEntity ch : listChildren(c, cs, "Changes")) {
reportEntity("change:", ch);
}
}
for (OEntity wi : listChildren(c, p, "WorkItems")) {
reportEntity("workitem:", wi);
for (OEntity a : listChildren(c, wi, "Attachments")) {
reportEntity("attachment:", a);
}
}
}
}
}
private static Iterable<OEntity> listChildren(ODataConsumer c, OEntity parent, String child) {
return c.getEntities(parent.getLink(child, ORelatedEntitiesLink.class)).execute().take(MAX_LISTING);
}
}
| [
"\"CODEPLEX\""
]
| []
| [
"CODEPLEX"
]
| [] | ["CODEPLEX"] | java | 1 | 0 | |
mapofinnovation/controllers/uifunc.py | import logging
import json
import redis
import sys
import urllib
import os
from urlparse import urlparse
from pylons import request, response, session, tmpl_context as c, url
from pylons.decorators import jsonify
from pylons.controllers.util import abort, redirect
from mapofinnovation.lib.base import BaseController, render
log = logging.getLogger(__name__)
class UifuncController(BaseController):
def index(self):
# Return a rendered front page template
markers = []
indices = {
"name": "name",
"city": "city",
"country": "country",
"website": "primary_website",
"primarytype": "primary_type",
"multitypes": "types_multiple",
"description": "description",
"latitude": "latitude",
"longitude":"longitude",
"services": "services"
}
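        # `indices` maps the keys expected by the map template to the field
        # names stored in each Redis hash; one marker dict is built per space
        # record and the full list is passed to makermap.html as JSON.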
if os.environ.get("REDIS_URL") :
redis_url = os.environ.get("REDIS_URL")
else:
redis_url = "localhost"
r = redis.from_url(redis_url)
i = 0
for key in r.scan_iter():
marker = {}
row = r.hgetall(key)
for header in indices.keys():
marker[header] = unicode(row[str(indices[header])], errors='replace')
markers.append(marker)
c.markers = json.dumps(markers)
return render('/makermap.html')
def wikipage(self,id=None):
#Return a wiki for the given space
if os.environ.get("REDIS_URL") :
redis_url = os.environ.get("REDIS_URL")
else:
redis_url = "localhost"
r = redis.from_url(redis_url)
if id is None :
return 'Provide a valid space id'
elif r.exists(id):
data = r.hgetall(id)
addresstext = str(data['street_address']).decode("ISO-8859-1")
websitetext = urllib.unquote(data['primary_website']).decode('utf8')
return render('/wikipage.html',extra_vars={'last_updated':str(data['last_updated']),'name':str(data['name']),'status':str(data['status']),'website_url':websitetext,'primarytype':str(data['primary_type']),'secondarytype':'','space_description':str(data['description']),'address':addresstext})
else :
return 'There is no space with this id. Please recheck and submit'
def about(self):
return render('/about.html')
def goals(self):
return render('/goals.html')
def userDocs(self):
return render('/user-documentation.html')
def devDocs(self):
return render('/developer-documentation.html') | []
| []
| [
"REDIS_URL"
]
| [] | ["REDIS_URL"] | python | 1 | 0 | |
src/func/physiological_regressors.py |
import os
import sys
import numpy as np
import nibabel as nib
from scipy.io import savemat
###### print to log files #######
QCfile_name = ''.join([os.environ['QCfile_name'],'.log'])
fqc=open(QCfile_name, "a+")
logfile_name = ''.join([os.environ['logfile_name'],'.log'])
flog=open(logfile_name, "a+")
flog.write("\n *** python time_series **** ")
EPIpath=os.environ['EPIpath']
fileIN=sys.argv[1]
flog.write("\n"+"fileIN "+ fileIN)
aCompCorr=sys.argv[2]
flog.write("\n aCompCorr "+ aCompCorr)
num_comp=int(sys.argv[3])
flog.write("\n num_comp "+ str(num_comp))
PhReg_path=sys.argv[4]
flog.write("\n PhReg_path "+ PhReg_path)
def get_ts(vol,numTP,rest):
numVoxels = np.count_nonzero(vol);
print("numVoxels - ",numVoxels)
mask = np.nonzero(vol != 0)
ts = np.zeros((numVoxels,numTP))
for ind in range(0,numTP):
rvol = rest[:,:,:,ind]
rvals = rvol[mask[0],mask[1],mask[2]]
ts[:,ind] = rvals
return ts,mask
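# get_ts returns a (numVoxels x numTP) array with one row per non-zero mask
# voxel (plus the voxel indices), so the CSF/WM signals can later be averaged
# or decomposed with PCA across voxels.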
def get_pca(data, n_comp):
from sklearn.decomposition import PCA
print("data shape: ",data.shape)
pca = PCA(n_components = n_comp)
pca.fit(data)
PC = pca.components_
print("PC shape ",PC.shape)
PCtop = PC
latent = pca.explained_variance_
print("latent: ",latent)
variance = np.true_divide(np.cumsum(latent),np.sum(latent))
print("explained variance: ",variance)
return PCtop,variance
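# Since pca.fit() here treats each voxel as a sample and each time point as a
# feature, PCtop has shape (n_comp x numTP): its rows are component time-courses
# saved below as aCompCor nuisance regressors, and `variance` is the cumulative
# fraction of variance explained.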
### load data and masks
resting = nib.load(fileIN)
resting_vol = resting.get_data()
[sizeX,sizeY,sizeZ,numTimePoints] = resting_vol.shape
print("resting_vol.shape ", sizeX,sizeY,sizeZ,numTimePoints)
fname = ''.join([EPIpath,'/rT1_CSFvent_mask_eroded.nii.gz'])
volCSFvent_vol = nib.load(fname).get_data()
numVoxels = np.count_nonzero(volCSFvent_vol);
fname = ''.join([EPIpath,'/rT1_WM_mask_eroded.nii.gz'])
volWM_vol = nib.load(fname).get_data()
numVoxels = np.count_nonzero(volWM_vol);
### CSFvent time-series
[CSFts,CSFmask] = get_ts(volCSFvent_vol,numTimePoints,resting_vol);
### WM time-series
[WMts,WMmask] = get_ts(volWM_vol,numTimePoints,resting_vol);
if aCompCorr.lower() in ['true','1']:
print("-------------aCompCorr--------------")
flog.write("\n Physiological Reg: aCompCorr.\n")
[CSFpca,CSFvar] = get_pca(CSFts,num_comp)
flog.write("\n Running PCA on CSF time-series.\n")
[WMpca,WMvar] = get_pca(WMts,num_comp)
flog.write("\n Running PCA on WM time-series.\n")
# save the data
fname = ''.join([PhReg_path,'/dataPCA_WM-CSF.npz'])
np.savez(fname,CSFpca=CSFpca,CSFvar=CSFvar,CSFmask=CSFmask,CSFts=CSFts,WMpca=WMpca,WMvar=WMvar,WMmask=WMmask,WMts=WMts)
fname = ''.join([PhReg_path,'/dataPCA_WM-CSF.mat'])
print("saving MATLAB file ", fname)
mdic = {"CSFpca" : CSFpca,"CSFvar" : CSFvar,"CSFmask" : CSFmask,"CSFts" : CSFts,"WMpca" : WMpca,"WMvar" : WMvar,"WMmask" : WMmask,"WMts" : WMts}
savemat(fname, mdic)
print("Saved aCompCor PCA regressors")
else:
print("-------------Mean CSF and WM Regression--------------")
flog.write("\n Physiological Reg: Mean CSF and WM Regression.\n")
CSFavg = np.mean(CSFts,axis=0)
CSFderiv = np.append(0,np.diff(CSFavg));
CSFavg_sq = np.power(CSFavg,2)
CSFderiv_sq = np.power(CSFderiv,2)
WMavg = np.mean(WMts,axis=0)
WMderiv = np.append(0,np.diff(WMavg));
WMavg_sq = np.power(WMavg,2)
WMderiv_sq = np.power(WMderiv,2)
# save the data
fname = ''.join([PhReg_path,'/dataMnRg_WM-CSF.npz'])
np.savez(fname,CSFavg=CSFavg,CSFavg_sq=CSFavg_sq,CSFderiv=CSFderiv,CSFderiv_sq=CSFderiv_sq,WMavg=WMavg,WMavg_sq=WMavg_sq,WMderiv=WMderiv,WMderiv_sq=WMderiv_sq)
print("savign MATLAB file ", fname)
fname = ''.join([PhReg_path,'/dataMnRg_WM-CSF.mat'])
mdic = {"CSFavg" : CSFavg,"CSFavg_sq" : CSFavg_sq,"CSFderiv" : CSFderiv,"CSFderiv_sq" : CSFderiv_sq,"WMavg" : WMavg,"WMavg_sq" : WMavg_sq,"WMderiv" : WMderiv,"WMderiv_sq" : WMderiv_sq}
savemat(fname, mdic)
print("saved mean CSF WM signal, derivatives, and quadtatics")
fqc.close()
flog.close()
| []
| []
| [
"logfile_name",
"EPIpath",
"QCfile_name"
]
| [] | ["logfile_name", "EPIpath", "QCfile_name"] | python | 3 | 0 | |
product_search/settings.py | import os
import dj_database_url
import django_heroku
import dotenv
dotenv.read_dotenv()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
SECRET_KEY = os.environ["SECRET_KEY"]
DEBUG = os.environ["DEBUG"] == "true"
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
# Disable Django's own staticfiles handling in favour of WhiteNoise, for
# greater consistency between gunicorn and `./manage.py runserver`. See:
# http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
"whitenoise.runserver_nostatic",
"django.contrib.staticfiles",
"graphene_django",
"corsheaders",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "product_search.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
"debug": DEBUG,
},
}
]
WSGI_APPLICATION = "product_search.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Change 'default' database configuration with $DATABASE_URL.
DATABASES["default"].update(dj_database_url.config(conn_max_age=500, ssl_require=True))
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# Allow all host headers
ALLOWED_HOSTS = ["*"]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, "staticfiles")
STATIC_URL = "/static/"
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [os.path.join(PROJECT_ROOT, "static")]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Django graphene
# http://docs.graphene-python.org/projects/django/en/latest/
GRAPHENE = {"SCHEMA": "server.graphql.schema"}
# Cors
# https://github.com/ottoyiu/django-cors-headers
CORS_ORIGIN_ALLOW_ALL = True
# Application variables
WALMART_API_URL = os.environ["WALMART_API_URL"]
WALMART_API_KEY = os.environ["WALMART_API_KEY"]
# Activate Django-Heroku.
django_heroku.settings(locals())
| []
| []
| [
"SECRET_KEY",
"WALMART_API_KEY",
"WALMART_API_URL",
"DEBUG"
]
| [] | ["SECRET_KEY", "WALMART_API_KEY", "WALMART_API_URL", "DEBUG"] | python | 4 | 0 | |
tests.py | #!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def runtests(*test_args):
os.environ['DJANGO_SETTINGS_MODULE'] = 'password_policies.tests.test_settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(["password_policies.tests"])
sys.exit(bool(failures))
if __name__ == '__main__':
runtests(*sys.argv[1:])
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
test/images/sample-device-plugin/sampledeviceplugin.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"os"
"path/filepath"
"time"
"k8s.io/klog"
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
dm "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
)
const (
resourceName = "example.com/resource"
)
// stubAllocFunc creates and returns allocation response for the input allocate request
func stubAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) {
var responses pluginapi.AllocateResponse
for _, req := range r.ContainerRequests {
response := &pluginapi.ContainerAllocateResponse{}
for _, requestID := range req.DevicesIDs {
dev, ok := devs[requestID]
if !ok {
return nil, fmt.Errorf("invalid allocation request with non-existing device %s", requestID)
}
if dev.Health != pluginapi.Healthy {
return nil, fmt.Errorf("invalid allocation request with unhealthy device: %s", requestID)
}
// create fake device file
fpath := filepath.Join("/tmp", dev.ID)
// clean first
if err := os.RemoveAll(fpath); err != nil {
return nil, fmt.Errorf("failed to clean fake device file from previous run: %s", err)
}
f, err := os.Create(fpath)
if err != nil && !os.IsExist(err) {
return nil, fmt.Errorf("failed to create fake device file: %s", err)
}
f.Close()
response.Mounts = append(response.Mounts, &pluginapi.Mount{
ContainerPath: fpath,
HostPath: fpath,
})
}
responses.ContainerResponses = append(responses.ContainerResponses, response)
}
return &responses, nil
}
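// Each allocated (and healthy) device is backed by an empty file created under
// /tmp and named after its device ID; that path is bind-mounted into the
// container through the Mounts field of the allocate response (presumably so
// the e2e tests can check that allocation actually took place).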
func main() {
devs := []*pluginapi.Device{
{ID: "Dev-1", Health: pluginapi.Healthy},
{ID: "Dev-2", Health: pluginapi.Healthy},
}
pluginSocksDir := os.Getenv("PLUGIN_SOCK_DIR")
klog.Infof("pluginSocksDir: %s", pluginSocksDir)
if pluginSocksDir == "" {
klog.Errorf("Empty pluginSocksDir")
return
}
socketPath := pluginSocksDir + "/dp." + fmt.Sprintf("%d", time.Now().Unix())
dp1 := dm.NewDevicePluginStub(devs, socketPath, resourceName, false)
if err := dp1.Start(); err != nil {
panic(err)
}
dp1.SetAllocFunc(stubAllocFunc)
if err := dp1.Register(pluginapi.KubeletSocket, resourceName, pluginapi.DevicePluginPath); err != nil {
panic(err)
}
select {}
}
| [
"\"PLUGIN_SOCK_DIR\""
]
| []
| [
"PLUGIN_SOCK_DIR"
]
| [] | ["PLUGIN_SOCK_DIR"] | go | 1 | 0 | |
deploy/docker/conf_util.go | package main
import (
"fmt"
"github.com/go-yaml/yaml"
"io/ioutil"
"os"
"strconv"
"strings"
)
var khome = os.Getenv("KUIPER_HOME")
var fileMap = map[string]string{
"edgex": khome + "/etc/sources/edgex.yaml",
"random": khome + "/etc/sources/random.yaml",
"zmq": khome + "/etc/sources/zmq.yaml",
"mqtt_source": khome + "/etc/mqtt_source.yaml",
"kuiper": khome + "/etc/kuiper.yaml",
"client": khome + "/etc/client.yaml",
}
var file_keys_map = map[string]map[string]string{
"edgex": {
"CLIENTID": "ClientId",
"USERNAME": "Username",
"PASSWORD": "Password",
"QOS": "Qos",
"KEEPALIVE": "KeepAlive",
"RETAINED": "Retained",
"CONNECTIONPAYLOAD": "ConnectionPayload",
"CERTFILE": "CertFile",
"KEYFILE": "KeyFile",
"CERTPEMBLOCK": "CertPEMBlock",
"KEYPEMBLOCK": "KeyPEMBlock",
"SKIPCERTVERIFY": "SkipCertVerify",
},
"mqtt_source": {
"SHAREDSUBSCRIPTION": "sharedSubscription",
"CERTIFICATIONPATH": "certificationPath",
"PRIVATEKEYPATH": "privateKeyPath",
},
"kuiper": {
"CONSOLELOG": "consoleLog",
"FILELOG": "fileLog",
"RESTPORT": "restPort",
"RESTTLS": "restTls",
"PROMETHEUSPORT": "prometheusPort",
},
}
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
func deleteFile(path string) {
// delete file
var err = os.Remove(path)
if err != nil {
return
}
fmt.Println("File Deleted")
}
func main() {
fmt.Println(fileMap["edgex"])
files := make(map[string]map[interface{}]interface{})
ProcessEnv(files, os.Environ())
for f, v := range files {
if bs, err := yaml.Marshal(v); err != nil {
fmt.Println(err)
} else {
message := fmt.Sprintf("-------------------\nConf file %s: \n %s", f, string(bs))
fmt.Println(message)
if fname, ok := fileMap[f]; ok {
if fileExists(fname) {
deleteFile(fname)
}
if e := ioutil.WriteFile(fname, bs, 0644); e != nil {
fmt.Println(e)
}
}
}
}
}
func ProcessEnv(files map[string]map[interface{}]interface{}, vars []string) {
for _, e := range vars {
pair := strings.SplitN(e, "=", 2)
if len(pair) != 2 {
fmt.Printf("invalid env %s, skip it.\n", e)
continue
}
valid := false
		for k := range fileMap {
if strings.HasPrefix(pair[0], strings.ToUpper(k)) {
valid = true
break
}
}
if !valid {
continue
} else {
fmt.Printf("Find env: %s, start to handle it.\n", e)
}
env_v := strings.ReplaceAll(pair[0], "__", ".")
keys := strings.Split(env_v, ".")
for i, v := range keys {
keys[i] = v
}
if len(keys) < 2 {
fmt.Printf("not concerned env %s, skip it.\n", e)
continue
} else {
k := strings.ToLower(keys[0])
if v, ok := files[k]; !ok {
if data, err := ioutil.ReadFile(fileMap[k]); err != nil {
fmt.Printf("%s\n", err)
} else {
m := make(map[interface{}]interface{})
err = yaml.Unmarshal([]byte(data), &m)
if err != nil {
fmt.Println(err)
}
files[k] = m
Handle(k, m, keys[1:], pair[1])
}
} else {
Handle(k, v, keys[1:], pair[1])
}
}
}
}
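// Illustrative mapping (example value, not from the original source): an entry
// such as MQTT_SOURCE__DEFAULT__QOS=1 matches the "mqtt_source" prefix, has its
// "__" separators rewritten to ".", and ends up in $KUIPER_HOME/etc/mqtt_source.yaml
// as `default: {qos: 1}`, with the value coerced to an integer by getValueType.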
func Handle(file string, conf map[interface{}]interface{}, skeys []string, val string) {
key := getKey(file, skeys[0])
if len(skeys) == 1 {
conf[key] = getValueType(val)
} else if len(skeys) >= 2 {
if v, ok := conf[key]; ok {
if v1, ok1 := v.(map[interface{}]interface{}); ok1 {
Handle(file, v1, skeys[1:], val)
} else {
fmt.Printf("Not expected map: %v\n", v)
}
} else {
v1 := make(map[interface{}]interface{})
conf[key] = v1
Handle(file, v1, skeys[1:], val)
}
}
}
func getKey(file string, key string) string{
if m, ok := file_keys_map[file][key]; ok {
return m
} else {
return strings.ToLower(key)
}
}
func getValueType(val string) interface{} {
if i, err := strconv.ParseInt(val, 10, 64); err == nil {
return i
} else if b, err := strconv.ParseBool(val); err == nil {
return b
} else if f, err := strconv.ParseFloat(val, 64); err == nil {
return f
}
return val
}
| [
"\"KUIPER_HOME\""
]
| []
| [
"KUIPER_HOME"
]
| [] | ["KUIPER_HOME"] | go | 1 | 0 | |
etc/structure/project_name/settings.py | """
Django settings for {{project_name}} project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
from dotenv import load_dotenv
load_dotenv()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# Environment
SECRET_KEY = os.getenv("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "{{project_name}}.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "{{project_name}}.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = "/static/"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
| []
| []
| [
"SECRET_KEY"
]
| [] | ["SECRET_KEY"] | python | 1 | 0 | |
cmd/zoekt-mirror-github/main.go | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This binary fetches all repos of a user or organization and clones
// them. It is strongly recommended to get a personal API token from
// https://github.com/settings/tokens, save the token in a file, and
// point the --token option to it.
package main
import (
"context"
"flag"
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/google/go-github/v27/github"
"golang.org/x/oauth2"
"github.com/google/zoekt/gitindex"
)
type topicsFlag []string
func (f *topicsFlag) String() string {
return strings.Join(*f, ",")
}
func (f *topicsFlag) Set(value string) error {
*f = append(*f, value)
return nil
}
type reposFilters struct {
topics []string
excludeTopics []string
noArchived *bool
}
func main() {
dest := flag.String("dest", "", "destination directory")
githubURL := flag.String("url", "", "GitHub Enterprise url. If not set github.com will be used as the host.")
org := flag.String("org", "", "organization to mirror")
user := flag.String("user", "", "user to mirror")
token := flag.String("token",
filepath.Join(os.Getenv("HOME"), ".github-token"),
"file holding API token.")
forks := flag.Bool("forks", false, "also mirror forks.")
deleteRepos := flag.Bool("delete", false, "delete missing repos")
namePattern := flag.String("name", "", "only clone repos whose name matches the given regexp.")
excludePattern := flag.String("exclude", "", "don't mirror repos whose names match this regexp.")
topics := topicsFlag{}
flag.Var(&topics, "topic", "only clone repos whose have one of given topics. You can add multiple topics by setting this more than once.")
excludeTopics := topicsFlag{}
flag.Var(&excludeTopics, "exclude_topic", "don't clone repos whose have one of given topics. You can add multiple topics by setting this more than once.")
noArchived := flag.Bool("no_archived", false, "mirror only projects that are not archived")
flag.Parse()
if *dest == "" {
log.Fatal("must set --dest")
}
if *githubURL == "" && *org == "" && *user == "" {
log.Fatal("must set either --org or --user when github.com is used as host")
}
var host string
var apiBaseURL string
var client *github.Client
if *githubURL != "" {
rootURL, err := url.Parse(*githubURL)
if err != nil {
log.Fatal(err)
}
host = rootURL.Host
apiPath, err := url.Parse("/api/v3/")
if err != nil {
log.Fatal(err)
}
apiBaseURL = rootURL.ResolveReference(apiPath).String()
client, err = github.NewEnterpriseClient(apiBaseURL, apiBaseURL, nil)
if err != nil {
log.Fatal(err)
}
} else {
host = "github.com"
apiBaseURL = "https://github.com/"
client = github.NewClient(nil)
}
destDir := filepath.Join(*dest, host)
if err := os.MkdirAll(destDir, 0o755); err != nil {
log.Fatal(err)
}
if *token != "" {
content, err := ioutil.ReadFile(*token)
if err != nil {
log.Fatal(err)
}
ts := oauth2.StaticTokenSource(
&oauth2.Token{
AccessToken: strings.TrimSpace(string(content)),
})
tc := oauth2.NewClient(context.Background(), ts)
if *githubURL != "" {
client, err = github.NewEnterpriseClient(apiBaseURL, apiBaseURL, tc)
if err != nil {
log.Fatal(err)
}
} else {
client = github.NewClient(tc)
}
}
reposFilters := reposFilters{
topics: topics,
excludeTopics: excludeTopics,
noArchived: noArchived,
}
var repos []*github.Repository
var err error
if *org != "" {
repos, err = getOrgRepos(client, *org, reposFilters)
} else if *user != "" {
repos, err = getUserRepos(client, *user, reposFilters)
} else {
log.Printf("no user or org specified, cloning all repos.")
repos, err = getUserRepos(client, "", reposFilters)
}
if err != nil {
log.Fatal(err)
}
if !*forks {
trimmed := repos[:0]
for _, r := range repos {
if r.Fork == nil || !*r.Fork {
trimmed = append(trimmed, r)
}
}
repos = trimmed
}
filter, err := gitindex.NewFilter(*namePattern, *excludePattern)
if err != nil {
log.Fatal(err)
}
{
trimmed := repos[:0]
for _, r := range repos {
if filter.Include(*r.Name) {
trimmed = append(trimmed, r)
}
}
repos = trimmed
}
if err := cloneRepos(destDir, repos); err != nil {
log.Fatalf("cloneRepos: %v", err)
}
if *deleteRepos {
if err := deleteStaleRepos(*dest, filter, repos, *org+*user); err != nil {
log.Fatalf("deleteStaleRepos: %v", err)
}
}
}
func deleteStaleRepos(destDir string, filter *gitindex.Filter, repos []*github.Repository, user string) error {
var baseURL string
if len(repos) > 0 {
baseURL = *repos[0].HTMLURL
} else {
return nil
}
u, err := url.Parse(baseURL)
if err != nil {
return err
}
u.Path = user
names := map[string]struct{}{}
for _, r := range repos {
u, err := url.Parse(*r.HTMLURL)
if err != nil {
return err
}
names[filepath.Join(u.Host, u.Path+".git")] = struct{}{}
}
if err := gitindex.DeleteRepos(destDir, u, names, filter); err != nil {
log.Fatalf("deleteRepos: %v", err)
}
return nil
}
func hasIntersection(s1, s2 []string) bool {
hash := make(map[string]bool)
for _, e := range s1 {
hash[e] = true
}
for _, e := range s2 {
if hash[e] {
return true
}
}
return false
}
func filterRepositories(repos []*github.Repository, include []string, exclude []string, noArchived bool) (filteredRepos []*github.Repository) {
for _, repo := range repos {
if noArchived && *repo.Archived {
continue
}
if (len(include) == 0 || hasIntersection(include, repo.Topics)) &&
!hasIntersection(exclude, repo.Topics) {
filteredRepos = append(filteredRepos, repo)
}
}
return
}
func getOrgRepos(client *github.Client, org string, reposFilters reposFilters) ([]*github.Repository, error) {
var allRepos []*github.Repository
opt := &github.RepositoryListByOrgOptions{}
for {
repos, resp, err := client.Repositories.ListByOrg(context.Background(), org, opt)
if err != nil {
return nil, err
}
if len(repos) == 0 {
break
}
opt.Page = resp.NextPage
repos = filterRepositories(repos, reposFilters.topics, reposFilters.excludeTopics, *reposFilters.noArchived)
allRepos = append(allRepos, repos...)
if resp.NextPage == 0 {
break
}
}
return allRepos, nil
}
func getUserRepos(client *github.Client, user string, reposFilters reposFilters) ([]*github.Repository, error) {
var allRepos []*github.Repository
opt := &github.RepositoryListOptions{}
for {
repos, resp, err := client.Repositories.List(context.Background(), user, opt)
if err != nil {
return nil, err
}
if len(repos) == 0 {
break
}
opt.Page = resp.NextPage
repos = filterRepositories(repos, reposFilters.topics, reposFilters.excludeTopics, *reposFilters.noArchived)
allRepos = append(allRepos, repos...)
if resp.NextPage == 0 {
break
}
}
return allRepos, nil
}
func itoa(p *int) string {
if p != nil {
return strconv.Itoa(*p)
}
return ""
}
func cloneRepos(destDir string, repos []*github.Repository) error {
for _, r := range repos {
host, err := url.Parse(*r.HTMLURL)
if err != nil {
return err
}
config := map[string]string{
"zoekt.web-url-type": "github",
"zoekt.web-url": *r.HTMLURL,
"zoekt.name": filepath.Join(host.Hostname(), *r.FullName),
"zoekt.github-stars": itoa(r.StargazersCount),
"zoekt.github-watchers": itoa(r.WatchersCount),
"zoekt.github-subscribers": itoa(r.SubscribersCount),
"zoekt.github-forks": itoa(r.ForksCount),
}
dest, err := gitindex.CloneRepo(destDir, *r.FullName, *r.CloneURL, config)
if err != nil {
return err
}
if dest != "" {
fmt.Println(dest)
}
}
return nil
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
main.go | package main
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
stdlog "log"
"net"
"net/http"
"net/http/pprof"
"net/url"
"os"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/centrifugal/centrifugo/internal/admin"
"github.com/centrifugal/centrifugo/internal/api"
"github.com/centrifugal/centrifugo/internal/client"
"github.com/centrifugal/centrifugo/internal/health"
"github.com/centrifugal/centrifugo/internal/jwtutils"
"github.com/centrifugal/centrifugo/internal/jwtverify"
"github.com/centrifugal/centrifugo/internal/logutils"
"github.com/centrifugal/centrifugo/internal/metrics/graphite"
"github.com/centrifugal/centrifugo/internal/middleware"
"github.com/centrifugal/centrifugo/internal/natsbroker"
"github.com/centrifugal/centrifugo/internal/proxy"
"github.com/centrifugal/centrifugo/internal/rule"
"github.com/centrifugal/centrifugo/internal/tools"
"github.com/centrifugal/centrifugo/internal/webui"
"github.com/FZambia/viper-lite"
"github.com/centrifugal/centrifuge"
"github.com/mattn/go-isatty"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"golang.org/x/crypto/acme"
"golang.org/x/crypto/acme/autocert"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// VERSION of Centrifugo server. Set on build stage.
var VERSION string
var configDefaults = map[string]interface{}{
"gomaxprocs": 0,
"engine": "memory",
"broker": "",
"name": "",
"secret": "",
"token_hmac_secret_key": "",
"token_jwks_public_endpoint": "",
"token_rsa_public_key": "",
"server_side": false,
"publish": false,
"subscribe_to_publish": false,
"anonymous": false,
"presence": false,
"presence_disable_for_client": false,
"history_size": 0,
"history_lifetime": 0,
"history_recover": false,
"history_disable_for_client": false,
"proxy_subscribe": false,
"proxy_publish": false,
"node_info_metrics_aggregate_interval": 60,
"client_anonymous": false,
"client_expired_close_delay": 25,
"client_expired_sub_close_delay": 25,
"client_stale_close_delay": 25,
"client_channel_limit": 128,
"client_queue_max_size": 10485760, // 10MB
"client_presence_ping_interval": 25,
"client_presence_expire_interval": 60,
"client_user_connection_limit": 0,
"client_channel_position_check_delay": 40,
"channel_max_length": 255,
"channel_private_prefix": "$",
"channel_namespace_boundary": ":",
"channel_user_boundary": "#",
"channel_user_separator": ",",
"user_subscribe_to_personal": false,
"user_personal_channel_namespace": "",
"user_personal_single_connection": false,
"client_concurrency": 0,
"debug": false,
"prometheus": false,
"health": false,
"admin": false,
"admin_password": "",
"admin_secret": "",
"admin_insecure": false,
"admin_web_path": "",
"sockjs_url": "https://cdn.jsdelivr.net/npm/sockjs-client@1/dist/sockjs.min.js",
"sockjs_heartbeat_delay": 25,
"websocket_compression": false,
"websocket_compression_min_size": 0,
"websocket_compression_level": 1,
"websocket_read_buffer_size": 0,
"websocket_use_write_buffer_pool": false,
"websocket_write_buffer_size": 0,
"client_ping_interval": 25, // TODO v3: remove.
"websocket_ping_interval": 25,
"client_message_write_timeout": 1, // TODO v3: remove.
"websocket_write_timeout": 1,
"client_request_max_size": 65536, // TODO v3: remove.
"websocket_message_size_limit": 65536, // 64KB
"tls_autocert": false,
"tls_autocert_host_whitelist": "",
"tls_autocert_cache_dir": "",
"tls_autocert_email": "",
"tls_autocert_force_rsa": false, // TODO v3: remove.
"tls_autocert_server_name": "",
"tls_autocert_http": false,
"tls_autocert_http_addr": ":80",
"redis_prefix": "centrifugo",
"redis_connect_timeout": 1, // TODO v3: make all timeouts float.
"redis_read_timeout": 5,
"redis_write_timeout": 1,
"redis_idle_timeout": 0,
"redis_pubsub_num_workers": 0, // TODO v3: remove.
"redis_sequence_ttl": 0,
"grpc_api": false,
"grpc_api_port": 10000,
"shutdown_timeout": 30,
"shutdown_termination_delay": 0,
"graphite": false,
"graphite_host": "localhost",
"graphite_port": 2003,
"graphite_prefix": "centrifugo",
"graphite_interval": 10,
"graphite_tags": false,
"nats_prefix": "centrifugo",
"nats_url": "",
"nats_dial_timeout": 1,
"nats_write_timeout": 1,
"websocket_disable": false,
"sockjs_disable": false,
"api_disable": false,
"admin_handler_prefix": "",
"websocket_handler_prefix": "/connection/websocket",
"sockjs_handler_prefix": "/connection/sockjs",
"api_handler_prefix": "/api",
"prometheus_handler_prefix": "/metrics",
"health_handler_prefix": "/health",
"proxy_connect_endpoint": "",
"proxy_connect_timeout": 1,
"proxy_rpc_endpoint": "",
"proxy_rpc_timeout": 1,
"proxy_refresh_endpoint": "",
"proxy_refresh_timeout": 1,
"memory_history_meta_ttl": 0,
"redis_history_meta_ttl": 0,
"pm_nats_url": "",
"v3_use_offset": false, // TODO v3: remove.
}
func main() {
var configFile string
viper.SetEnvPrefix("centrifugo")
bindConfig := func() {
for k, v := range configDefaults {
viper.SetDefault(k, v)
}
bindEnvs := []string{
"address", "admin", "admin_external", "admin_insecure", "admin_password",
"admin_secret", "admin_web_path", "anonymous", "api_insecure", "api_key",
"channel_max_length", "channel_namespace_boundary", "channel_private_prefix",
"channel_user_boundary", "channel_user_separator", "client_anonymous",
"client_channel_limit", "client_channel_position_check_delay",
"client_expired_close_delay", "client_expired_sub_close_delay",
"client_insecure", "client_message_write_timeout", "client_ping_interval",
"client_presence_expire_interval", "client_presence_ping_interval",
"client_queue_max_size", "client_request_max_size", "client_stale_close_delay",
"debug", "engine", "graphite", "graphite_host", "graphite_interval",
"graphite_port", "graphite_prefix", "graphite_tags", "grpc_api",
"grpc_api_port", "health", "history_lifetime", "history_recover",
"history_size", "internal_address", "internal_port", "join_leave", "log_file",
"log_level", "name", "namespaces", "node_info_metrics_aggregate_interval",
"pid_file", "port", "presence", "prometheus", "publish", "redis_connect_timeout",
"redis_db", "redis_host", "redis_idle_timeout", "redis_master_name",
"redis_password", "redis_port", "redis_prefix", "redis_pubsub_num_workers",
"redis_read_timeout", "redis_sentinels", "redis_tls", "redis_tls_skip_verify",
"redis_url", "redis_write_timeout", "secret", "shutdown_termination_delay",
"shutdown_timeout", "sockjs_heartbeat_delay", "sockjs_url", "subscribe_to_publish",
"tls", "tls_autocert", "tls_autocert_cache_dir", "tls_autocert_email",
"tls_autocert_force_rsa", "tls_autocert_host_whitelist", "tls_autocert_http",
"tls_autocert_http_addr", "tls_autocert_server_name", "tls_cert", "tls_external",
"tls_key", "websocket_compression", "websocket_compression_level",
"websocket_compression_min_size", "websocket_read_buffer_size",
"websocket_write_buffer_size", "history_disable_for_client",
"presence_disable_for_client", "admin_handler_prefix", "websocket_handler_prefix",
"sockjs_handler_prefix", "api_handler_prefix", "prometheus_handler_prefix",
"health_handler_prefix", "grpc_api_tls", "grpc_api_tls_disable",
"grpc_api_tls_cert", "grpc_api_tls_key", "proxy_connect_endpoint",
"proxy_connect_timeout", "proxy_rpc_endpoint", "proxy_rpc_timeout",
"proxy_refresh_endpoint", "proxy_refresh_timeout",
"token_jwks_public_endpoint", "token_rsa_public_key", "token_hmac_secret_key", "redis_sequence_ttl",
"proxy_extra_http_headers", "server_side", "user_subscribe_to_personal",
"user_personal_channel_namespace", "websocket_use_write_buffer_pool",
"websocket_disable", "sockjs_disable", "api_disable", "redis_cluster_addrs",
"broker", "nats_prefix", "nats_url", "nats_dial_timeout", "nats_write_timeout",
"v3_use_offset", "redis_history_meta_ttl", "redis_streams", "memory_history_meta_ttl",
"websocket_ping_interval", "websocket_write_timeout", "websocket_message_size_limit",
"proxy_publish_endpoint", "proxy_publish_timeout", "proxy_subscribe_endpoint",
"proxy_subscribe_timeout", "proxy_subscribe", "proxy_publish", "redis_sentinel_password",
"grpc_api_key", "client_concurrency", "user_personal_single_connection", "pm_nats_url",
}
for _, env := range bindEnvs {
_ = viper.BindEnv(env)
}
}
var rootCmd = &cobra.Command{
Use: "",
Short: "Centrifugo",
Long: "Centrifugo – scalable real-time messaging server in language-agnostic way",
Run: func(cmd *cobra.Command, args []string) {
bindConfig()
bindPFlags := []string{
"engine", "log_level", "log_file", "pid_file", "debug", "name", "admin",
"admin_external", "client_insecure", "admin_insecure", "api_insecure",
"port", "address", "tls", "tls_cert", "tls_key", "tls_external", "internal_port",
"internal_address", "prometheus", "health", "redis_host", "redis_port",
"redis_password", "redis_db", "redis_url", "redis_tls", "redis_tls_skip_verify",
"redis_master_name", "redis_sentinels", "redis_sentinel_password", "grpc_api",
"grpc_api_tls", "grpc_api_tls_disable", "grpc_api_tls_cert", "grpc_api_tls_key",
"grpc_api_port", "broker", "nats_url",
}
for _, flag := range bindPFlags {
_ = viper.BindPFlag(flag, cmd.Flags().Lookup(flag))
}
viper.SetConfigFile(configFile)
absConfPath, err := filepath.Abs(configFile)
if err != nil {
log.Fatal().Msgf("error retrieving config file absolute path: %v", err)
}
err = viper.ReadInConfig()
configFound := true
if err != nil {
switch err.(type) {
case viper.ConfigParseError:
log.Fatal().Msgf("error parsing configuration: %s\n", err)
default:
configFound = false
}
}
file := setupLogging()
if file != nil {
defer file.Close()
}
err = writePidFile(viper.GetString("pid_file"))
if err != nil {
log.Fatal().Msgf("error writing PID: %v", err)
}
if os.Getenv("GOMAXPROCS") == "" {
if viper.IsSet("gomaxprocs") && viper.GetInt("gomaxprocs") > 0 {
runtime.GOMAXPROCS(viper.GetInt("gomaxprocs"))
} else {
runtime.GOMAXPROCS(runtime.NumCPU())
}
}
version := VERSION
if version == "" {
version = "dev"
}
engineName := viper.GetString("engine")
log.Info().Str(
"version", version).Str(
"runtime", runtime.Version()).Int(
"pid", os.Getpid()).Str(
"engine", strings.Title(engineName)).Int(
"gomaxprocs", runtime.GOMAXPROCS(0)).Msg("starting Centrifugo")
log.Info().Str("path", absConfPath).Msg("using config file")
proxyConfig, _ := proxyConfig()
ruleConfig := ruleConfig()
err = ruleConfig.Validate()
if err != nil {
log.Fatal().Msgf("error validating config: %v", err)
}
ruleContainer := rule.NewContainer(ruleConfig)
nodeConfig := nodeConfig(VERSION)
if !viper.GetBool("v3_use_offset") {
log.Warn().Msgf("consider migrating to offset protocol field, details: https://github.com/centrifugal/centrifugo/releases/tag/v2.5.0")
// TODO v3: remove compatibility flags.
centrifuge.CompatibilityFlags |= centrifuge.UseSeqGen
}
node, err := centrifuge.New(nodeConfig)
if err != nil {
log.Fatal().Msgf("error creating Centrifuge Node: %v", err)
}
brokerName := viper.GetString("broker")
if brokerName != "" && brokerName != "nats" {
log.Fatal().Msgf("unknown broker: %s", brokerName)
}
var e centrifuge.Engine
if engineName == "memory" {
e, err = memoryEngine(node)
} else if engineName == "redis" {
e, err = redisEngine(node)
} else {
log.Fatal().Msgf("unknown engine: %s", engineName)
}
if err != nil {
log.Fatal().Msgf("error creating engine: %v", err)
}
tokenVerifier := jwtverify.NewTokenVerifierJWT(jwtVerifierConfig())
clientHandler := client.NewHandler(node, ruleContainer, tokenVerifier, proxyConfig)
clientHandler.Setup()
node.SetEngine(e)
var disableHistoryPresence bool
if engineName == "memory" && brokerName == "nats" {
// Presence and History won't work with Memory engine in distributed case.
disableHistoryPresence = true
node.SetPresenceManager(nil)
}
if disableHistoryPresence {
log.Warn().Msgf("presence, history and recovery disabled with Memory engine and Nats broker")
}
if !configFound {
log.Warn().Msg("config file not found")
}
if brokerName == "nats" {
broker, err := natsbroker.New(node, natsbroker.Config{
URL: viper.GetString("nats_url"),
Prefix: viper.GetString("nats_prefix"),
DialTimeout: time.Duration(viper.GetInt("nats_dial_timeout")) * time.Second,
WriteTimeout: time.Duration(viper.GetInt("nats_write_timeout")) * time.Second,
})
if err != nil {
log.Fatal().Msgf("Error creating broker: %v", err)
}
node.SetBroker(broker)
}
if err = node.Run(); err != nil {
log.Fatal().Msgf("error running node: %v", err)
}
if proxyConfig.ConnectEndpoint != "" {
log.Info().Str("endpoint", proxyConfig.ConnectEndpoint).Msg("proxy connect over HTTP")
}
if proxyConfig.RefreshEndpoint != "" {
log.Info().Str("endpoint", proxyConfig.RefreshEndpoint).Msg("proxy refresh over HTTP")
}
if proxyConfig.RPCEndpoint != "" {
log.Info().Str("endpoint", proxyConfig.RPCEndpoint).Msg("proxy RPC over HTTP")
}
if proxyConfig.SubscribeEndpoint != "" {
log.Info().Str("endpoint", proxyConfig.SubscribeEndpoint).Msg("proxy subscribe over HTTP")
}
if proxyConfig.PublishEndpoint != "" {
log.Info().Str("endpoint", proxyConfig.PublishEndpoint).Msg("proxy publish over HTTP")
}
if viper.GetBool("client_insecure") {
log.Warn().Msg("INSECURE client mode enabled")
}
if viper.GetBool("api_insecure") {
log.Warn().Msg("INSECURE API mode enabled")
}
if viper.GetBool("admin_insecure") {
log.Warn().Msg("INSECURE admin mode enabled")
}
if viper.GetBool("debug") {
log.Warn().Msg("DEBUG mode enabled, see /debug/pprof")
}
var grpcAPIServer *grpc.Server
var grpcAPIAddr string
if viper.GetBool("grpc_api") {
grpcAPIAddr = fmt.Sprintf(":%d", viper.GetInt("grpc_api_port"))
grpcAPIConn, err := net.Listen("tcp", grpcAPIAddr)
if err != nil {
log.Fatal().Msgf("cannot listen to address %s", grpcAPIAddr)
}
var grpcOpts []grpc.ServerOption
var tlsConfig *tls.Config
var tlsErr error
if viper.GetString("grpc_api_key") != "" {
grpcOpts = append(grpcOpts, api.GRPCKeyAuth(viper.GetString("grpc_api_key")))
}
if viper.GetBool("grpc_api_tls") {
tlsConfig, tlsErr = tlsConfigForGRPC()
} else if !viper.GetBool("grpc_api_tls_disable") {
tlsConfig, tlsErr = getTLSConfig()
}
if tlsErr != nil {
log.Fatal().Msgf("error getting TLS config: %v", tlsErr)
}
if tlsConfig != nil {
grpcOpts = append(grpcOpts, grpc.Creds(credentials.NewTLS(tlsConfig)))
}
grpcAPIServer = grpc.NewServer(grpcOpts...)
apiExecutor := api.NewExecutor(node, ruleContainer, "grpc")
_ = api.RegisterGRPCServerAPI(node, apiExecutor, grpcAPIServer, api.GRPCAPIServiceConfig{})
go func() {
if err := grpcAPIServer.Serve(grpcAPIConn); err != nil {
log.Fatal().Msgf("serve GRPC: %v", err)
}
}()
}
if grpcAPIServer != nil {
log.Info().Msgf("serving GRPC API service on %s", grpcAPIAddr)
}
httpAPIExecutor := api.NewExecutor(node, ruleContainer, "http")
servers, err := runHTTPServers(node, httpAPIExecutor)
if err != nil {
log.Fatal().Msgf("error running HTTP server: %v", err)
}
var exporter *graphite.Exporter
if viper.GetBool("graphite") {
exporter = graphite.New(graphite.Config{
Address: net.JoinHostPort(viper.GetString("graphite_host"), strconv.Itoa(viper.GetInt("graphite_port"))),
Gatherer: prometheus.DefaultGatherer,
Prefix: strings.TrimSuffix(viper.GetString("graphite_prefix"), ".") + "." + graphite.PreparePathComponent(nodeConfig.Name),
Interval: time.Duration(viper.GetInt("graphite_interval")) * time.Second,
Tags: viper.GetBool("graphite_tags"),
})
}
handleSignals(configFile, node, ruleContainer, tokenVerifier, servers, grpcAPIServer, exporter)
},
}
rootCmd.Flags().StringVarP(&configFile, "config", "c", "config.json", "path to config file")
rootCmd.Flags().StringP("engine", "e", "memory", "engine to use: memory or redis")
rootCmd.Flags().StringP("broker", "", "", "custom broker to use: ex. nats")
rootCmd.Flags().StringP("log_level", "", "info", "set the log level: debug, info, error, fatal or none")
rootCmd.Flags().StringP("log_file", "", "", "optional log file - if not specified logs go to STDOUT")
rootCmd.Flags().StringP("pid_file", "", "", "optional path to create PID file")
rootCmd.Flags().StringP("name", "n", "", "unique node name")
rootCmd.Flags().BoolP("debug", "", false, "enable debug endpoints")
rootCmd.Flags().BoolP("admin", "", false, "enable admin web interface")
rootCmd.Flags().BoolP("admin_external", "", false, "enable admin web interface on external port")
rootCmd.Flags().BoolP("prometheus", "", false, "enable Prometheus metrics endpoint")
rootCmd.Flags().BoolP("health", "", false, "enable health check endpoint")
rootCmd.Flags().BoolP("client_insecure", "", false, "start in insecure client mode")
rootCmd.Flags().BoolP("api_insecure", "", false, "use insecure API mode")
rootCmd.Flags().BoolP("admin_insecure", "", false, "use insecure admin mode – no auth required for admin socket")
rootCmd.Flags().StringP("address", "a", "", "interface address to listen on")
// TODO v3: make ports integer.
rootCmd.Flags().StringP("port", "p", "8000", "port to bind HTTP server to")
rootCmd.Flags().StringP("internal_address", "", "", "custom interface address to listen on for internal endpoints")
rootCmd.Flags().StringP("internal_port", "", "", "custom port for internal endpoints")
rootCmd.Flags().BoolP("tls", "", false, "enable TLS, requires an X509 certificate and a key file")
rootCmd.Flags().StringP("tls_cert", "", "", "path to an X509 certificate file")
rootCmd.Flags().StringP("tls_key", "", "", "path to an X509 certificate key")
rootCmd.Flags().BoolP("tls_external", "", false, "enable TLS only for external endpoints")
rootCmd.Flags().BoolP("grpc_api", "", false, "enable GRPC API server")
rootCmd.Flags().IntP("grpc_api_port", "", 10000, "port to bind GRPC API server to")
rootCmd.Flags().BoolP("grpc_api_tls", "", false, "enable TLS for GRPC API server, requires an X509 certificate and a key file")
rootCmd.Flags().StringP("grpc_api_tls_cert", "", "", "path to an X509 certificate file for GRPC API server")
rootCmd.Flags().StringP("grpc_api_tls_key", "", "", "path to an X509 certificate key for GRPC API server")
rootCmd.Flags().BoolP("grpc_api_tls_disable", "", false, "disable general TLS for GRPC API server")
rootCmd.Flags().StringP("redis_host", "", "127.0.0.1", "Redis host (Redis engine)")
rootCmd.Flags().StringP("redis_port", "", "6379", "Redis port (Redis engine)")
rootCmd.Flags().StringP("redis_password", "", "", "Redis auth password (Redis engine)")
rootCmd.Flags().IntP("redis_db", "", 0, "Redis database (Redis engine)")
rootCmd.Flags().StringP("redis_url", "", "", "Redis connection URL in format redis://:password@hostname:port/db (Redis engine)")
rootCmd.Flags().BoolP("redis_tls", "", false, "enable Redis TLS connection")
rootCmd.Flags().BoolP("redis_tls_skip_verify", "", false, "disable Redis TLS host verification")
rootCmd.Flags().StringP("redis_master_name", "", "", "name of Redis master Sentinel monitors (Redis engine)")
rootCmd.Flags().StringP("redis_sentinels", "", "", "comma-separated list of Sentinel addresses (Redis engine)")
rootCmd.Flags().StringP("redis_sentinel_password", "", "", "Redis Sentinel auth password (Redis engine)")
rootCmd.Flags().StringP("nats_url", "", "", "Nats connection URL in format nats://user:pass@localhost:4222 (Nats broker)")
var versionCmd = &cobra.Command{
Use: "version",
Short: "Centrifugo version information",
Long: `Print the version information of Centrifugo`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("Centrifugo v%s (Go version: %s)\n", VERSION, runtime.Version())
},
}
var checkConfigFile string
var checkConfigCmd = &cobra.Command{
Use: "checkconfig",
Short: "Check configuration file",
Long: `Check Centrifugo configuration file`,
Run: func(cmd *cobra.Command, args []string) {
bindConfig()
err := validateConfig(checkConfigFile)
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
},
}
checkConfigCmd.Flags().StringVarP(&checkConfigFile, "config", "c", "config.json", "path to config file to check")
var outputConfigFile string
var genConfigCmd = &cobra.Command{
Use: "genconfig",
Short: "Generate minimal configuration file to start with",
Long: `Generate minimal configuration file to start with`,
Run: func(cmd *cobra.Command, args []string) {
err := tools.GenerateConfig(outputConfigFile)
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
err = validateConfig(outputConfigFile)
if err != nil {
_ = os.Remove(outputConfigFile)
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
},
}
genConfigCmd.Flags().StringVarP(&outputConfigFile, "config", "c", "config.json", "path to output config file")
var genTokenConfigFile string
var genTokenUser string
var genTokenTTL int64
var genTokenCmd = &cobra.Command{
Use: "gentoken",
Short: "Generate sample connection JWT for user",
Long: `Generate sample connection JWT for user`,
Run: func(cmd *cobra.Command, args []string) {
bindConfig()
err := readConfig(genTokenConfigFile)
if err != nil && err != errConfigFileNotFound {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
jwtVerifierConfig := jwtVerifierConfig()
token, err := tools.GenerateToken(jwtVerifierConfig, genTokenUser, genTokenTTL)
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
var user = fmt.Sprintf("user %s", genTokenUser)
if genTokenUser == "" {
user = "anonymous user"
}
fmt.Printf("HMAC SHA-256 JWT for %s with expiration TTL %s:\n%s\n", user, time.Duration(genTokenTTL)*time.Second, token)
},
}
genTokenCmd.Flags().StringVarP(&genTokenConfigFile, "config", "c", "config.json", "path to config file")
genTokenCmd.Flags().StringVarP(&genTokenUser, "user", "u", "", "user ID")
genTokenCmd.Flags().Int64VarP(&genTokenTTL, "ttl", "t", 3600*24*7, "token TTL in seconds")
var checkTokenConfigFile string
var checkTokenCmd = &cobra.Command{
Use: "checktoken [TOKEN]",
Short: "Check connection JWT",
Long: `Check connection JWT`,
Run: func(cmd *cobra.Command, args []string) {
bindConfig()
err := readConfig(checkTokenConfigFile)
if err != nil && err != errConfigFileNotFound {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
jwtVerifierConfig := jwtVerifierConfig()
if len(args) != 1 {
fmt.Printf("error: provide token to check [centrifugo checktoken <TOKEN>]\n")
os.Exit(1)
}
subject, claims, err := tools.CheckToken(jwtVerifierConfig, args[0])
if err != nil {
fmt.Printf("error: %v\n", err)
os.Exit(1)
}
var user = fmt.Sprintf("user %s", subject)
if subject == "" {
user = "anonymous user"
}
fmt.Printf("valid token for %s\npayload: %s\n", user, string(claims))
},
}
checkTokenCmd.Flags().StringVarP(&checkTokenConfigFile, "config", "c", "config.json", "path to config file")
rootCmd.AddCommand(versionCmd)
rootCmd.AddCommand(checkConfigCmd)
rootCmd.AddCommand(genConfigCmd)
rootCmd.AddCommand(genTokenCmd)
rootCmd.AddCommand(checkTokenCmd)
_ = rootCmd.Execute()
}
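// writePidFile writes the current process PID to pidFile; it is a no-op when pidFile is empty.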
func writePidFile(pidFile string) error {
if pidFile == "" {
return nil
}
pid := []byte(strconv.Itoa(os.Getpid()) + "\n")
return ioutil.WriteFile(pidFile, pid, 0644)
}
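// logLevelMatches maps log level names from configuration to zerolog levels.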
var logLevelMatches = map[string]zerolog.Level{
"NONE": zerolog.NoLevel,
"DEBUG": zerolog.DebugLevel,
"INFO": zerolog.InfoLevel,
"WARN": zerolog.WarnLevel,
"ERROR": zerolog.ErrorLevel,
"FATAL": zerolog.FatalLevel,
}
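// detectTerminalAttached switches zerolog to a human-friendly console writer when stdout is an interactive terminal (except on Windows).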
func detectTerminalAttached() {
if isatty.IsTerminal(os.Stdout.Fd()) && runtime.GOOS != "windows" {
log.Logger = log.Output(zerolog.ConsoleWriter{
Out: os.Stdout,
TimeFormat: "2006-01-02 15:04:05",
FormatLevel: logutils.ConsoleFormatLevel(),
FormatErrFieldName: logutils.ConsoleFormatErrFieldName(),
FormatErrFieldValue: logutils.ConsoleFormatErrFieldValue(),
})
}
}
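// setupLogging applies the configured log level and, when log_file is set, redirects logs to that file, returning the opened file so the caller can close it.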
func setupLogging() *os.File {
detectTerminalAttached()
zerolog.SetGlobalLevel(zerolog.InfoLevel)
logLevel, ok := logLevelMatches[strings.ToUpper(viper.GetString("log_level"))]
if !ok {
logLevel = zerolog.InfoLevel
}
zerolog.SetGlobalLevel(logLevel)
if viper.IsSet("log_file") && viper.GetString("log_file") != "" {
f, err := os.OpenFile(viper.GetString("log_file"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
log.Fatal().Msgf("error opening log file: %v", err)
}
log.Logger = log.Output(f)
return f
}
return nil
}
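// handleSignals blocks waiting for OS signals: SIGHUP reloads configuration, SIGINT/SIGTERM gracefully shut down servers and the node within shutdown_timeout.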
func handleSignals(configFile string, n *centrifuge.Node, ruleContainer *rule.Container, tokenVerifier *jwtverify.VerifierJWT, httpServers []*http.Server, grpcAPIServer *grpc.Server, exporter *graphite.Exporter) {
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, syscall.SIGHUP, syscall.SIGINT, os.Interrupt, syscall.SIGTERM)
for {
sig := <-sigCh
log.Info().Msgf("signal received: %v", sig)
switch sig {
case syscall.SIGHUP:
// reload application configuration on SIGHUP.
log.Info().Msg("reloading configuration")
err := validateConfig(configFile)
if err != nil {
log.Error().Msgf("error parsing configuration: %s", err)
continue
}
ruleConfig := ruleConfig()
if err := tokenVerifier.Reload(jwtVerifierConfig()); err != nil {
log.Error().Msgf("error reloading: %v", err)
continue
}
if err := ruleContainer.Reload(ruleConfig); err != nil {
log.Error().Msgf("error reloading: %v", err)
continue
}
log.Info().Msg("configuration successfully reloaded")
case syscall.SIGINT, os.Interrupt, syscall.SIGTERM:
log.Info().Msg("shutting down ...")
pidFile := viper.GetString("pid_file")
shutdownTimeout := time.Duration(viper.GetInt("shutdown_timeout")) * time.Second
go time.AfterFunc(shutdownTimeout, func() {
if pidFile != "" {
_ = os.Remove(pidFile)
}
os.Exit(1)
})
if exporter != nil {
_ = exporter.Close()
}
var wg sync.WaitGroup
if grpcAPIServer != nil {
wg.Add(1)
go func() {
defer wg.Done()
grpcAPIServer.GracefulStop()
}()
}
ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
for _, srv := range httpServers {
wg.Add(1)
go func(srv *http.Server) {
defer wg.Done()
_ = srv.Shutdown(ctx)
}(srv)
}
_ = n.Shutdown(ctx)
wg.Wait()
cancel()
if pidFile != "" {
_ = os.Remove(pidFile)
}
time.Sleep(time.Duration(viper.GetInt("shutdown_termination_delay")) * time.Second)
os.Exit(0)
}
}
}
var startHTTPChallengeServerOnce sync.Once // TODO: refactor to get rid of global here.
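// getTLSConfig returns TLS configuration based on tls_* options: ACME autocert when enabled, otherwise the provided certificate and key files, or nil when TLS is disabled.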
func getTLSConfig() (*tls.Config, error) {
tlsEnabled := viper.GetBool("tls")
tlsCert := viper.GetString("tls_cert")
tlsKey := viper.GetString("tls_key")
tlsAutocertEnabled := viper.GetBool("tls_autocert")
autocertHostWhitelist := viper.GetString("tls_autocert_host_whitelist")
var tlsAutocertHostWhitelist []string
if autocertHostWhitelist != "" {
tlsAutocertHostWhitelist = strings.Split(autocertHostWhitelist, ",")
} else {
tlsAutocertHostWhitelist = nil
}
tlsAutocertCacheDir := viper.GetString("tls_autocert_cache_dir")
tlsAutocertEmail := viper.GetString("tls_autocert_email")
tlsAutocertForceRSA := viper.GetBool("tls_autocert_force_rsa")
tlsAutocertServerName := viper.GetString("tls_autocert_server_name")
tlsAutocertHTTP := viper.GetBool("tls_autocert_http")
tlsAutocertHTTPAddr := viper.GetString("tls_autocert_http_addr")
if tlsAutocertEnabled {
certManager := autocert.Manager{
Prompt: autocert.AcceptTOS,
ForceRSA: tlsAutocertForceRSA,
Email: tlsAutocertEmail,
}
if tlsAutocertHostWhitelist != nil {
certManager.HostPolicy = autocert.HostWhitelist(tlsAutocertHostWhitelist...)
}
if tlsAutocertCacheDir != "" {
certManager.Cache = autocert.DirCache(tlsAutocertCacheDir)
}
if tlsAutocertHTTP {
startHTTPChallengeServerOnce.Do(func() {
// getTLSConfig can be called several times.
acmeHTTPServer := &http.Server{
Handler: certManager.HTTPHandler(nil),
Addr: tlsAutocertHTTPAddr,
ErrorLog: stdlog.New(&httpErrorLogWriter{log.Logger}, "", 0),
}
go func() {
log.Info().Msgf("serving ACME http_01 challenge on %s", tlsAutocertHTTPAddr)
if err := acmeHTTPServer.ListenAndServe(); err != nil {
log.Fatal().Msgf("can't create server on %s to serve acme http challenge: %v", tlsAutocertHTTPAddr, err)
}
}()
})
}
return &tls.Config{
GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
// See https://github.com/centrifugal/centrifugo/issues/144#issuecomment-279393819
if tlsAutocertServerName != "" && hello.ServerName == "" {
hello.ServerName = tlsAutocertServerName
}
return certManager.GetCertificate(hello)
},
NextProtos: []string{
"h2", "http/1.1", acme.ALPNProto,
},
}, nil
} else if tlsEnabled {
// Autocert disabled - just try to use provided SSL cert and key files.
tlsConfig := &tls.Config{}
tlsConfig = tlsConfig.Clone()
tlsConfig.Certificates = make([]tls.Certificate, 1)
var err error
tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(tlsCert, tlsKey)
if err != nil {
return nil, err
}
return tlsConfig, nil
}
return nil, nil
}
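// tlsConfigForGRPC builds TLS configuration from the dedicated grpc_api_tls_cert and grpc_api_tls_key files.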
func tlsConfigForGRPC() (*tls.Config, error) {
tlsCert := viper.GetString("grpc_api_tls_cert")
tlsKey := viper.GetString("grpc_api_tls_key")
tlsConfig := &tls.Config{}
tlsConfig.Certificates = make([]tls.Certificate, 1)
var err error
tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(tlsCert, tlsKey)
if err != nil {
return nil, err
}
return tlsConfig, nil
}
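// httpErrorLogWriter adapts zerolog so it can be used as the ErrorLog of net/http servers.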
type httpErrorLogWriter struct {
zerolog.Logger
}
func (w *httpErrorLogWriter) Write(data []byte) (int, error) {
w.Logger.Warn().Msg(strings.TrimSpace(string(data)))
return len(data), nil
}
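// runHTTPServers starts HTTP servers on external and internal addresses serving the handlers enabled by configuration and returns them for graceful shutdown.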
func runHTTPServers(n *centrifuge.Node, apiExecutor *api.Executor) ([]*http.Server, error) {
debug := viper.GetBool("debug")
useAdmin := viper.GetBool("admin")
usePrometheus := viper.GetBool("prometheus")
useHealth := viper.GetBool("health")
adminExternal := viper.GetBool("admin_external")
httpAddress := viper.GetString("address")
httpPort := viper.GetString("port")
httpInternalAddress := viper.GetString("internal_address")
httpInternalPort := viper.GetString("internal_port")
if httpInternalAddress == "" && httpAddress != "" {
// If a custom internal address is not explicitly set, reuse the main
// address for internal endpoints too.
httpInternalAddress = httpAddress
}
if httpInternalPort == "" {
// If a custom internal port is not set, use the main HTTP port for
// internal endpoints too.
httpInternalPort = httpPort
}
// addrToHandlerFlags maps an HTTP server address to the handler flags
// served on that address.
addrToHandlerFlags := map[string]HandlerFlag{}
var portFlags HandlerFlag
externalAddr := net.JoinHostPort(httpAddress, httpPort)
portFlags = addrToHandlerFlags[externalAddr]
if !viper.GetBool("websocket_disable") {
portFlags |= HandlerWebsocket
}
if !viper.GetBool("sockjs_disable") {
portFlags |= HandlerSockJS
}
if useAdmin && adminExternal {
portFlags |= HandlerAdmin
}
addrToHandlerFlags[externalAddr] = portFlags
internalAddr := net.JoinHostPort(httpInternalAddress, httpInternalPort)
portFlags = addrToHandlerFlags[internalAddr]
if !viper.GetBool("api_disable") {
portFlags |= HandlerAPI
}
if useAdmin && !adminExternal {
portFlags |= HandlerAdmin
}
if usePrometheus {
portFlags |= HandlerPrometheus
}
if debug {
portFlags |= HandlerDebug
}
if useHealth {
portFlags |= HandlerHealth
}
addrToHandlerFlags[internalAddr] = portFlags
var servers []*http.Server
tlsConfig, err := getTLSConfig()
if err != nil {
log.Fatal().Msgf("can not get TLS config: %v", err)
}
// Iterate over port to flags mapping and start HTTP servers
// on separate ports serving handlers specified in flags.
for addr, handlerFlags := range addrToHandlerFlags {
if handlerFlags == 0 {
continue
}
mux := Mux(n, apiExecutor, handlerFlags)
log.Info().Msgf("serving %s endpoints on %s", handlerFlags, addr)
var addrTLSConfig *tls.Config
if !viper.GetBool("tls_external") || addr == externalAddr {
addrTLSConfig = tlsConfig
}
server := &http.Server{
Addr: addr,
Handler: mux,
TLSConfig: addrTLSConfig,
ErrorLog: stdlog.New(&httpErrorLogWriter{log.Logger}, "", 0),
}
servers = append(servers, server)
go func() {
if addrTLSConfig != nil {
if err := server.ListenAndServeTLS("", ""); err != nil {
if err != http.ErrServerClosed {
log.Fatal().Msgf("ListenAndServe: %v", err)
}
}
} else {
if err := server.ListenAndServe(); err != nil {
if err != http.ErrServerClosed {
log.Fatal().Msgf("ListenAndServe: %v", err)
}
}
}
}()
}
return servers, nil
}
var errConfigFileNotFound = errors.New("unable to find configuration file")
// readConfig reads configuration from the file at the provided path into Viper.
func readConfig(f string) error {
viper.SetConfigFile(f)
err := viper.ReadInConfig()
if err != nil {
switch err.(type) {
case viper.ConfigParseError:
return err
default:
return errConfigFileNotFound
}
}
return nil
}
// validateConfig validates config file located at provided path.
func validateConfig(f string) error {
err := readConfig(f)
if err != nil {
return err
}
ruleConfig := ruleConfig()
if err := ruleConfig.Validate(); err != nil {
return err
}
return nil
}
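// ruleConfig reads global channel options and namespaces from Viper into rule.Config.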
func ruleConfig() rule.Config {
v := viper.GetViper()
cfg := rule.Config{}
cfg.Publish = v.GetBool("publish")
cfg.SubscribeToPublish = v.GetBool("subscribe_to_publish")
cfg.Anonymous = v.GetBool("anonymous")
cfg.Presence = v.GetBool("presence")
cfg.PresenceDisableForClient = v.GetBool("presence_disable_for_client")
cfg.JoinLeave = v.GetBool("join_leave")
cfg.HistorySize = v.GetInt("history_size")
cfg.HistoryLifetime = v.GetInt("history_lifetime")
cfg.HistoryRecover = v.GetBool("history_recover")
cfg.HistoryDisableForClient = v.GetBool("history_disable_for_client")
cfg.ServerSide = v.GetBool("server_side")
cfg.ProxySubscribe = v.GetBool("proxy_subscribe")
cfg.ProxyPublish = v.GetBool("proxy_publish")
cfg.Namespaces = namespacesFromConfig(v)
// TODO v3: rename option to token_channel_prefix.
cfg.TokenChannelPrefix = v.GetString("channel_private_prefix")
cfg.ChannelNamespaceBoundary = v.GetString("channel_namespace_boundary")
cfg.ChannelUserBoundary = v.GetString("channel_user_boundary")
cfg.ChannelUserSeparator = v.GetString("channel_user_separator")
cfg.UserSubscribeToPersonal = v.GetBool("user_subscribe_to_personal")
cfg.UserPersonalSingleConnection = v.GetBool("user_personal_single_connection")
cfg.UserPersonalChannelNamespace = v.GetString("user_personal_channel_namespace")
cfg.ClientInsecure = v.GetBool("client_insecure")
cfg.ClientAnonymous = v.GetBool("client_anonymous")
cfg.ClientConcurrency = v.GetInt("client_concurrency")
return cfg
}
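// jwtVerifierConfig builds JWT verifier configuration from token_* options, falling back to the deprecated secret option for the HMAC key.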
func jwtVerifierConfig() jwtverify.VerifierConfig {
v := viper.GetViper()
cfg := jwtverify.VerifierConfig{}
hmacSecretKey := v.GetString("token_hmac_secret_key")
if hmacSecretKey != "" {
cfg.HMACSecretKey = hmacSecretKey
} else {
if v.GetString("secret") != "" {
log.Warn().Msg("secret is deprecated, use token_hmac_secret_key instead")
}
cfg.HMACSecretKey = v.GetString("secret")
}
rsaPublicKey := v.GetString("token_rsa_public_key")
if rsaPublicKey != "" {
pubKey, err := jwtutils.ParseRSAPublicKeyFromPEM([]byte(rsaPublicKey))
if err != nil {
log.Fatal().Msgf("error parsing RSA public key: %v", err)
}
cfg.RSAPublicKey = pubKey
}
cfg.JWKSPublicEndpoint = v.GetString("token_jwks_public_endpoint")
return cfg
}
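// proxyConfig collects proxy endpoints and timeouts; the boolean result reports whether at least one proxy endpoint is configured.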
func proxyConfig() (proxy.Config, bool) {
v := viper.GetViper()
cfg := proxy.Config{}
cfg.ExtraHTTPHeaders = v.GetStringSlice("proxy_extra_http_headers")
cfg.ConnectEndpoint = v.GetString("proxy_connect_endpoint")
cfg.ConnectTimeout = time.Duration(v.GetFloat64("proxy_connect_timeout")*1000) * time.Millisecond
cfg.RefreshEndpoint = v.GetString("proxy_refresh_endpoint")
cfg.RefreshTimeout = time.Duration(v.GetFloat64("proxy_refresh_timeout")*1000) * time.Millisecond
cfg.RPCEndpoint = v.GetString("proxy_rpc_endpoint")
cfg.RPCTimeout = time.Duration(v.GetFloat64("proxy_rpc_timeout")*1000) * time.Millisecond
cfg.SubscribeEndpoint = v.GetString("proxy_subscribe_endpoint")
cfg.SubscribeTimeout = time.Duration(v.GetFloat64("proxy_subscribe_timeout")*1000) * time.Millisecond
cfg.PublishEndpoint = v.GetString("proxy_publish_endpoint")
cfg.PublishTimeout = time.Duration(v.GetFloat64("proxy_publish_timeout")*1000) * time.Millisecond
proxyEnabled := cfg.ConnectEndpoint != "" || cfg.RefreshEndpoint != "" ||
cfg.RPCEndpoint != "" || cfg.SubscribeEndpoint != "" || cfg.PublishEndpoint != ""
return cfg, proxyEnabled
}
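// nodeConfig translates client_* and node-level options into centrifuge.Config.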
func nodeConfig(version string) centrifuge.Config {
v := viper.GetViper()
cfg := centrifuge.Config{}
cfg.Version = version
cfg.MetricsNamespace = "centrifugo"
cfg.Name = applicationName()
cfg.ChannelMaxLength = v.GetInt("channel_max_length")
cfg.ClientPresenceUpdateInterval = time.Duration(v.GetInt("client_presence_ping_interval")) * time.Second
cfg.ClientPresenceExpireInterval = time.Duration(v.GetInt("client_presence_expire_interval")) * time.Second
cfg.ClientExpiredCloseDelay = time.Duration(v.GetInt("client_expired_close_delay")) * time.Second
cfg.ClientExpiredSubCloseDelay = time.Duration(v.GetInt("client_expired_sub_close_delay")) * time.Second
cfg.ClientStaleCloseDelay = time.Duration(v.GetInt("client_stale_close_delay")) * time.Second
cfg.ClientQueueMaxSize = v.GetInt("client_queue_max_size")
cfg.ClientChannelLimit = v.GetInt("client_channel_limit")
cfg.ClientChannelPositionCheckDelay = time.Duration(v.GetInt("client_channel_position_check_delay")) * time.Second
cfg.UserConnectionLimit = v.GetInt("client_user_connection_limit")
cfg.NodeInfoMetricsAggregateInterval = time.Duration(v.GetInt("node_info_metrics_aggregate_interval")) * time.Second
level, ok := logStringToLevel[strings.ToLower(v.GetString("log_level"))]
if !ok {
level = centrifuge.LogLevelInfo
}
cfg.LogLevel = level
cfg.LogHandler = newLogHandler().handle
return cfg
}
// logStringToLevel matches a level string from configuration to the corresponding Centrifuge LogLevel.
var logStringToLevel = map[string]centrifuge.LogLevel{
"debug": centrifuge.LogLevelDebug,
"info": centrifuge.LogLevelInfo,
"error": centrifuge.LogLevelError,
"none": centrifuge.LogLevelNone,
}
// applicationName returns a name for this Centrifugo node. If no name is provided
// in the configuration, it constructs the node name from hostname and port.
func applicationName() string {
v := viper.GetViper()
name := v.GetString("name")
if name != "" {
return name
}
port := v.GetString("port")
var hostname string
hostname, err := os.Hostname()
if err != nil {
hostname = "?"
}
return hostname + "_" + port
}
// namespacesFromConfig unmarshals channel namespaces from configuration (either a JSON string or a list).
func namespacesFromConfig(v *viper.Viper) []rule.ChannelNamespace {
var ns []rule.ChannelNamespace
if !v.IsSet("namespaces") {
return ns
}
var err error
switch val := v.Get("namespaces").(type) {
case string:
err = json.Unmarshal([]byte(val), &ns)
case []interface{}:
err = v.UnmarshalKey("namespaces", &ns)
default:
err = fmt.Errorf("unknown namespaces type: %T", val)
}
if err != nil {
log.Error().Err(err).Msg("malformed namespaces")
os.Exit(1)
}
return ns
}
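// websocketHandlerConfig builds WebSocket transport options, preferring websocket_* settings and falling back to the generic client_* ones.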
func websocketHandlerConfig() centrifuge.WebsocketConfig {
v := viper.GetViper()
cfg := centrifuge.WebsocketConfig{}
cfg.Compression = v.GetBool("websocket_compression")
cfg.CompressionLevel = v.GetInt("websocket_compression_level")
cfg.CompressionMinSize = v.GetInt("websocket_compression_min_size")
cfg.ReadBufferSize = v.GetInt("websocket_read_buffer_size")
cfg.WriteBufferSize = v.GetInt("websocket_write_buffer_size")
cfg.UseWriteBufferPool = v.GetBool("websocket_use_write_buffer_pool")
if v.IsSet("websocket_ping_interval") {
cfg.PingInterval = time.Duration(v.GetInt("websocket_ping_interval")) * time.Second
} else {
cfg.PingInterval = time.Duration(v.GetInt("client_ping_interval")) * time.Second
}
if v.IsSet("websocket_write_timeout") {
cfg.WriteTimeout = time.Duration(v.GetInt("websocket_write_timeout")) * time.Second
} else {
cfg.WriteTimeout = time.Duration(v.GetInt("client_message_write_timeout")) * time.Second
}
if v.IsSet("websocket_message_size_limit") {
cfg.MessageSizeLimit = v.GetInt("websocket_message_size_limit")
} else {
cfg.MessageSizeLimit = v.GetInt("client_request_max_size")
}
return cfg
}
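// sockjsHandlerConfig builds SockJS transport options reusing the WebSocket buffer and write timeout settings.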
func sockjsHandlerConfig() centrifuge.SockjsConfig {
v := viper.GetViper()
cfg := centrifuge.SockjsConfig{}
cfg.URL = v.GetString("sockjs_url")
cfg.HeartbeatDelay = time.Duration(v.GetInt("sockjs_heartbeat_delay")) * time.Second
cfg.CheckOrigin = func(*http.Request) bool { return true }
cfg.WebsocketCheckOrigin = func(r *http.Request) bool { return true }
cfg.WebsocketReadBufferSize = v.GetInt("websocket_read_buffer_size")
cfg.WebsocketWriteBufferSize = v.GetInt("websocket_write_buffer_size")
cfg.WebsocketUseWriteBufferPool = v.GetBool("websocket_use_write_buffer_pool")
if v.IsSet("websocket_write_timeout") {
cfg.WebsocketWriteTimeout = time.Duration(v.GetInt("websocket_write_timeout")) * time.Second
} else {
cfg.WebsocketWriteTimeout = time.Duration(v.GetInt("client_message_write_timeout")) * time.Second
}
return cfg
}
func adminHandlerConfig() admin.Config {
v := viper.GetViper()
cfg := admin.Config{}
cfg.WebFS = webui.FS
cfg.WebPath = v.GetString("admin_web_path")
cfg.Password = v.GetString("admin_password")
cfg.Secret = v.GetString("admin_secret")
cfg.Insecure = v.GetBool("admin_insecure")
cfg.Prefix = v.GetString("admin_handler_prefix")
return cfg
}
func memoryEngine(n *centrifuge.Node) (centrifuge.Engine, error) {
c, err := memoryEngineConfig()
if err != nil {
return nil, err
}
return centrifuge.NewMemoryEngine(n, *c)
}
func redisEngine(n *centrifuge.Node) (centrifuge.Engine, error) {
c, err := redisEngineConfig()
if err != nil {
return nil, err
}
return centrifuge.NewRedisEngine(n, *c)
}
func memoryEngineConfig() (*centrifuge.MemoryEngineConfig, error) {
return &centrifuge.MemoryEngineConfig{
HistoryMetaTTL: time.Duration(viper.GetInt("memory_history_meta_ttl")) * time.Second,
}, nil
}
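// addRedisShardCommonSettings applies Redis options shared by every shard: auth, prefix, TLS and timeouts.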
func addRedisShardCommonSettings(shardConf *centrifuge.RedisShardConfig) {
v := viper.GetViper()
shardConf.Password = v.GetString("redis_password")
shardConf.Prefix = v.GetString("redis_prefix")
shardConf.UseTLS = v.GetBool("redis_tls")
shardConf.TLSSkipVerify = v.GetBool("redis_tls_skip_verify")
shardConf.IdleTimeout = time.Duration(v.GetInt("redis_idle_timeout")) * time.Second
shardConf.PubSubNumWorkers = v.GetInt("redis_pubsub_num_workers")
shardConf.ConnectTimeout = time.Duration(v.GetInt("redis_connect_timeout")) * time.Second
shardConf.ReadTimeout = time.Duration(v.GetInt("redis_read_timeout")) * time.Second
shardConf.WriteTimeout = time.Duration(v.GetInt("redis_write_timeout")) * time.Second
}
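// redisEngineConfig builds Redis engine options supporting cluster addresses, sharding over hosts/ports/URLs and Sentinel.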
func redisEngineConfig() (*centrifuge.RedisEngineConfig, error) {
v := viper.GetViper()
clusterConf := v.GetStringSlice("redis_cluster_addrs")
var useCluster bool
if len(clusterConf) > 0 {
useCluster = true
}
var shardConfigs []centrifuge.RedisShardConfig
if useCluster {
for _, clusterAddrsStr := range clusterConf {
clusterAddrs := strings.Split(clusterAddrsStr, ",")
conf := &centrifuge.RedisShardConfig{
ClusterAddrs: clusterAddrs,
}
addRedisShardCommonSettings(conf)
shardConfigs = append(shardConfigs, *conf)
}
} else {
numShards := 1
hostsConf := v.GetString("redis_host")
portsConf := v.GetString("redis_port")
urlsConf := v.GetString("redis_url")
masterNamesConf := v.GetString("redis_master_name")
sentinelsConf := v.GetString("redis_sentinels")
password := v.GetString("redis_password")
db := v.GetInt("redis_db")
sentinelPassword := v.GetString("redis_sentinel_password")
var hosts []string
if hostsConf != "" {
hosts = strings.Split(hostsConf, ",")
if len(hosts) > numShards {
numShards = len(hosts)
}
}
var ports []string
if portsConf != "" {
ports = strings.Split(portsConf, ",")
if len(ports) > numShards {
numShards = len(ports)
}
}
var urls []string
if urlsConf != "" {
urls = strings.Split(urlsConf, ",")
if len(urls) > numShards {
numShards = len(urls)
}
}
var masterNames []string
if masterNamesConf != "" {
masterNames = strings.Split(masterNamesConf, ",")
if len(masterNames) > numShards {
numShards = len(masterNames)
}
}
if masterNamesConf != "" && sentinelsConf == "" {
return nil, fmt.Errorf("provide at least one Sentinel address")
}
if masterNamesConf != "" && len(masterNames) < numShards {
return nil, fmt.Errorf("master name must be set for every Redis shard when Sentinel used")
}
var sentinelAddrs []string
if sentinelsConf != "" {
for _, addr := range strings.Split(sentinelsConf, ",") {
addr := strings.TrimSpace(addr)
if addr == "" {
continue
}
if _, _, err := net.SplitHostPort(addr); err != nil {
return nil, fmt.Errorf("malformed Sentinel address: %s", addr)
}
sentinelAddrs = append(sentinelAddrs, addr)
}
}
if len(hosts) <= 1 {
newHosts := make([]string, numShards)
for i := 0; i < numShards; i++ {
if len(hosts) == 0 {
newHosts[i] = ""
} else {
newHosts[i] = hosts[0]
}
}
hosts = newHosts
} else if len(hosts) != numShards {
return nil, fmt.Errorf("malformed sharding configuration: wrong number of redis hosts")
}
if len(ports) <= 1 {
newPorts := make([]string, numShards)
for i := 0; i < numShards; i++ {
if len(ports) == 0 {
newPorts[i] = ""
} else {
newPorts[i] = ports[0]
}
}
ports = newPorts
} else if len(ports) != numShards {
return nil, fmt.Errorf("malformed sharding configuration: wrong number of redis ports")
}
if len(urls) > 0 && len(urls) != numShards {
return nil, fmt.Errorf("malformed sharding configuration: wrong number of redis urls")
}
if len(masterNames) == 0 {
newMasterNames := make([]string, numShards)
for i := 0; i < numShards; i++ {
newMasterNames[i] = ""
}
masterNames = newMasterNames
}
passwords := make([]string, numShards)
sentinelPasswords := make([]string, numShards)
for i := 0; i < numShards; i++ {
passwords[i] = password
sentinelPasswords[i] = sentinelPassword
}
dbs := make([]int, numShards)
for i := 0; i < numShards; i++ {
dbs[i] = db
}
for i, confURL := range urls {
if confURL == "" {
continue
}
// If a URL is set, prefer it over the other connection parameters.
u, err := url.Parse(confURL)
if err != nil {
return nil, fmt.Errorf("%v", err)
}
if u.User != nil {
var ok bool
pass, ok := u.User.Password()
if !ok {
pass = ""
}
passwords[i] = pass
}
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
return nil, fmt.Errorf("%v", err)
}
path := u.Path
if path != "" {
dbNum, err := strconv.Atoi(path[1:])
if err != nil {
return nil, fmt.Errorf("malformed Redis db number: %s", path[1:])
}
dbs[i] = dbNum
}
hosts[i] = host
ports[i] = port
}
for i := 0; i < numShards; i++ {
port, err := strconv.Atoi(ports[i])
if err != nil {
return nil, fmt.Errorf("malformed port: %v", err)
}
conf := &centrifuge.RedisShardConfig{
Host: hosts[i],
Port: port,
DB: dbs[i],
SentinelMasterName: masterNames[i],
SentinelAddrs: sentinelAddrs,
}
addRedisShardCommonSettings(conf)
conf.Password = passwords[i]
conf.SentinelPassword = sentinelPasswords[i]
shardConfigs = append(shardConfigs, *conf)
}
}
var historyMetaTTL time.Duration
if v.IsSet("redis_history_meta_ttl") {
historyMetaTTL = time.Duration(v.GetInt("redis_history_meta_ttl")) * time.Second
} else {
// TODO v3: remove compatibility.
historyMetaTTL = time.Duration(v.GetInt("redis_sequence_ttl")) * time.Second
}
return &centrifuge.RedisEngineConfig{
UseStreams: v.GetBool("redis_streams"),
HistoryMetaTTL: historyMetaTTL,
Shards: shardConfigs,
}, nil
}
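// logHandler forwards Centrifuge log entries to zerolog through a buffered channel so logging never blocks the node.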
type logHandler struct {
entries chan centrifuge.LogEntry
}
func newLogHandler() *logHandler {
h := &logHandler{
entries: make(chan centrifuge.LogEntry, 64),
}
go h.readEntries()
return h
}
func (h *logHandler) readEntries() {
for entry := range h.entries {
var l *zerolog.Event
switch entry.Level {
case centrifuge.LogLevelDebug:
l = log.Debug()
case centrifuge.LogLevelInfo:
l = log.Info()
case centrifuge.LogLevelError:
l = log.Error()
default:
continue
}
if entry.Fields != nil {
l.Fields(entry.Fields).Msg(entry.Message)
} else {
l.Msg(entry.Message)
}
}
}
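// handle enqueues a log entry, dropping it when the buffer is full.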
func (h *logHandler) handle(entry centrifuge.LogEntry) {
select {
case h.entries <- entry:
default:
return
}
}
// HandlerFlag is a bit mask of handlers that must be enabled in mux.
type HandlerFlag int
const (
// HandlerWebsocket enables Raw Websocket handler.
HandlerWebsocket HandlerFlag = 1 << iota
// HandlerSockJS enables SockJS handler.
HandlerSockJS
// HandlerAPI enables API handler.
HandlerAPI
// HandlerAdmin enables admin web interface.
HandlerAdmin
// HandlerDebug enables debug handlers.
HandlerDebug
// HandlerPrometheus enables Prometheus handler.
HandlerPrometheus
// HandlerHealth enables Health check endpoint.
HandlerHealth
)
var handlerText = map[HandlerFlag]string{
HandlerWebsocket: "websocket",
HandlerSockJS: "SockJS",
HandlerAPI: "API",
HandlerAdmin: "admin",
HandlerDebug: "debug",
HandlerPrometheus: "prometheus",
HandlerHealth: "health",
}
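// String returns a human-readable comma-separated list of enabled handlers.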
func (flags HandlerFlag) String() string {
flagsOrdered := []HandlerFlag{HandlerWebsocket, HandlerSockJS, HandlerAPI, HandlerAdmin, HandlerPrometheus, HandlerDebug, HandlerHealth}
var endpoints []string
for _, flag := range flagsOrdered {
text, ok := handlerText[flag]
if !ok {
continue
}
if flags&flag != 0 {
endpoints = append(endpoints, text)
}
}
return strings.Join(endpoints, ", ")
}
// Mux returns a mux including set of default handlers for Centrifugo server.
func Mux(n *centrifuge.Node, apiExecutor *api.Executor, flags HandlerFlag) *http.ServeMux {
mux := http.NewServeMux()
v := viper.GetViper()
if flags&HandlerDebug != 0 {
mux.Handle("/debug/pprof/", middleware.LogRequest(http.HandlerFunc(pprof.Index)))
mux.Handle("/debug/pprof/cmdline", middleware.LogRequest(http.HandlerFunc(pprof.Cmdline)))
mux.Handle("/debug/pprof/profile", middleware.LogRequest(http.HandlerFunc(pprof.Profile)))
mux.Handle("/debug/pprof/symbol", middleware.LogRequest(http.HandlerFunc(pprof.Symbol)))
mux.Handle("/debug/pprof/trace", middleware.LogRequest(http.HandlerFunc(pprof.Trace)))
}
_, proxyEnabled := proxyConfig()
if flags&HandlerWebsocket != 0 {
// register Websocket connection endpoint.
wsPrefix := strings.TrimRight(v.GetString("websocket_handler_prefix"), "/")
if wsPrefix == "" {
wsPrefix = "/"
}
mux.Handle(wsPrefix, middleware.LogRequest(middleware.HeadersToContext(proxyEnabled, centrifuge.NewWebsocketHandler(n, websocketHandlerConfig()))))
}
if flags&HandlerSockJS != 0 {
// register SockJS connection endpoints.
sockjsConfig := sockjsHandlerConfig()
sockjsPrefix := strings.TrimRight(v.GetString("sockjs_handler_prefix"), "/")
sockjsConfig.HandlerPrefix = sockjsPrefix
mux.Handle(sockjsPrefix+"/", middleware.LogRequest(middleware.HeadersToContext(proxyEnabled, centrifuge.NewSockjsHandler(n, sockjsConfig))))
}
if flags&HandlerAPI != 0 {
// register HTTP API endpoint.
apiHandler := api.NewHandler(n, apiExecutor, api.Config{})
apiPrefix := strings.TrimRight(v.GetString("api_handler_prefix"), "/")
if apiPrefix == "" {
apiPrefix = "/"
}
if viper.GetBool("api_insecure") {
mux.Handle(apiPrefix, middleware.LogRequest(middleware.Post(apiHandler)))
} else {
mux.Handle(apiPrefix, middleware.LogRequest(middleware.Post(middleware.APIKeyAuth(viper.GetString("api_key"), apiHandler))))
}
}
if flags&HandlerPrometheus != 0 {
// register Prometheus metrics export endpoint.
prometheusPrefix := strings.TrimRight(v.GetString("prometheus_handler_prefix"), "/")
if prometheusPrefix == "" {
prometheusPrefix = "/"
}
mux.Handle(prometheusPrefix, middleware.LogRequest(promhttp.Handler()))
}
if flags&HandlerAdmin != 0 {
// register admin web interface API endpoints.
adminPrefix := strings.TrimRight(v.GetString("admin_handler_prefix"), "/")
mux.Handle(adminPrefix+"/", middleware.LogRequest(admin.NewHandler(n, apiExecutor, adminHandlerConfig())))
}
if flags&HandlerHealth != 0 {
healthPrefix := strings.TrimRight(v.GetString("health_handler_prefix"), "/")
if healthPrefix == "" {
healthPrefix = "/"
}
mux.Handle(healthPrefix, middleware.LogRequest(health.NewHandler(n, health.Config{})))
}
return mux
}
| [
"\"GOMAXPROCS\""
]
| []
| [
"GOMAXPROCS"
]
| [] | ["GOMAXPROCS"] | go | 1 | 0 | |
integration-cli/docker_cli_build_test.go | package main
import (
"archive/tar"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"text/template"
"time"
"github.com/docker/docker/builder/dockerfile/command"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/integration/checker"
"github.com/docker/docker/pkg/stringutils"
"github.com/go-check/check"
)
func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) {
name := "testbuildjsonemptyrun"
_, err := buildImage(
name,
`
FROM busybox
RUN []
`,
true)
if err != nil {
c.Fatal("error when dealing with a RUN statement with empty JSON array")
}
}
func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) {
name := "testbuildshcmdjsonentrypoint"
_, err := buildImage(
name,
`
FROM busybox
ENTRYPOINT ["echo"]
CMD echo test
`,
true)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "--rm", name)
if daemonPlatform == "windows" {
if !strings.Contains(out, "cmd /S /C echo test") {
c.Fatalf("CMD did not contain cmd /S /C echo test : %q", out)
}
} else {
if strings.TrimSpace(out) != "/bin/sh -c echo test" {
c.Fatalf("CMD did not contain /bin/sh -c : %q", out)
}
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) {
// Windows does not support FROM scratch or the USER command
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
_, err := buildImage(name, `
FROM scratch
ENV user foo
USER ${user}
`, true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.User")
if res != `"foo"` {
c.Fatal("User foo from environment not in Config.User on image")
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) {
name := "testbuildenvironmentreplacement"
var volumePath string
if daemonPlatform == "windows" {
volumePath = "c:/quux"
} else {
volumePath = "/quux"
}
_, err := buildImage(name, `
FROM `+minimalBaseImage()+`
ENV volume `+volumePath+`
VOLUME ${volume}
`, true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Volumes")
var volumes map[string]interface{}
if err := json.Unmarshal([]byte(res), &volumes); err != nil {
c.Fatal(err)
}
if _, ok := volumes[volumePath]; !ok {
c.Fatal("Volume " + volumePath + " from environment not in Config.Volumes on image")
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) {
// Windows does not support FROM scratch or the EXPOSE command
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
_, err := buildImage(name, `
FROM scratch
ENV port 80
EXPOSE ${port}
ENV ports " 99 100 "
EXPOSE ${ports}
`, true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.ExposedPorts")
var exposedPorts map[string]interface{}
if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil {
c.Fatal(err)
}
exp := []int{80, 99, 100}
for _, p := range exp {
tmp := fmt.Sprintf("%d/tcp", p)
if _, ok := exposedPorts[tmp]; !ok {
c.Fatalf("Exposed port %d from environment not in Config.ExposedPorts on image", p)
}
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) {
name := "testbuildenvironmentreplacement"
_, err := buildImage(name, `
FROM busybox
ENV MYWORKDIR /work
RUN mkdir ${MYWORKDIR}
WORKDIR ${MYWORKDIR}
`, true)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) {
name := "testbuildenvironmentreplacement"
ctx, err := fakeContext(`
FROM `+minimalBaseImage()+`
ENV baz foo
ENV quux bar
ENV dot .
ENV fee fff
ENV gee ggg
ADD ${baz} ${dot}
COPY ${quux} ${dot}
ADD ${zzz:-${fee}} ${dot}
COPY ${zzz:-${gee}} ${dot}
`,
map[string]string{
"foo": "test1",
"bar": "test2",
"fff": "test3",
"ggg": "test4",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
_, err := buildImage(name,
`
FROM busybox
ENV foo zzz
ENV bar ${foo}
ENV abc1='$foo'
ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}"
RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo)
ENV abc2="\$foo"
RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo)
ENV abc3 '$foo'
RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo)
ENV abc4 "\$foo"
RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo)
`, true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Env")
envResult := []string{}
if err = unmarshalJSON([]byte(res), &envResult); err != nil {
c.Fatal(err)
}
found := false
envCount := 0
for _, env := range envResult {
parts := strings.SplitN(env, "=", 2)
if parts[0] == "bar" {
found = true
if parts[1] != "zzz" {
c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1])
}
} else if strings.HasPrefix(parts[0], "env") {
envCount++
if parts[1] != "zzz" {
c.Fatalf("%s should be 'zzz' but instead it's %q", parts[0], parts[1])
}
} else if strings.HasPrefix(parts[0], "env") {
envCount++
if parts[1] != "foo" {
c.Fatalf("%s should be 'foo' but instead it's %q", parts[0], parts[1])
}
}
}
if !found {
c.Fatal("Never found the `bar` env variable")
}
if envCount != 4 {
c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult)
}
}
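// TestBuildHandleEscapes checks how escaped variable references in VOLUME are expanded or kept literal during build.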
func (s *DockerSuite) TestBuildHandleEscapes(c *check.C) {
// The volume paths used in this test are invalid on Windows
testRequires(c, DaemonIsLinux)
name := "testbuildhandleescapes"
_, err := buildImage(name,
`
FROM scratch
ENV FOO bar
VOLUME ${FOO}
`, true)
if err != nil {
c.Fatal(err)
}
var result map[string]map[string]struct{}
res := inspectFieldJSON(c, name, "Config.Volumes")
if err = unmarshalJSON([]byte(res), &result); err != nil {
c.Fatal(err)
}
if _, ok := result["bar"]; !ok {
c.Fatal("Could not find volume bar set from env foo in volumes table")
}
deleteImages(name)
_, err = buildImage(name,
`
FROM scratch
ENV FOO bar
VOLUME \${FOO}
`, true)
if err != nil {
c.Fatal(err)
}
res = inspectFieldJSON(c, name, "Config.Volumes")
if err = unmarshalJSON([]byte(res), &result); err != nil {
c.Fatal(err)
}
if _, ok := result["${FOO}"]; !ok {
c.Fatal("Could not find volume ${FOO} set from env foo in volumes table")
}
deleteImages(name)
// this test in particular provides *7* backslashes and expects 3 to come back:
// each pair of backslashes collapses to a single literal backslash and the final
// backslash escapes the $, so the variable is not expanded.
_, err = buildImage(name,
`
FROM scratch
ENV FOO bar
VOLUME \\\\\\\${FOO}
`, true)
if err != nil {
c.Fatal(err)
}
res = inspectFieldJSON(c, name, "Config.Volumes")
if err = unmarshalJSON([]byte(res), &result); err != nil {
c.Fatal(err)
}
if _, ok := result[`\\\${FOO}`]; !ok {
c.Fatal(`Could not find volume \\\${FOO} set from env foo in volumes table`, result)
}
}
func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) {
name := "testbuildonbuildlowercase"
name2 := "testbuildonbuildlowercase2"
_, err := buildImage(name,
`
FROM busybox
onbuild run echo quux
`, true)
if err != nil {
c.Fatal(err)
}
_, out, err := buildImageWithOut(name2, fmt.Sprintf(`
FROM %s
`, name), true)
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "quux") {
c.Fatalf("Did not receive the expected echo text, got %s", out)
}
if strings.Contains(out, "ONBUILD ONBUILD") {
c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out)
}
}
func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvescapes"
_, err := buildImage(name,
`
FROM busybox
ENV TEST foo
CMD echo \$
`,
true)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "-t", name)
if strings.TrimSpace(out) != "$" {
c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
}
}
func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvoverwrite"
_, err := buildImage(name,
`
FROM busybox
ENV TEST foo
CMD echo ${TEST}
`,
true)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name)
if strings.TrimSpace(out) != "bar" {
c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
}
}
func (s *DockerSuite) TestBuildOnBuildForbiddenMaintainerInSourceImage(c *check.C) {
name := "testbuildonbuildforbiddenmaintainerinsourceimage"
out, _ := dockerCmd(c, "create", "busybox", "true")
cleanedContainerID := strings.TrimSpace(out)
dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"MAINTAINER docker.io\"]}", cleanedContainerID, "onbuild")
_, err := buildImage(name,
`FROM onbuild`,
true)
if err != nil {
if !strings.Contains(err.Error(), "maintainer isn't allowed as an ONBUILD trigger") {
c.Fatalf("Wrong error %v, must be about MAINTAINER and ONBUILD in source image", err)
}
} else {
c.Fatal("Error must not be nil")
}
}
func (s *DockerSuite) TestBuildOnBuildForbiddenFromInSourceImage(c *check.C) {
name := "testbuildonbuildforbiddenfrominsourceimage"
out, _ := dockerCmd(c, "create", "busybox", "true")
cleanedContainerID := strings.TrimSpace(out)
dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"FROM busybox\"]}", cleanedContainerID, "onbuild")
_, err := buildImage(name,
`FROM onbuild`,
true)
if err != nil {
if !strings.Contains(err.Error(), "from isn't allowed as an ONBUILD trigger") {
c.Fatalf("Wrong error %v, must be about FROM and ONBUILD in source image", err)
}
} else {
c.Fatal("Error must not be nil")
}
}
func (s *DockerSuite) TestBuildOnBuildForbiddenChainedInSourceImage(c *check.C) {
name := "testbuildonbuildforbiddenchainedinsourceimage"
out, _ := dockerCmd(c, "create", "busybox", "true")
cleanedContainerID := strings.TrimSpace(out)
dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"ONBUILD RUN ls\"]}", cleanedContainerID, "onbuild")
_, err := buildImage(name,
`FROM onbuild`,
true)
if err != nil {
if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") {
c.Fatalf("Wrong error %v, must be about chaining ONBUILD in source image", err)
}
} else {
c.Fatal("Error must not be nil")
}
}
func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) {
name1 := "onbuildcmd"
name2 := "onbuildgenerated"
_, err := buildImage(name1, `
FROM busybox
ONBUILD CMD ["hello world"]
ONBUILD ENTRYPOINT ["echo"]
ONBUILD RUN ["true"]`,
false)
if err != nil {
c.Fatal(err)
}
_, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", name2)
if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
c.Fatalf("did not get echo output from onbuild. Got: %q", out)
}
}
func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) {
name1 := "onbuildcmd"
name2 := "onbuildgenerated"
_, err := buildImage(name1, `
FROM busybox
ONBUILD ENTRYPOINT ["echo"]`,
false)
if err != nil {
c.Fatal(err)
}
_, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", name2)
if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
c.Fatal("got malformed output from onbuild", out)
}
}
func (s *DockerSuite) TestBuildCacheAdd(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
name := "testbuildtwoimageswithadd"
server, err := fakeStorage(map[string]string{
"robots.txt": "hello",
"index.html": "world",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
if _, err := buildImage(name,
fmt.Sprintf(`FROM scratch
ADD %s/robots.txt /`, server.URL()),
true); err != nil {
c.Fatal(err)
}
if err != nil {
c.Fatal(err)
}
deleteImages(name)
_, out, err := buildImageWithOut(name,
fmt.Sprintf(`FROM scratch
ADD %s/index.html /`, server.URL()),
true)
if err != nil {
c.Fatal(err)
}
if strings.Contains(out, "Using cache") {
c.Fatal("2nd build used cache on ADD, it shouldn't")
}
}
func (s *DockerSuite) TestBuildLastModified(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
name := "testbuildlastmodified"
server, err := fakeStorage(map[string]string{
"file": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
var out, out2 string
dFmt := `FROM busybox
ADD %s/file /
RUN ls -le /file`
dockerfile := fmt.Sprintf(dFmt, server.URL())
if _, out, err = buildImageWithOut(name, dockerfile, false); err != nil {
c.Fatal(err)
}
originMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out)
// Make sure our regexp is correct
if strings.Index(originMTime, "/file") < 0 {
c.Fatalf("Missing ls info on 'file':\n%s", out)
}
// Build it again and make sure the mtime of the file didn't change.
// Wait a few seconds to make sure the time changed enough to notice
time.Sleep(2 * time.Second)
if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil {
c.Fatal(err)
}
newMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out2)
if newMTime != originMTime {
c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", originMTime, newMTime)
}
// Now 'touch' the file and make sure the timestamp DID change this time
// Create a new fakeStorage instead of just using Add() to help windows
server, err = fakeStorage(map[string]string{
"file": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
dockerfile = fmt.Sprintf(dFmt, server.URL())
if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil {
c.Fatal(err)
}
newMTime = regexp.MustCompile(`root.*/file.*\n`).FindString(out2)
if newMTime == originMTime {
c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", originMTime, newMTime)
}
}
func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testaddimg"
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
ADD test_file /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod),
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// Issue #3960: "ADD src ." hangs
func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) {
name := "testaddsinglefiletoworkdir"
ctx, err := fakeContext(`FROM busybox
ADD test_file .`,
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
errChan := make(chan error)
go func() {
_, err := buildImageFromContext(name, ctx, true)
errChan <- err
close(errChan)
}()
select {
case <-time.After(15 * time.Second):
c.Fatal("Build with adding to workdir timed out")
case err := <-errChan:
c.Assert(err, check.IsNil)
}
}
func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testaddsinglefiletoexistdir"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
ADD test_file /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
server, err := fakeStorage(map[string]string{
"robots.txt": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
name := "testcopymultiplefilestofile"
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_file1 test_file2 /exists/
ADD test_file3 test_file4 %s/robots.txt /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
`, server.URL()),
map[string]string{
"test_file1": "test1",
"test_file2": "test2",
"test_file3": "test3",
"test_file4": "test4",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// This test is mainly for user namespaces to verify that new directories
// are created as the remapped root uid/gid pair
func (s *DockerSuite) TestBuildAddToNewDestination(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testaddtonewdest"
ctx, err := fakeContext(`FROM busybox
ADD . /new_dir
RUN ls -l /
RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`,
map[string]string{
"test_dir/test_file": "test file",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// This test is mainly for user namespaces to verify that new directories
// are created as the remapped root uid/gid pair
func (s *DockerSuite) TestBuildCopyToNewParentDirectory(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopytonewdir"
ctx, err := fakeContext(`FROM busybox
COPY test_dir /new_dir
RUN ls -l /new_dir
RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`,
map[string]string{
"test_dir/test_file": "test file",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// This test is mainly for user namespaces to verify that new directories
// are created as the remapped root uid/gid pair
func (s *DockerSuite) TestBuildWorkdirIsContainerRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testworkdirownership"
if _, err := buildImage(name, `FROM busybox
WORKDIR /new_dir
RUN ls -l /
RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) {
testRequires(c, DaemonIsLinux) // Not currently passing on Windows
name := "testaddfilewithwhitespace"
ctx, err := fakeContext(`FROM busybox
RUN mkdir "/test dir"
RUN mkdir "/test_dir"
ADD [ "test file1", "/test_file1" ]
ADD [ "test_file2", "/test file2" ]
ADD [ "test file3", "/test file3" ]
ADD [ "test dir/test_file4", "/test_dir/test_file4" ]
ADD [ "test_dir/test_file5", "/test dir/test_file5" ]
ADD [ "test dir/test_file6", "/test dir/test_file6" ]
RUN [ $(cat "/test_file1") = 'test1' ]
RUN [ $(cat "/test file2") = 'test2' ]
RUN [ $(cat "/test file3") = 'test3' ]
RUN [ $(cat "/test_dir/test_file4") = 'test4' ]
RUN [ $(cat "/test dir/test_file5") = 'test5' ]
RUN [ $(cat "/test dir/test_file6") = 'test6' ]`,
map[string]string{
"test file1": "test1",
"test_file2": "test2",
"test file3": "test3",
"test dir/test_file4": "test4",
"test_dir/test_file5": "test5",
"test dir/test_file6": "test6",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyFileWithWhitespace(c *check.C) {
testRequires(c, DaemonIsLinux) // Not currently passing on Windows
name := "testcopyfilewithwhitespace"
ctx, err := fakeContext(`FROM busybox
RUN mkdir "/test dir"
RUN mkdir "/test_dir"
COPY [ "test file1", "/test_file1" ]
COPY [ "test_file2", "/test file2" ]
COPY [ "test file3", "/test file3" ]
COPY [ "test dir/test_file4", "/test_dir/test_file4" ]
COPY [ "test_dir/test_file5", "/test dir/test_file5" ]
COPY [ "test dir/test_file6", "/test dir/test_file6" ]
RUN [ $(cat "/test_file1") = 'test1' ]
RUN [ $(cat "/test file2") = 'test2' ]
RUN [ $(cat "/test file3") = 'test3' ]
RUN [ $(cat "/test_dir/test_file4") = 'test4' ]
RUN [ $(cat "/test dir/test_file5") = 'test5' ]
RUN [ $(cat "/test dir/test_file6") = 'test6' ]`,
map[string]string{
"test file1": "test1",
"test_file2": "test2",
"test file3": "test3",
"test dir/test_file4": "test4",
"test_dir/test_file5": "test5",
"test dir/test_file6": "test6",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
name := "testcopywildcard"
server, err := fakeStorage(map[string]string{
"robots.txt": "hello",
"index.html": "world",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
COPY file*.txt /tmp/
RUN ls /tmp/file1.txt /tmp/file2.txt
RUN mkdir /tmp1
COPY dir* /tmp1/
RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file
RUN mkdir /tmp2
ADD dir/*dir %s/robots.txt /tmp2/
RUN ls /tmp2/nest_nest_file /tmp2/robots.txt
`, server.URL()),
map[string]string{
"file1.txt": "test1",
"file2.txt": "test2",
"dir/nested_file": "nested file",
"dir/nested_dir/nest_nest_file": "2 times nested",
"dirt": "dirty",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
// Now make sure we use the cache the 2nd time
id2, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("didn't use the cache")
}
}
func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) {
name := "testcopywildcardinname"
ctx, err := fakeContext(`FROM busybox
COPY *.txt /tmp/
RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ]
`, map[string]string{"*.txt": "hi there"})
if err != nil {
// Normally we would do c.Fatal(err) here, but the only realistic
// reason for this to fail is that the OS the client runs on doesn't
// support '*' in filenames (like Windows). So instead of failing,
// just let the test pass; that way we don't have to explicitly list
// which OSes this works on.
return
}
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatalf("should have built: %q", err)
}
}
func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) {
name := "testcopywildcardcache"
ctx, err := fakeContext(`FROM busybox
COPY file1.txt /tmp/`,
map[string]string{
"file1.txt": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
// Now make sure we use the cache the 2nd time, even with wildcards.
// Use the same context so the file is the same and the checksum will match
ctx.Add("Dockerfile", `FROM busybox
COPY file*.txt /tmp/`)
id2, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("didn't use the cache")
}
}
func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testaddsinglefiletononexistingdir"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
ADD test_file /test_dir/
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testadddircontenttoroot"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
ADD test_dir /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
map[string]string{
"test_dir/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testadddircontenttoexistingdir"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
ADD test_dir/ /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`,
map[string]string{
"test_dir/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testaddwholedirtoroot"
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
ADD test_dir /test_dir
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod),
map[string]string{
"test_dir/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// Testing #5941
func (s *DockerSuite) TestBuildAddEtcToRoot(c *check.C) {
name := "testaddetctoroot"
ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
ADD . /`,
map[string]string{
"etc/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// Testing #9401
func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testaddpreservesfilesspecialbits"
ctx, err := fakeContext(`FROM busybox
ADD suidbin /usr/bin/suidbin
RUN chmod 4755 /usr/bin/suidbin
RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]
ADD ./data/ /
RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`,
map[string]string{
"suidbin": "suidbin",
"/data/usr/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopysinglefiletoroot"
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
COPY test_file /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod),
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// Issue #3960: "ADD src ." hangs - adapted for COPY
func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) {
name := "testcopysinglefiletoworkdir"
ctx, err := fakeContext(`FROM busybox
COPY test_file .`,
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
errChan := make(chan error)
go func() {
_, err := buildImageFromContext(name, ctx, true)
errChan <- err
close(errChan)
}()
select {
case <-time.After(15 * time.Second):
c.Fatal("Build with adding to workdir timed out")
case err := <-errChan:
c.Assert(err, check.IsNil)
}
}
func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopysinglefiletoexistdir"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_file /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopysinglefiletononexistdir"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
COPY test_file /test_dir/
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopydircontenttoroot"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
COPY test_dir /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
map[string]string{
"test_dir/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopydircontenttoexistdir"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_dir/ /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`,
map[string]string{
"test_dir/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopywholedirtoroot"
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
COPY test_dir /test_dir
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod),
map[string]string{
"test_dir/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyEtcToRoot(c *check.C) {
name := "testcopyetctoroot"
ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
COPY . /`,
map[string]string{
"etc/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) {
testRequires(c, DaemonIsLinux) // Not currently working on Windows
dockerfile := `
FROM scratch
ADD links.tar /
ADD foo.txt /symlink/
`
targetFile := "foo.txt"
var (
name = "test-link-absolute"
)
ctx, err := fakeContext(dockerfile, nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
var symlinkTarget string
if runtime.GOOS == "windows" {
var driveLetter string
if abs, err := filepath.Abs(tempDir); err != nil {
c.Fatal(err)
} else {
driveLetter = abs[:1]
}
tempDirWithoutDrive := tempDir[2:]
symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive)
} else {
symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir)
}
tarPath := filepath.Join(ctx.Dir, "links.tar")
nonExistingFile := filepath.Join(tempDir, targetFile)
fooPath := filepath.Join(ctx.Dir, targetFile)
tarOut, err := os.Create(tarPath)
if err != nil {
c.Fatal(err)
}
tarWriter := tar.NewWriter(tarOut)
header := &tar.Header{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: symlinkTarget,
Mode: 0755,
Uid: 0,
Gid: 0,
}
err = tarWriter.WriteHeader(header)
if err != nil {
c.Fatal(err)
}
tarWriter.Close()
tarOut.Close()
foo, err := os.Create(fooPath)
if err != nil {
c.Fatal(err)
}
defer foo.Close()
if _, err := foo.WriteString("test"); err != nil {
c.Fatal(err)
}
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
if _, err := os.Stat(nonExistingFile); err == nil || !os.IsNotExist(err) {
c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
}
}
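// Sketch of the context TestBuildAddBadLinks builds (for readability; nothing
// here goes beyond what the code above already creates): links.tar contains a
// single entry
//
//   symlink -> /../../../../../../../../../../../..<tempDir>   (drive-letter variant on Windows)
//
// i.e. an absolute symlink pointing outside the build rootfs. The subsequent
// `ADD foo.txt /symlink/` must resolve that link inside the image filesystem,
// so the host-side path <tempDir>/foo.txt must never be created - which is what
// the final os.Stat check verifies.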
func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) {
testRequires(c, DaemonIsLinux) // ln not implemented on Windows busybox
const (
dockerfileTemplate = `
FROM busybox
RUN ln -s /../../../../../../../../%s /x
VOLUME /x
ADD foo.txt /x/`
targetFile = "foo.txt"
)
var (
name = "test-link-absolute-volume"
dockerfile = ""
)
tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir)
nonExistingFile := filepath.Join(tempDir, targetFile)
ctx, err := fakeContext(dockerfile, nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
fooPath := filepath.Join(ctx.Dir, targetFile)
foo, err := os.Create(fooPath)
if err != nil {
c.Fatal(err)
}
defer foo.Close()
if _, err := foo.WriteString("test"); err != nil {
c.Fatal(err)
}
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
if _, err := os.Stat(nonExistingFile); err == nil || !os.IsNotExist(err) {
c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
}
}
// Issue #5270 - ensure we throw a better error than "unexpected EOF"
// when we can't access files in the context.
func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) {
testRequires(c, DaemonIsLinux, UnixCli) // test uses chown/chmod: not available on windows
{
name := "testbuildinaccessiblefiles"
ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
// This is used to ensure we detect inaccessible files early during build in the cli client
pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess")
if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown file to root: %s", err)
}
if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name))
buildCmd.Dir = ctx.Dir
out, _, err := runCommandWithOutput(buildCmd)
if err == nil {
c.Fatalf("build should have failed: %s %s", err, out)
}
// check if we've detected the failure before we started building
if !strings.Contains(out, "no permission to read from ") {
c.Fatalf("output should've contained the string: no permission to read from but contained: %s", out)
}
if !strings.Contains(out, "Error checking context") {
c.Fatalf("output should've contained the string: Error checking context")
}
}
{
name := "testbuildinaccessibledirectory"
ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"directoryWeCantStat/bar": "foo"})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
// This is used to ensure we detect inaccessible directories early during build in the cli client
pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat")
pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown directory to root: %s", err)
}
if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil {
c.Fatalf("failed to chmod directory to 444: %s", err)
}
if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name))
buildCmd.Dir = ctx.Dir
out, _, err := runCommandWithOutput(buildCmd)
if err == nil {
c.Fatalf("build should have failed: %s %s", err, out)
}
// check if we've detected the failure before we started building
if !strings.Contains(out, "can't stat") {
c.Fatalf("output should've contained the string: can't access %s", out)
}
if !strings.Contains(out, "Error checking context") {
c.Fatalf("output should've contained the string: Error checking context\ngot:%s", out)
}
}
{
name := "testlinksok"
ctx, err := fakeContext("FROM scratch\nADD . /foo/", nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
target := "../../../../../../../../../../../../../../../../../../../azA"
if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil {
c.Fatal(err)
}
defer os.Remove(target)
// This is used to ensure we don't follow links when checking if everything in the context is accessible
// This test doesn't require that we run commands as an unprivileged user
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
{
name := "testbuildignoredinaccessible"
ctx, err := fakeContext("FROM scratch\nADD . /foo/",
map[string]string{
"directoryWeCantStat/bar": "foo",
".dockerignore": "directoryWeCantStat",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
// This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern
pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat")
pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown directory to root: %s", err)
}
if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil {
c.Fatalf("failed to chmod directory to 444: %s", err)
}
if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name))
buildCmd.Dir = ctx.Dir
if out, _, err := runCommandWithOutput(buildCmd); err != nil {
c.Fatalf("build should have worked: %s %s", err, out)
}
}
}
func (s *DockerSuite) TestBuildForceRm(c *check.C) {
containerCountBefore, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
name := "testbuildforcerm"
ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
RUN true
RUN thiswillfail`, nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
dockerCmdInDir(c, ctx.Dir, "build", "-t", name, "--force-rm", ".")
containerCountAfter, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
if containerCountBefore != containerCountAfter {
c.Fatalf("--force-rm shouldn't have left containers behind")
}
}
func (s *DockerSuite) TestBuildRm(c *check.C) {
name := "testbuildrm"
ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
ADD foo /
ADD foo /`, map[string]string{"foo": "bar"})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
{
containerCountBefore, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm", "-t", name, ".")
if err != nil {
c.Fatal("failed to build the image", out)
}
containerCountAfter, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
if containerCountBefore != containerCountAfter {
c.Fatalf("-rm shouldn't have left containers behind")
}
deleteImages(name)
}
{
containerCountBefore, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name, ".")
if err != nil {
c.Fatal("failed to build the image", out)
}
containerCountAfter, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
if containerCountBefore != containerCountAfter {
c.Fatalf("--rm shouldn't have left containers behind")
}
deleteImages(name)
}
{
containerCountBefore, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm=false", "-t", name, ".")
if err != nil {
c.Fatal("failed to build the image", out)
}
containerCountAfter, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
if containerCountBefore == containerCountAfter {
c.Fatalf("--rm=false should have left containers behind")
}
deleteImages(name)
}
}
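// The three builds in TestBuildRm above correspond to the following CLI
// invocations (illustrative; <ctx> stands for the fake build context dir):
//
//   docker build --rm       -t testbuildrm <ctx>   # intermediate containers removed
//   docker build            -t testbuildrm <ctx>   # --rm defaults to true
//   docker build --rm=false -t testbuildrm <ctx>   # intermediate containers kept
//
// so the container count is only expected to grow in the --rm=false case.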
func (s *DockerSuite) TestBuildWithVolumes(c *check.C) {
testRequires(c, DaemonIsLinux) // Invalid volume paths on Windows
var (
result map[string]map[string]struct{}
name = "testbuildvolumes"
emptyMap = make(map[string]struct{})
expected = map[string]map[string]struct{}{
"/test1": emptyMap,
"/test2": emptyMap,
"/test3": emptyMap,
"/test4": emptyMap,
"/test5": emptyMap,
"/test6": emptyMap,
"[/test7": emptyMap,
"/test8]": emptyMap,
}
)
_, err := buildImage(name,
`FROM scratch
VOLUME /test1
VOLUME /test2
VOLUME /test3 /test4
VOLUME ["/test5", "/test6"]
VOLUME [/test7 /test8]
`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Volumes")
err = unmarshalJSON([]byte(res), &result)
if err != nil {
c.Fatal(err)
}
equal := reflect.DeepEqual(&result, &expected)
if !equal {
c.Fatalf("Volumes %s, expected %s", result, expected)
}
}
func (s *DockerSuite) TestBuildMaintainer(c *check.C) {
name := "testbuildmaintainer"
expected := "dockerio"
_, err := buildImage(name,
`FROM `+minimalBaseImage()+`
MAINTAINER dockerio`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Author")
if res != expected {
c.Fatalf("Maintainer %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildUser(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuilduser"
expected := "dockerio"
_, err := buildImage(name,
`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
USER dockerio
RUN [ $(whoami) = 'dockerio' ]`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.User")
if res != expected {
c.Fatalf("User %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) {
name := "testbuildrelativeworkdir"
var (
expected1 string
expected2 string
expected3 string
expected4 string
expectedFinal string
)
if daemonPlatform == "windows" {
expected1 = `C:/`
expected2 = `C:/test1`
expected3 = `C:/test2`
expected4 = `C:/test2/test3`
expectedFinal = `C:\test2\test3` // Note: inspect returns Windows-style paths here, as it doesn't run inside busybox
} else {
expected1 = `/`
expected2 = `/test1`
expected3 = `/test2`
expected4 = `/test2/test3`
expectedFinal = `/test2/test3`
}
_, err := buildImage(name,
`FROM busybox
RUN sh -c "[ "$PWD" = "`+expected1+`" ]"
WORKDIR test1
RUN sh -c "[ "$PWD" = "`+expected2+`" ]"
WORKDIR /test2
RUN sh -c "[ "$PWD" = "`+expected3+`" ]"
WORKDIR test3
RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.WorkingDir")
if res != expectedFinal {
c.Fatalf("Workdir %s, expected %s", res, expectedFinal)
}
}
// #22181 Regression test. A single end-to-end test of Windows path semantics.
// Most path-handling verifications are in unit tests.
func (s *DockerSuite) TestBuildWindowsWorkdirProcessing(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildwindowsworkdirprocessing"
_, err := buildImage(name,
`FROM busybox
WORKDIR C:\\foo
WORKDIR bar
RUN sh -c "[ "$PWD" = "C:/foo/bar" ]"
`,
true)
if err != nil {
c.Fatal(err)
}
}
// #22181 Regression test. Most path-handling verifications are in unit tests;
// this is a single functional end-to-end test.
func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildwindowsaddcopypathprocessing"
// TODO Windows (@jhowardmsft). Needs a follow-up PR to 22181 to
// support backslash such as .\\ being equivalent to ./ and c:\\ being
// equivalent to c:/. This is not currently (nor ever has been) supported
// by docker on the Windows platform.
dockerfile := `
FROM busybox
# No trailing slash on COPY/ADD
# Results in dir being changed to a file
WORKDIR /wc1
COPY wc1 c:/wc1
WORKDIR /wc2
ADD wc2 c:/wc2
WORKDIR c:/
RUN sh -c "[ $(cat c:/wc1) = 'hellowc1' ]"
RUN sh -c "[ $(cat c:/wc2) = 'worldwc2' ]"
# Trailing slash on COPY/ADD, Windows-style path.
WORKDIR /wd1
COPY wd1 c:/wd1/
WORKDIR /wd2
ADD wd2 c:/wd2/
RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]"
RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]"
`
ctx, err := fakeContext(dockerfile, map[string]string{
"wc1": "hellowc1",
"wc2": "worldwc2",
"wd1": "hellowd1",
"wd2": "worldwd2",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, false)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) {
name := "testbuildworkdirwithenvvariables"
var expected string
if daemonPlatform == "windows" {
expected = `C:\test1\test2`
} else {
expected = `/test1/test2`
}
_, err := buildImage(name,
`FROM busybox
ENV DIRPATH /test1
ENV SUBDIRNAME test2
WORKDIR $DIRPATH
WORKDIR $SUBDIRNAME/$MISSING_VAR`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.WorkingDir")
if res != expected {
c.Fatalf("Workdir %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) {
// cat /test1/test2/foo gets permission denied for the user
testRequires(c, NotUserNamespace)
var expected string
if daemonPlatform == "windows" {
expected = `C:/test1/test2`
} else {
expected = `/test1/test2`
}
name := "testbuildrelativecopy"
dockerfile := `
FROM busybox
WORKDIR /test1
WORKDIR test2
RUN sh -c "[ "$PWD" = '` + expected + `' ]"
COPY foo ./
RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]"
ADD foo ./bar/baz
RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]"
COPY foo ./bar/baz2
RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]"
WORKDIR ..
COPY foo ./
RUN sh -c "[ $(cat /test1/foo) = 'hello' ]"
COPY foo /test3/
RUN sh -c "[ $(cat /test3/foo) = 'hello' ]"
WORKDIR /test4
COPY . .
RUN sh -c "[ $(cat /test4/foo) = 'hello' ]"
WORKDIR /test5/test6
COPY foo ../
RUN sh -c "[ $(cat /test5/foo) = 'hello' ]"
`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, false)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildBlankName(c *check.C) {
name := "testbuildblankname"
_, _, stderr, err := buildImageWithStdoutStderr(name,
`FROM busybox
ENV =`,
true)
if err == nil {
c.Fatal("Build was supposed to fail but didn't")
}
if !strings.Contains(stderr, "ENV names can not be blank") {
c.Fatalf("Missing error message, got: %s", stderr)
}
_, _, stderr, err = buildImageWithStdoutStderr(name,
`FROM busybox
LABEL =`,
true)
if err == nil {
c.Fatal("Build was supposed to fail but didn't")
}
if !strings.Contains(stderr, "LABEL names can not be blank") {
c.Fatalf("Missing error message, got: %s", stderr)
}
_, _, stderr, err = buildImageWithStdoutStderr(name,
`FROM busybox
ARG =foo`,
true)
if err == nil {
c.Fatal("Build was supposed to fail but didn't")
}
if !strings.Contains(stderr, "ARG names can not be blank") {
c.Fatalf("Missing error message, got: %s", stderr)
}
}
func (s *DockerSuite) TestBuildEnv(c *check.C) {
testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows
name := "testbuildenv"
expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]"
_, err := buildImage(name,
`FROM busybox
ENV PATH /test:$PATH
ENV PORT 2375
RUN [ $(env | grep PORT) = 'PORT=2375' ]`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Env")
if res != expected {
c.Fatalf("Env %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildPATH(c *check.C) {
testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows
defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
fn := func(dockerfile string, exp string) {
_, err := buildImage("testbldpath", dockerfile, true)
c.Assert(err, check.IsNil)
res := inspectField(c, "testbldpath", "Config.Env")
if res != exp {
c.Fatalf("Env %q, expected %q for dockerfile:%q", res, exp, dockerfile)
}
}
tests := []struct{ dockerfile, exp string }{
{"FROM scratch\nMAINTAINER me", "[PATH=" + defPath + "]"},
{"FROM busybox\nMAINTAINER me", "[PATH=" + defPath + "]"},
{"FROM scratch\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"},
{"FROM busybox\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"},
{"FROM scratch\nENV PATH=/test", "[PATH=/test]"},
{"FROM busybox\nENV PATH=/test", "[PATH=/test]"},
{"FROM scratch\nENV PATH=''", "[PATH=]"},
{"FROM busybox\nENV PATH=''", "[PATH=]"},
}
for _, test := range tests {
fn(test.dockerfile, test.exp)
}
}
func (s *DockerSuite) TestBuildContextCleanup(c *check.C) {
testRequires(c, DaemonIsLinux)
testRequires(c, SameHostDaemon)
name := "testbuildcontextcleanup"
entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
_, err = buildImage(name,
`FROM scratch
ENTRYPOINT ["/bin/echo"]`,
true)
if err != nil {
c.Fatal(err)
}
entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
if err = compareDirectoryEntries(entries, entriesFinal); err != nil {
c.Fatalf("context should have been deleted, but wasn't")
}
}
func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) {
testRequires(c, DaemonIsLinux)
testRequires(c, SameHostDaemon)
name := "testbuildcontextcleanup"
entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
_, err = buildImage(name,
`FROM scratch
RUN /non/existing/command`,
true)
if err == nil {
c.Fatalf("expected build to fail, but it didn't")
}
entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
if err = compareDirectoryEntries(entries, entriesFinal); err != nil {
c.Fatalf("context should have been deleted, but wasn't")
}
}
func (s *DockerSuite) TestBuildCmd(c *check.C) {
name := "testbuildcmd"
expected := "[/bin/echo Hello World]"
_, err := buildImage(name,
`FROM `+minimalBaseImage()+`
CMD ["/bin/echo", "Hello World"]`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Cmd")
if res != expected {
c.Fatalf("Cmd %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildExpose(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildexpose"
expected := "map[2375/tcp:{}]"
_, err := buildImage(name,
`FROM scratch
EXPOSE 2375`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.ExposedPorts")
if res != expected {
c.Fatalf("Exposed ports %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
// start building a Dockerfile with a large number of ports
portList := make([]string, 50)
line := make([]string, 100)
expectedPorts := make([]int, len(portList)*len(line))
for i := 0; i < len(portList); i++ {
for j := 0; j < len(line); j++ {
p := i*len(line) + j + 1
line[j] = strconv.Itoa(p)
expectedPorts[p-1] = p
}
if i == len(portList)-1 {
portList[i] = strings.Join(line, " ")
} else {
portList[i] = strings.Join(line, " ") + ` \`
}
}
dockerfile := `FROM scratch
EXPOSE {{range .}} {{.}}
{{end}}`
tmpl := template.Must(template.New("dockerfile").Parse(dockerfile))
buf := bytes.NewBuffer(nil)
if err := tmpl.Execute(buf, portList); err != nil {
c.Fatal(err)
}
name := "testbuildexpose"
_, err := buildImage(name, buf.String(), true)
if err != nil {
c.Fatal(err)
}
// check if all the ports are saved inside Config.ExposedPorts
res := inspectFieldJSON(c, name, "Config.ExposedPorts")
var exposedPorts map[string]interface{}
if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil {
c.Fatal(err)
}
for _, p := range expectedPorts {
ep := fmt.Sprintf("%d/tcp", p)
if _, ok := exposedPorts[ep]; !ok {
c.Errorf("Port(%s) is not exposed", ep)
} else {
delete(exposedPorts, ep)
}
}
if len(exposedPorts) != 0 {
c.Errorf("Unexpected extra exposed ports %v", exposedPorts)
}
}
func (s *DockerSuite) TestBuildExposeOrder(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
buildID := func(name, exposed string) string {
_, err := buildImage(name, fmt.Sprintf(`FROM scratch
EXPOSE %s`, exposed), true)
if err != nil {
c.Fatal(err)
}
id := inspectField(c, name, "Id")
return id
}
id1 := buildID("testbuildexpose1", "80 2375")
id2 := buildID("testbuildexpose2", "2375 80")
if id1 != id2 {
c.Errorf("EXPOSE should invalidate the cache only when ports actually changed")
}
}
func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildexposeuppercaseproto"
expected := "map[5678/udp:{}]"
_, err := buildImage(name,
`FROM scratch
EXPOSE 5678/UDP`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.ExposedPorts")
if res != expected {
c.Fatalf("Exposed ports %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) {
name := "testbuildentrypointinheritance"
name2 := "testbuildentrypointinheritance2"
_, err := buildImage(name,
`FROM busybox
ENTRYPOINT ["/bin/echo"]`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Entrypoint")
expected := "[/bin/echo]"
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
_, err = buildImage(name2,
fmt.Sprintf(`FROM %s
ENTRYPOINT []`, name),
true)
if err != nil {
c.Fatal(err)
}
res = inspectField(c, name2, "Config.Entrypoint")
expected = "[]"
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) {
name := "testbuildentrypoint"
expected := "[]"
_, err := buildImage(name,
`FROM busybox
ENTRYPOINT []`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Entrypoint")
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEntrypoint(c *check.C) {
name := "testbuildentrypoint"
expected := "[/bin/echo]"
_, err := buildImage(name,
`FROM `+minimalBaseImage()+`
ENTRYPOINT ["/bin/echo"]`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Entrypoint")
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
// #6445 ensure ONBUILD triggers aren't committed to grandchildren
func (s *DockerSuite) TestBuildOnBuildLimitedInheritance(c *check.C) {
var (
out2, out3 string
)
{
name1 := "testonbuildtrigger1"
dockerfile1 := `
FROM busybox
RUN echo "GRANDPARENT"
ONBUILD RUN echo "ONBUILD PARENT"
`
ctx, err := fakeContext(dockerfile1, nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out1, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name1, ".")
if err != nil {
c.Fatalf("build failed to complete: %s, %v", out1, err)
}
}
{
name2 := "testonbuildtrigger2"
dockerfile2 := `
FROM testonbuildtrigger1
`
ctx, err := fakeContext(dockerfile2, nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out2, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name2, ".")
if err != nil {
c.Fatalf("build failed to complete: %s, %v", out2, err)
}
}
{
name3 := "testonbuildtrigger3"
dockerfile3 := `
FROM testonbuildtrigger2
`
ctx, err := fakeContext(dockerfile3, nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out3, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name3, ".")
if err != nil {
c.Fatalf("build failed to complete: %s, %v", out3, err)
}
}
// ONBUILD should be run in the second build.
if !strings.Contains(out2, "ONBUILD PARENT") {
c.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent")
}
// ONBUILD should *not* be run in the third build.
if strings.Contains(out3, "ONBUILD PARENT") {
c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent")
}
}
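// Expected ONBUILD propagation for the chain built above (a summary of what the
// output checks assert, not additional behaviour):
//
//   testonbuildtrigger1: ONBUILD RUN echo "ONBUILD PARENT"  -> recorded, not run
//   testonbuildtrigger2: FROM testonbuildtrigger1           -> trigger fires here
//   testonbuildtrigger3: FROM testonbuildtrigger2           -> no trigger left
//
// ONBUILD instructions are consumed by the direct child and are not re-inherited
// by grandchildren.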
func (s *DockerSuite) TestBuildWithCache(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildwithcache"
id1, err := buildImage(name,
`FROM scratch
MAINTAINER dockerio
EXPOSE 5432
ENTRYPOINT ["/bin/echo"]`,
true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImage(name,
`FROM scratch
MAINTAINER dockerio
EXPOSE 5432
ENTRYPOINT ["/bin/echo"]`,
true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
}
func (s *DockerSuite) TestBuildWithoutCache(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildwithoutcache"
name2 := "testbuildwithoutcache2"
id1, err := buildImage(name,
`FROM scratch
MAINTAINER dockerio
EXPOSE 5432
ENTRYPOINT ["/bin/echo"]`,
true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImage(name2,
`FROM scratch
MAINTAINER dockerio
EXPOSE 5432
ENTRYPOINT ["/bin/echo"]`,
false)
if err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
func (s *DockerSuite) TestBuildConditionalCache(c *check.C) {
name := "testbuildconditionalcache"
dockerfile := `
FROM busybox
ADD foo /tmp/`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatalf("Error building #1: %s", err)
}
if err := ctx.Add("foo", "bye"); err != nil {
c.Fatalf("Error modifying foo: %s", err)
}
id2, err := buildImageFromContext(name, ctx, false)
if err != nil {
c.Fatalf("Error building #2: %s", err)
}
if id2 == id1 {
c.Fatal("Should not have used the cache")
}
id3, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatalf("Error building #3: %s", err)
}
if id3 != id2 {
c.Fatal("Should have used the cache")
}
}
func (s *DockerSuite) TestBuildAddLocalFileWithCache(c *check.C) {
// local files are not owned by the correct user
testRequires(c, NotUserNamespace)
name := "testbuildaddlocalfilewithcache"
name2 := "testbuildaddlocalfilewithcache2"
dockerfile := `
FROM busybox
MAINTAINER dockerio
ADD foo /usr/lib/bla/bar
RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddMultipleLocalFileWithCache(c *check.C) {
name := "testbuildaddmultiplelocalfilewithcache"
name2 := "testbuildaddmultiplelocalfilewithcache2"
dockerfile := `
FROM busybox
MAINTAINER dockerio
ADD foo Dockerfile /usr/lib/bla/
RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddLocalFileWithoutCache(c *check.C) {
// local files are not owned by the correct user
testRequires(c, NotUserNamespace)
name := "testbuildaddlocalfilewithoutcache"
name2 := "testbuildaddlocalfilewithoutcache2"
dockerfile := `
FROM busybox
MAINTAINER dockerio
ADD foo /usr/lib/bla/bar
RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, false)
if err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) {
name := "testbuildcopydirbutnotfile"
name2 := "testbuildcopydirbutnotfile2"
dockerfile := `
FROM ` + minimalBaseImage() + `
COPY dir /tmp/`
ctx, err := fakeContext(dockerfile, map[string]string{
"dir/foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
// Check that adding a file with a similar name doesn't mess with the cache
if err := ctx.Add("dir_file", "hello2"); err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but wasn't")
}
}
func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) {
name := "testbuildaddcurrentdirwithcache"
name2 := name + "2"
name3 := name + "3"
name4 := name + "4"
dockerfile := `
FROM ` + minimalBaseImage() + `
MAINTAINER dockerio
ADD . /usr/lib/bla`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
// Check that adding a file invalidates the cache of "ADD ."
if err := ctx.Add("bar", "hello2"); err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
// Check that changing a file invalidates the cache of "ADD ."
if err := ctx.Add("foo", "hello1"); err != nil {
c.Fatal(err)
}
id3, err := buildImageFromContext(name3, ctx, true)
if err != nil {
c.Fatal(err)
}
if id2 == id3 {
c.Fatal("The cache should have been invalided but hasn't.")
}
// Check that rewriting a file with the same content but a different mtime does
// not invalidate the cache of "ADD ."
time.Sleep(1 * time.Second) // wait a second because of mtime precision
if err := ctx.Add("foo", "hello1"); err != nil {
c.Fatal(err)
}
id4, err := buildImageFromContext(name4, ctx, true)
if err != nil {
c.Fatal(err)
}
if id3 != id4 {
c.Fatal("The cache should have been used but hasn't.")
}
}
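// Cache behaviour exercised by TestBuildAddCurrentDirWithCache, summarised (this
// only restates the assertions above):
//
//   build #1: ADD . /usr/lib/bla                  -> populates the cache
//   build #2: new file "bar" added to the context -> cache miss (id1 != id2)
//   build #3: content of "foo" changed            -> cache miss (id2 != id3)
//   build #4: "foo" rewritten with same content   -> cache hit  (id3 == id4)
//
// i.e. the ADD cache keys on file content rather than on mtimes.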
func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) {
name := "testbuildaddcurrentdirwithoutcache"
name2 := "testbuildaddcurrentdirwithoutcache2"
dockerfile := `
FROM ` + minimalBaseImage() + `
MAINTAINER dockerio
ADD . /usr/lib/bla`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, false)
if err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddRemoteFileWithCache(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
name := "testbuildaddremotefilewithcache"
server, err := fakeStorage(map[string]string{
"baz": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
id1, err := buildImage(name,
fmt.Sprintf(`FROM scratch
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL()),
true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImage(name,
fmt.Sprintf(`FROM scratch
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL()),
true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddRemoteFileWithoutCache(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
name := "testbuildaddremotefilewithoutcache"
name2 := "testbuildaddremotefilewithoutcache2"
server, err := fakeStorage(map[string]string{
"baz": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
id1, err := buildImage(name,
fmt.Sprintf(`FROM scratch
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL()),
true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImage(name2,
fmt.Sprintf(`FROM scratch
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL()),
false)
if err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
name := "testbuildaddremotefilemtime"
name2 := name + "2"
name3 := name + "3"
files := map[string]string{"baz": "hello"}
server, err := fakeStorage(files)
if err != nil {
c.Fatal(err)
}
defer server.Close()
ctx, err := fakeContext(fmt.Sprintf(`FROM scratch
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but wasn't - #1")
}
// Now create a different server with the same contents (which causes a different mtime).
// The cache should still be used.
// Allow some time to pass, as mtime precision is only 1s.
time.Sleep(2 * time.Second)
server2, err := fakeStorage(files)
if err != nil {
c.Fatal(err)
}
defer server2.Close()
ctx2, err := fakeContext(fmt.Sprintf(`FROM scratch
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil)
if err != nil {
c.Fatal(err)
}
defer ctx2.Close()
id3, err := buildImageFromContext(name3, ctx2, true)
if err != nil {
c.Fatal(err)
}
if id1 != id3 {
c.Fatal("The cache should have been used but wasn't")
}
}
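// As in the local-file case, the remote ADD cache above is expected to key on
// the downloaded content: serving identical bytes from a second fakeStorage
// instance (and therefore, presumably, with a later modification time) must
// still reuse the cached layer, which is what the id1 == id3 check asserts.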
func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithCache(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
name := "testbuildaddlocalandremotefilewithcache"
server, err := fakeStorage(map[string]string{
"baz": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
ctx, err := fakeContext(fmt.Sprintf(`FROM scratch
MAINTAINER dockerio
ADD foo /usr/lib/bla/bar
ADD %s/baz /usr/lib/baz/quux`, server.URL()),
map[string]string{
"foo": "hello world",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
}
func testContextTar(c *check.C, compression archive.Compression) {
ctx, err := fakeContext(
`FROM busybox
ADD foo /foo
CMD ["cat", "/foo"]`,
map[string]string{
"foo": "bar",
},
)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
context, err := archive.Tar(ctx.Dir, compression)
if err != nil {
c.Fatalf("failed to build context tar: %v", err)
}
name := "contexttar"
buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-")
buildCmd.Stdin = context
if out, _, err := runCommandWithOutput(buildCmd); err != nil {
c.Fatalf("build failed to complete: %v %v", out, err)
}
}
func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) {
testContextTar(c, archive.Gzip)
}
func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) {
testContextTar(c, archive.Uncompressed)
}
func (s *DockerSuite) TestBuildNoContext(c *check.C) {
buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-")
buildCmd.Stdin = strings.NewReader(
`FROM busybox
CMD ["echo", "ok"]`)
if out, _, err := runCommandWithOutput(buildCmd); err != nil {
c.Fatalf("build failed to complete: %v %v", out, err)
}
if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" {
c.Fatalf("run produced invalid output: %q, expected %q", out, "ok")
}
}
// TODO: TestCaching
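// TestBuildAddLocalAndRemoteFilesWithoutCache verifies that rebuilding the same
// local+remote ADD context with caching disabled produces a new image ID.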
func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithoutCache(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
name := "testbuildaddlocalandremotefilewithoutcache"
name2 := "testbuildaddlocalandremotefilewithoutcache2"
server, err := fakeStorage(map[string]string{
"baz": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
ctx, err := fakeContext(fmt.Sprintf(`FROM scratch
MAINTAINER dockerio
ADD foo /usr/lib/bla/bar
ADD %s/baz /usr/lib/baz/quux`, server.URL()),
map[string]string{
"foo": "hello world",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, false)
if err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
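// TestBuildWithVolumeOwnership checks that ownership and permissions set on a
// directory at build time survive it being declared as a VOLUME.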
func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildimg"
_, err := buildImage(name,
`FROM busybox:latest
RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test
VOLUME /test`,
true)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test")
if expected := "drw-------"; !strings.Contains(out, expected) {
c.Fatalf("expected %s received %s", expected, out)
}
if expected := "daemon daemon"; !strings.Contains(out, expected) {
c.Fatalf("expected %s received %s", expected, out)
}
}
// testing #1405 - config.Cmd does not get cleaned up if
// utilizing cache
func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) {
name := "testbuildcmdcleanup"
if _, err := buildImage(name,
`FROM busybox
RUN echo "hello"`,
true); err != nil {
c.Fatal(err)
}
ctx, err := fakeContext(`FROM busybox
RUN echo "hello"
ADD foo /foo
ENTRYPOINT ["/bin/echo"]`,
map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Cmd")
// Cmd must be cleaned up
if res != "[]" {
c.Fatalf("Cmd %s, expected nil", res)
}
}
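// TestBuildAddFileNotFound checks that ADD of a file missing from the build context
// fails with a "no such file or directory" style error.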
func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) {
name := "testbuildaddnotfound"
expected := "foo: no such file or directory"
if daemonPlatform == "windows" {
expected = "foo: The system cannot find the file specified"
}
ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
ADD foo /usr/local/bar`,
map[string]string{"bar": "hello"})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
if !strings.Contains(err.Error(), expected) {
c.Fatalf("Wrong error %v, must be about missing foo file or directory", err)
}
} else {
c.Fatal("Error must not be nil")
}
}
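// TestBuildInheritance checks that a child image inherits configuration (here the
// EXPOSEd ports) from its parent while still being able to set its own ENTRYPOINT.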
func (s *DockerSuite) TestBuildInheritance(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildinheritance"
_, err := buildImage(name,
`FROM scratch
EXPOSE 2375`,
true)
if err != nil {
c.Fatal(err)
}
ports1 := inspectField(c, name, "Config.ExposedPorts")
_, err = buildImage(name,
fmt.Sprintf(`FROM %s
ENTRYPOINT ["/bin/echo"]`, name),
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Entrypoint")
if expected := "[/bin/echo]"; res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
ports2 := inspectField(c, name, "Config.ExposedPorts")
if ports1 != ports2 {
c.Fatalf("Ports must be same: %s != %s", ports1, ports2)
}
}
func (s *DockerSuite) TestBuildFails(c *check.C) {
name := "testbuildfails"
_, err := buildImage(name,
`FROM busybox
RUN sh -c "exit 23"`,
true)
if err != nil {
if !strings.Contains(err.Error(), "returned a non-zero code: 23") {
c.Fatalf("Wrong error %v, must be about non-zero code 23", err)
}
} else {
c.Fatal("Error must not be nil")
}
}
func (s *DockerSuite) TestBuildOnBuild(c *check.C) {
name := "testbuildonbuild"
_, err := buildImage(name,
`FROM busybox
ONBUILD RUN touch foobar`,
true)
if err != nil {
c.Fatal(err)
}
_, err = buildImage(name,
fmt.Sprintf(`FROM %s
RUN [ -f foobar ]`, name),
true)
if err != nil {
c.Fatal(err)
}
}
// gh #2446
func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildaddtosymlinkdest"
ctx, err := fakeContext(`FROM busybox
RUN mkdir /foo
RUN ln -s /foo /bar
ADD foo /bar/
RUN [ -f /bar/foo ]
RUN [ -f /foo/foo ]`,
map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
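// TestBuildEscapeWhitespace checks that backslash line continuations inside a
// MAINTAINER string are collapsed into a single value.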
func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) {
name := "testbuildescapewhitespace"
_, err := buildImage(name, `
# ESCAPE=\
FROM busybox
MAINTAINER "Docker \
IO <io@\
docker.com>"
`, true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Author")
if res != "\"Docker IO <[email protected]>\"" {
c.Fatalf("Parsed string did not match the escaped string. Got: %q", res)
}
}
func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) {
// Verify that strings that look like ints are still passed as strings
name := "testbuildstringing"
_, err := buildImage(name, `
FROM busybox
MAINTAINER 123
`, true)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "inspect", name)
if !strings.Contains(out, "\"123\"") {
c.Fatalf("Output does not contain the int as a string:\n%s", out)
}
}
func (s *DockerSuite) TestBuildDockerignore(c *check.C) {
testRequires(c, DaemonIsLinux) // TODO Windows: This test passes on Windows,
// but currently adds a disproportionate amount of time for the value it has.
// Removing it from Windows CI for now, but this will be revisited in the
// TP5 timeframe when perf is better.
name := "testbuilddockerignore"
dockerfile := `
FROM busybox
ADD . /bla
RUN sh -c "[[ -f /bla/src/x.go ]]"
RUN sh -c "[[ -f /bla/Makefile ]]"
RUN sh -c "[[ ! -e /bla/src/_vendor ]]"
RUN sh -c "[[ ! -e /bla/.gitignore ]]"
RUN sh -c "[[ ! -e /bla/README.md ]]"
RUN sh -c "[[ ! -e /bla/dir/foo ]]"
RUN sh -c "[[ ! -e /bla/foo ]]"
RUN sh -c "[[ ! -e /bla/.git ]]"
RUN sh -c "[[ ! -e v.cc ]]"
RUN sh -c "[[ ! -e src/v.cc ]]"
RUN sh -c "[[ ! -e src/_vendor/v.cc ]]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"Makefile": "all:",
".git/HEAD": "ref: foo",
"src/x.go": "package main",
"src/_vendor/v.go": "package main",
"src/_vendor/v.cc": "package main",
"src/v.cc": "package main",
"v.cc": "package main",
"dir/foo": "",
".gitignore": "",
"README.md": "readme",
".dockerignore": `
.git
pkg
.gitignore
src/_vendor
*.md
**/*.cc
dir`,
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
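// TestBuildDockerignoreCleanPaths checks that .dockerignore patterns are path-cleaned
// (./foo, dir1//foo, ./dir1/../foo2) before being matched against the context.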
func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) {
name := "testbuilddockerignorecleanpaths"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "foo",
"foo2": "foo2",
"dir1/foo": "foo in dir1",
".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
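// TestBuildDockerignoreExceptions checks that "!" exception patterns re-include files
// that an earlier .dockerignore pattern excluded.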
func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) {
testRequires(c, DaemonIsLinux) // TODO Windows: This test passes on Windows,
// but currently adds a disproportionate amount of time for the value it has.
// Removing it from Windows CI for now, but this will be revisited in the
// TP5 timeframe when perf is better.
name := "testbuilddockerignoreexceptions"
dockerfile := `
FROM busybox
ADD . /bla
RUN sh -c "[[ -f /bla/src/x.go ]]"
RUN sh -c "[[ -f /bla/Makefile ]]"
RUN sh -c "[[ ! -e /bla/src/_vendor ]]"
RUN sh -c "[[ ! -e /bla/.gitignore ]]"
RUN sh -c "[[ ! -e /bla/README.md ]]"
RUN sh -c "[[ -e /bla/dir/dir/foo ]]"
RUN sh -c "[[ ! -e /bla/dir/foo1 ]]"
RUN sh -c "[[ -f /bla/dir/e ]]"
RUN sh -c "[[ -f /bla/dir/e-dir/foo ]]"
RUN sh -c "[[ ! -e /bla/foo ]]"
RUN sh -c "[[ ! -e /bla/.git ]]"
RUN sh -c "[[ -e /bla/dir/a.cc ]]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"Makefile": "all:",
".git/HEAD": "ref: foo",
"src/x.go": "package main",
"src/_vendor/v.go": "package main",
"dir/foo": "",
"dir/foo1": "",
"dir/dir/f1": "",
"dir/dir/foo": "",
"dir/e": "",
"dir/e-dir/foo": "",
".gitignore": "",
"README.md": "readme",
"dir/a.cc": "hello",
".dockerignore": `
.git
pkg
.gitignore
src/_vendor
*.md
dir
!dir/e*
!dir/dir/foo
**/*.cc
!**/*.cc`,
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
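// TestBuildDockerignoringDockerfile checks that listing the Dockerfile (plain or as
// ./Dockerfile) in .dockerignore keeps it out of the context copied into the image.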
func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) {
name := "testbuilddockerignoredockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "! ls /tmp/Dockerfile"
RUN ls /tmp/.dockerignore`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": dockerfile,
".dockerignore": "Dockerfile\n",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't ignore Dockerfile correctly:%s", err)
}
// now try it with ./Dockerfile
ctx.Add(".dockerignore", "./Dockerfile\n")
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err)
}
}
func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) {
name := "testbuilddockerignoredockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN ls /tmp/Dockerfile
RUN sh -c "! ls /tmp/MyDockerfile"
RUN ls /tmp/.dockerignore`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": "Should not use me",
"MyDockerfile": dockerfile,
".dockerignore": "MyDockerfile\n",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't ignore MyDockerfile correctly:%s", err)
}
// now try it with ./MyDockerfile
ctx.Add(".dockerignore", "./MyDockerfile\n")
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err)
}
}
func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) {
name := "testbuilddockerignoredockerignore"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "! ls /tmp/.dockerignore"
RUN ls /tmp/Dockerfile`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": dockerfile,
".dockerignore": ".dockerignore\n",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't ignore .dockerignore correctly:%s", err)
}
}
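// TestBuildDockerignoreTouchDockerfile checks that modifying a Dockerfile which is
// itself listed in .dockerignore does not invalidate the build cache.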
func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) {
var id1 string
var id2 string
name := "testbuilddockerignoretouchdockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": dockerfile,
".dockerignore": "Dockerfile\n",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if id1, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't build it correctly:%s", err)
}
if id2, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't build it correctly:%s", err)
}
if id1 != id2 {
c.Fatalf("Didn't use the cache - 1")
}
// Now make sure touching Dockerfile doesn't invalidate the cache
if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil {
c.Fatalf("Didn't add Dockerfile: %s", err)
}
if id2, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't build it correctly:%s", err)
}
if id1 != id2 {
c.Fatalf("Didn't use the cache - 2")
}
// One more time but just 'touch' it instead of changing the content
if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil {
c.Fatalf("Didn't add Dockerfile: %s", err)
}
if id2, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't build it correctly:%s", err)
}
if id1 != id2 {
c.Fatalf("Didn't use the cache - 3")
}
}
func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) {
name := "testbuilddockerignorewholedir"
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.gitignore ]]"
RUN sh -c "[[ -f /Makefile ]]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": "FROM scratch",
"Makefile": "all:",
".gitignore": "",
".dockerignore": ".*\n",
})
c.Assert(err, check.IsNil)
defer ctx.Close()
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
c.Assert(ctx.Add(".dockerfile", "*"), check.IsNil)
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
c.Assert(ctx.Add(".dockerfile", "."), check.IsNil)
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
c.Assert(ctx.Add(".dockerfile", "?"), check.IsNil)
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
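// TestBuildDockerignoringBadExclusion checks that a bare "!" in .dockerignore is
// rejected as an illegal exclusion pattern.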
func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) {
name := "testbuilddockerignorebadexclusion"
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.gitignore ]]"
RUN sh -c "[[ -f /Makefile ]]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": "FROM scratch",
"Makefile": "all:",
".gitignore": "",
".dockerignore": "!\n",
})
c.Assert(err, check.IsNil)
defer ctx.Close()
if _, err = buildImageFromContext(name, ctx, true); err == nil {
c.Fatalf("Build was supposed to fail but didn't")
}
if err.Error() != "failed to build the image: Error checking context: 'Illegal exclusion pattern: !'.\n" {
c.Fatalf("Incorrect output, got:%q", err.Error())
}
}
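// TestBuildDockerignoringWildTopDir checks that top-level wildcard patterns in
// .dockerignore result in every file being excluded from the context.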
func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) {
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.dockerignore ]]"
RUN sh -c "[[ ! -e /Dockerfile ]]"
RUN sh -c "[[ ! -e /file1 ]]"
RUN sh -c "[[ ! -e /dir ]]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": "FROM scratch",
"file1": "",
"dir/dfile1": "",
})
c.Assert(err, check.IsNil)
defer ctx.Close()
// All of these should result in ignoring all files
for _, variant := range []string{"**", "**/", "**/**", "*"} {
ctx.Add(".dockerignore", variant)
_, err = buildImageFromContext("noname", ctx, true)
c.Assert(err, check.IsNil, check.Commentf("variant: %s", variant))
}
}
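// TestBuildDockerignoringWildDirs exercises "**" patterns, escaped wildcards, and
// trailing-dot patterns against a nested directory tree.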
func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) {
testRequires(c, DaemonIsLinux) // TODO Windows: Fix this test; also perf
dockerfile := `
FROM busybox
COPY . /
#RUN sh -c "[[ -e /.dockerignore ]]"
RUN sh -c "[[ -e /Dockerfile ]] && \
[[ ! -e /file0 ]] && \
[[ ! -e /dir1/file0 ]] && \
[[ ! -e /dir2/file0 ]] && \
[[ ! -e /file1 ]] && \
[[ ! -e /dir1/file1 ]] && \
[[ ! -e /dir1/dir2/file1 ]] && \
[[ ! -e /dir1/file2 ]] && \
[[ -e /dir1/dir2/file2 ]] && \
[[ ! -e /dir1/dir2/file4 ]] && \
[[ ! -e /dir1/dir2/file5 ]] && \
[[ ! -e /dir1/dir2/file6 ]] && \
[[ ! -e /dir1/dir3/file7 ]] && \
[[ ! -e /dir1/dir3/file8 ]] && \
[[ -e /dir1/dir3 ]] && \
[[ -e /dir1/dir4 ]] && \
[[ ! -e 'dir1/dir5/fileAA' ]] && \
[[ -e 'dir1/dir5/fileAB' ]] && \
[[ -e 'dir1/dir5/fileB' ]]" # "." in pattern means nothing
RUN echo all done!`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": "FROM scratch",
"file0": "",
"dir1/file0": "",
"dir1/dir2/file0": "",
"file1": "",
"dir1/file1": "",
"dir1/dir2/file1": "",
"dir1/file2": "",
"dir1/dir2/file2": "", // remains
"dir1/dir2/file4": "",
"dir1/dir2/file5": "",
"dir1/dir2/file6": "",
"dir1/dir3/file7": "",
"dir1/dir3/file8": "",
"dir1/dir4/file9": "",
"dir1/dir5/fileAA": "",
"dir1/dir5/fileAB": "",
"dir1/dir5/fileB": "",
".dockerignore": `
**/file0
**/*file1
**/dir1/file2
dir1/**/file4
**/dir2/file5
**/dir1/dir2/file6
dir1/dir3/**
**/dir4/**
**/file?A
**/file\?B
**/dir5/file.
`,
})
c.Assert(err, check.IsNil)
defer ctx.Close()
_, err = buildImageFromContext("noname", ctx, true)
c.Assert(err, check.IsNil)
}
func (s *DockerSuite) TestBuildLineBreak(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildlinebreak"
_, err := buildImage(name,
`FROM busybox
RUN sh -c 'echo root:testpass \
> /tmp/passwd'
RUN mkdir -p /var/run/sshd
RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]"
RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`,
true)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildEOLInLine(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildeolinline"
_, err := buildImage(name,
`FROM busybox
RUN sh -c 'echo root:testpass > /tmp/passwd'
RUN echo "foo \n bar"; echo "baz"
RUN mkdir -p /var/run/sshd
RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]"
RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`,
true)
if err != nil {
c.Fatal(err)
}
}
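// TestBuildCommentsShebangs checks that Dockerfile comments (including one ending in
// a line continuation) and shebang lines in generated scripts are handled correctly.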
func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildcomments"
_, err := buildImage(name,
`FROM busybox
# This is an ordinary comment.
RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh
RUN [ ! -x /hello.sh ]
# comment with line break \
RUN chmod +x /hello.sh
RUN [ -x /hello.sh ]
RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ]
RUN [ "$(/hello.sh)" = "hello world" ]`,
true)
if err != nil {
c.Fatal(err)
}
}
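// TestBuildUsersAndGroups exercises the USER instruction with names, numeric IDs,
// user:group syntax, supplementary groups, and unknown uid/gid values.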
func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildusers"
_, err := buildImage(name,
`FROM busybox
# Make sure our defaults work
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ]
# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0)
USER root
RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ]
# Setup dockerio user and group
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd && \
echo 'dockerio:x:1001:' >> /etc/group
# Make sure we can switch to our user and all the information is exactly as we expect it to be
USER dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
# Switch back to root and double check that worked exactly as we might expect it to
USER root
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] && \
# Add a "supplementary" group for our dockerio user \
echo 'supplementary:x:1002:dockerio' >> /etc/group
# ... and then go verify that we get it like we expect
USER dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ]
USER 1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ]
# super test the new "user:group" syntax
USER dockerio:dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER 1001:dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER dockerio:1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER 1001:1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER dockerio:supplementary
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER dockerio:1002
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER 1001:supplementary
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER 1001:1002
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
# make sure unknown uid/gid still works properly
USER 1042:1043
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`,
true)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildEnvUsage(c *check.C) {
// /docker/world/hello is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildenvusage"
dockerfile := `FROM busybox
ENV HOME /root
ENV PATH $HOME/bin:$PATH
ENV PATH /tmp:$PATH
RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ]
ENV FOO /foo/baz
ENV BAR /bar
ENV BAZ $BAR
ENV FOOPATH $PATH:$FOO
RUN [ "$BAR" = "$BAZ" ]
RUN [ "$FOOPATH" = "$PATH:/foo/baz" ]
ENV FROM hello/docker/world
ENV TO /docker/world/hello
ADD $FROM $TO
RUN [ "$(cat $TO)" = "hello" ]
ENV abc=def
ENV ghi=$abc
RUN [ "$ghi" = "def" ]
`
ctx, err := fakeContext(dockerfile, map[string]string{
"hello/docker/world": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) {
// /docker/world/hello is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildenvusage2"
dockerfile := `FROM busybox
ENV abc=def def="hello world"
RUN [ "$abc,$def" = "def,hello world" ]
ENV def=hello\ world v1=abc v2="hi there" v3='boogie nights' v4="with'quotes too"
RUN [ "$def,$v1,$v2,$v3,$v4" = "hello world,abc,hi there,boogie nights,with'quotes too" ]
ENV abc=zzz FROM=hello/docker/world
ENV abc=zzz TO=/docker/world/hello
ADD $FROM $TO
RUN [ "$abc,$(cat $TO)" = "zzz,hello" ]
ENV abc 'yyy'
RUN [ $abc = 'yyy' ]
ENV abc=
RUN [ "$abc" = "" ]
# use grep to make sure if the builder substitutes \$foo by mistake
# we don't get a false positive
ENV abc=\$foo
RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo)
ENV abc \$foo
RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo)
ENV abc=\'foo\' abc2=\"foo\"
RUN [ "$abc,$abc2" = "'foo',\"foo\"" ]
ENV abc "foo"
RUN [ "$abc" = "foo" ]
ENV abc 'foo'
RUN [ "$abc" = 'foo' ]
ENV abc \'foo\'
RUN [ "$abc" = "'foo'" ]
ENV abc \"foo\"
RUN [ "$abc" = '"foo"' ]
ENV abc=ABC
RUN [ "$abc" = "ABC" ]
ENV def1=${abc:-DEF} def2=${ccc:-DEF}
ENV def3=${ccc:-${def2}xx} def4=${abc:+ALT} def5=${def2:+${abc}:} def6=${ccc:-\$abc:} def7=${ccc:-\${abc}:}
RUN [ "$def1,$def2,$def3,$def4,$def5,$def6,$def7" = 'ABC,DEF,DEFxx,ALT,ABC:,$abc:,${abc:}' ]
ENV mypath=${mypath:+$mypath:}/home
ENV mypath=${mypath:+$mypath:}/away
RUN [ "$mypath" = '/home:/away' ]
ENV e1=bar
ENV e2=$e1 e3=$e11 e4=\$e1 e5=\$e11
RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ]
ENV ee1 bar
ENV ee2 $ee1
ENV ee3 $ee11
ENV ee4 \$ee1
ENV ee5 \$ee11
RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ]
ENV eee1="foo" eee2='foo'
ENV eee3 "foo"
ENV eee4 'foo'
RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ]
`
ctx, err := fakeContext(dockerfile, map[string]string{
"hello/docker/world": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildAddScript(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildaddscript"
dockerfile := `
FROM busybox
ADD test /test
RUN ["chmod","+x","/test"]
RUN ["/test"]
RUN [ "$(cat /testfile)" = 'test!' ]`
ctx, err := fakeContext(dockerfile, map[string]string{
"test": "#!/bin/sh\necho 'test!' > /testfile",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildAddTar(c *check.C) {
// /test/foo is not owned by the correct user
testRequires(c, NotUserNamespace)
name := "testbuildaddtar"
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar /
RUN cat /test/foo | grep Hi
ADD test.tar /test.tar
RUN cat /test.tar/test/foo | grep Hi
ADD test.tar /unlikely-to-exist
RUN cat /unlikely-to-exist/test/foo | grep Hi
ADD test.tar /unlikely-to-exist-trailing-slash/
RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi
RUN sh -c "mkdir /existing-directory" #sh -c is needed on Windows to use the correct mkdir
ADD test.tar /existing-directory
RUN cat /existing-directory/test/foo | grep Hi
ADD test.tar /existing-directory-trailing-slash/
RUN cat /existing-directory-trailing-slash/test/foo | grep Hi`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakeContextFromDir(tmpDir)
}()
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("build failed to complete for TestBuildAddTar: %v", err)
}
}
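// TestBuildAddBrokenTar checks that ADD of a truncated (corrupt) tar archive fails the build.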
func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) {
name := "testbuildaddbrokentar"
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar /`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
// Corrupt the tar by removing one byte off the end
stat, err := testTar.Stat()
if err != nil {
c.Fatalf("failed to stat tar archive: %v", err)
}
if err := testTar.Truncate(stat.Size() - 1); err != nil {
c.Fatalf("failed to truncate tar archive: %v", err)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakeContextFromDir(tmpDir)
}()
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err == nil {
c.Fatalf("build should have failed for TestBuildAddBrokenTar")
}
}
func (s *DockerSuite) TestBuildAddNonTar(c *check.C) {
name := "testbuildaddnontar"
// Should not try to extract test.tar
ctx, err := fakeContext(`
FROM busybox
ADD test.tar /
RUN test -f /test.tar`,
map[string]string{"test.tar": "not_a_tar_file"})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("build failed for TestBuildAddNonTar")
}
}
func (s *DockerSuite) TestBuildAddTarXz(c *check.C) {
// /test/foo is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildaddtarxz"
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar.xz /
RUN cat /test/foo | grep Hi`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
xzCompressCmd := exec.Command("xz", "-k", "test.tar")
xzCompressCmd.Dir = tmpDir
out, _, err := runCommandWithOutput(xzCompressCmd)
if err != nil {
c.Fatal(err, out)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakeContextFromDir(tmpDir)
}()
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err)
}
}
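// TestBuildAddTarXzGz checks that a gzip-wrapped tar.xz is not auto-extracted by ADD:
// the build expects test.tar.xz.gz to be copied into the image as a regular file.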
func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildaddtarxzgz"
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar.xz.gz /
RUN ls /test.tar.xz.gz`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
xzCompressCmd := exec.Command("xz", "-k", "test.tar")
xzCompressCmd.Dir = tmpDir
out, _, err := runCommandWithOutput(xzCompressCmd)
if err != nil {
c.Fatal(err, out)
}
gzipCompressCmd := exec.Command("gzip", "test.tar.xz")
gzipCompressCmd.Dir = tmpDir
out, _, err = runCommandWithOutput(gzipCompressCmd)
if err != nil {
c.Fatal(err, out)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakeContextFromDir(tmpDir)
}()
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err)
}
}
func (s *DockerSuite) TestBuildFromGIT(c *check.C) {
name := "testbuildfromgit"
git, err := newFakeGit("repo", map[string]string{
"Dockerfile": `FROM busybox
ADD first /first
RUN [ -f /first ]
MAINTAINER docker`,
"first": "test git data",
}, true)
if err != nil {
c.Fatal(err)
}
defer git.Close()
_, err = buildImageFromPath(name, git.RepoURL, true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
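// TestBuildFromGITWithContext checks building from a git URL that uses a
// "#branch:directory" fragment to select a subdirectory as the context.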
func (s *DockerSuite) TestBuildFromGITWithContext(c *check.C) {
name := "testbuildfromgit"
git, err := newFakeGit("repo", map[string]string{
"docker/Dockerfile": `FROM busybox
ADD first /first
RUN [ -f /first ]
MAINTAINER docker`,
"docker/first": "test git data",
}, true)
if err != nil {
c.Fatal(err)
}
defer git.Close()
u := fmt.Sprintf("%s#master:docker", git.RepoURL)
_, err = buildImageFromPath(name, u, true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
func (s *DockerSuite) TestBuildFromGITwithF(c *check.C) {
name := "testbuildfromgitwithf"
git, err := newFakeGit("repo", map[string]string{
"myApp/myDockerfile": `FROM busybox
RUN echo hi from Dockerfile`,
}, true)
if err != nil {
c.Fatal(err)
}
defer git.Close()
out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "myApp/myDockerfile", git.RepoURL)
if err != nil {
c.Fatalf("Error on build. Out: %s\nErr: %v", out, err)
}
if !strings.Contains(out, "hi from Dockerfile") {
c.Fatalf("Missing expected output, got:\n%s", out)
}
}
func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildfromremotetarball"
buffer := new(bytes.Buffer)
tw := tar.NewWriter(buffer)
defer tw.Close()
dockerfile := []byte(`FROM busybox
MAINTAINER docker`)
if err := tw.WriteHeader(&tar.Header{
Name: "Dockerfile",
Size: int64(len(dockerfile)),
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write(dockerfile); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
server, err := fakeBinaryStorage(map[string]*bytes.Buffer{
"testT.tar": buffer,
})
c.Assert(err, check.IsNil)
defer server.Close()
_, err = buildImageFromPath(name, server.URL()+"/testT.tar", true)
c.Assert(err, check.IsNil)
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
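// TestBuildCleanupCmdOnEntrypoint checks that declaring a new ENTRYPOINT in a child
// image clears the CMD inherited from its parent.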
func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) {
name := "testbuildcmdcleanuponentrypoint"
if _, err := buildImage(name,
`FROM `+minimalBaseImage()+`
CMD ["test"]
ENTRYPOINT ["echo"]`,
true); err != nil {
c.Fatal(err)
}
if _, err := buildImage(name,
fmt.Sprintf(`FROM %s
ENTRYPOINT ["cat"]`, name),
true); err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Cmd")
if res != "[]" {
c.Fatalf("Cmd %s, expected nil", res)
}
res = inspectField(c, name, "Config.Entrypoint")
if expected := "[cat]"; res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildClearCmd(c *check.C) {
name := "testbuildclearcmd"
_, err := buildImage(name,
`From `+minimalBaseImage()+`
ENTRYPOINT ["/bin/bash"]
CMD []`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Cmd")
if res != "[]" {
c.Fatalf("Cmd %s, expected %s", res, "[]")
}
}
func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) {
// Windows Server 2016 RS1 builds load the windowsservercore image from a tar rather than
// a .WIM file, and the tar layer has the default CMD set (same as the Linux ubuntu image),
// whereas the TP5 .WIM had a blank CMD. Hence this test is not applicable on RS1 or later
// builds
if daemonPlatform == "windows" && windowsDaemonKV >= 14375 {
c.Skip("Not applicable on Windows RS1 or later builds")
}
name := "testbuildemptycmd"
if _, err := buildImage(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true); err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Cmd")
if res != "null" {
c.Fatalf("Cmd %s, expected %s", res, "null")
}
}
func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) {
name := "testbuildonbuildparent"
if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil {
c.Fatal(err)
}
_, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true)
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "# Executing 1 build trigger") {
c.Fatal("failed to find the build trigger output", out)
}
}
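// TestBuildInvalidTag checks that an invalid (over-long) tag is rejected before the
// build context is sent to the daemon.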
func (s *DockerSuite) TestBuildInvalidTag(c *check.C) {
name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200)
_, out, err := buildImageWithOut(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true)
// if the error doesn't check for illegal tag name, or the image is built
// then this should fail
if !strings.Contains(out, "Error parsing reference") || strings.Contains(out, "Sending build context to Docker daemon") {
c.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out)
}
}
func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) {
name := "testbuildcmdshc"
if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Cmd")
expected := `["/bin/sh","-c","echo cmd"]`
if daemonPlatform == "windows" {
expected = `["cmd","/S","/C","echo cmd"]`
}
if res != expected {
c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
}
}
func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) {
// Test to make sure that when we concatenate CMD/ENTRYPOINT arrays we keep
// the arg separator, so that ["echo","hi"] and ["echo hi"] don't
// look the same
name := "testbuildcmdspaces"
var id1 string
var id2 string
var err error
if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil {
c.Fatal(err)
}
if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("Should not have resulted in the same CMD")
}
// Now do the same with ENTRYPOINT
if id1, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil {
c.Fatal(err)
}
if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("Should not have resulted in the same ENTRYPOINT")
}
}
func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) {
name := "testbuildcmdjson"
if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Cmd")
expected := `["echo","cmd"]`
if res != expected {
c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
}
}
func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) {
if _, err := buildImage("parent", `
FROM busybox
ENTRYPOINT exit 130
`, true); err != nil {
c.Fatal(err)
}
if _, status, _ := dockerCmdWithError("run", "parent"); status != 130 {
c.Fatalf("expected exit code 130 but received %d", status)
}
if _, err := buildImage("child", `
FROM parent
ENTRYPOINT exit 5
`, true); err != nil {
c.Fatal(err)
}
if _, status, _ := dockerCmdWithError("run", "child"); status != 5 {
c.Fatalf("expected exit code 5 but received %d", status)
}
}
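// TestBuildEntrypointInheritanceInspect checks both the inspected Config.Entrypoint
// and the runtime output when a child image overrides a shell-form ENTRYPOINT.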
func (s *DockerSuite) TestBuildEntrypointInheritanceInspect(c *check.C) {
var (
name = "testbuildepinherit"
name2 = "testbuildepinherit2"
expected = `["/bin/sh","-c","echo quux"]`
)
if daemonPlatform == "windows" {
expected = `["cmd","/S","/C","echo quux"]`
}
if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil {
c.Fatal(err)
}
if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name2, "Config.Entrypoint")
if res != expected {
c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res)
}
out, _ := dockerCmd(c, "run", name2)
expected = "quux"
if strings.TrimSpace(out) != expected {
c.Fatalf("Expected output is %s, got %s", expected, out)
}
}
func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) {
name := "testbuildentrypoint"
_, err := buildImage(name,
`FROM busybox
ENTRYPOINT echo`,
true)
if err != nil {
c.Fatal(err)
}
dockerCmd(c, "run", "--rm", name)
}
func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildexoticshellinterpolation"
_, err := buildImage(name, `
FROM busybox
ENV SOME_VAR a.b.c
RUN [ "$SOME_VAR" = 'a.b.c' ]
RUN [ "${SOME_VAR}" = 'a.b.c' ]
RUN [ "${SOME_VAR%.*}" = 'a.b' ]
RUN [ "${SOME_VAR%%.*}" = 'a' ]
RUN [ "${SOME_VAR#*.}" = 'b.c' ]
RUN [ "${SOME_VAR##*.}" = 'c' ]
RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ]
RUN [ "${#SOME_VAR}" = '5' ]
RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ]
RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ]
RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ]
RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ]
`, false)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) {
// This testcase is supposed to generate an error because the
// JSON array we're passing in on the CMD uses single quotes instead
// of double quotes (per the JSON spec). This means we interpret it
// as a "string" instead of "JSON array" and pass it on to "sh -c" and
// it should barf on it.
name := "testbuildsinglequotefails"
if _, err := buildImage(name,
`FROM busybox
CMD [ '/bin/sh', '-c', 'echo hi' ]`,
true); err != nil {
c.Fatal(err)
}
if _, _, err := dockerCmdWithError("run", "--rm", name); err == nil {
c.Fatal("The image was not supposed to be able to run")
}
}
func (s *DockerSuite) TestBuildVerboseOut(c *check.C) {
name := "testbuildverboseout"
expected := "\n123\n"
if daemonPlatform == "windows" {
expected = "\n123\r\n"
}
_, out, err := buildImageWithOut(name,
`FROM busybox
RUN echo 123`,
false)
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, expected) {
c.Fatalf("Output should contain %q: %q", "123", out)
}
}
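// TestBuildWithTabs checks that tab characters in a RUN instruction are preserved
// (either literally or as \u0009 escapes) in ContainerConfig.Cmd.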
func (s *DockerSuite) TestBuildWithTabs(c *check.C) {
name := "testbuildwithtabs"
_, err := buildImage(name,
"FROM busybox\nRUN echo\tone\t\ttwo", true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "ContainerConfig.Cmd")
expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]`
expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates
if daemonPlatform == "windows" {
expected1 = `["cmd","/S","/C","echo\tone\t\ttwo"]`
expected2 = `["cmd","/S","/C","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates
}
if res != expected1 && res != expected2 {
c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2)
}
}
func (s *DockerSuite) TestBuildLabels(c *check.C) {
name := "testbuildlabel"
expected := `{"License":"GPL","Vendor":"Acme"}`
_, err := buildImage(name,
`FROM busybox
LABEL Vendor=Acme
LABEL License GPL`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
}
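// TestBuildLabelsCache checks that equivalent LABEL instructions reuse the build
// cache while changed label values invalidate it.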
func (s *DockerSuite) TestBuildLabelsCache(c *check.C) {
name := "testbuildlabelcache"
id1, err := buildImage(name,
`FROM busybox
LABEL Vendor=Acme`, false)
if err != nil {
c.Fatalf("Build 1 should have worked: %v", err)
}
id2, err := buildImage(name,
`FROM busybox
LABEL Vendor=Acme`, true)
if err != nil || id1 != id2 {
c.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err)
}
id2, err = buildImage(name,
`FROM busybox
LABEL Vendor=Acme1`, true)
if err != nil || id1 == id2 {
c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err)
}
id2, err = buildImage(name,
`FROM busybox
LABEL Vendor Acme`, true) // Note: " " and "=" should be same
if err != nil || id1 != id2 {
c.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err)
}
// Now make sure the cache isn't used by mistake
id1, err = buildImage(name,
`FROM busybox
LABEL f1=b1 f2=b2`, false)
if err != nil {
c.Fatalf("Build 5 should have worked: %q", err)
}
id2, err = buildImage(name,
`FROM busybox
LABEL f1="b1 f2=b2"`, true)
if err != nil || id1 == id2 {
c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err)
}
}
func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) {
// This test makes sure that -q works correctly when build is successful:
// stdout has only the image ID (long image ID) and stderr is empty.
var stdout, stderr string
var err error
outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$")
tt := []struct {
Name string
BuildFunc func(string)
}{
{
Name: "quiet_build_stdin_success",
BuildFunc: func(name string) {
_, stdout, stderr, err = buildImageWithStdoutStderr(name, "FROM busybox", true, "-q", "--force-rm", "--rm")
},
},
{
Name: "quiet_build_ctx_success",
BuildFunc: func(name string) {
ctx, err := fakeContext("FROM busybox", map[string]string{
"quiet_build_success_fctx": "test",
})
if err != nil {
c.Fatalf("Failed to create context: %s", err.Error())
}
defer ctx.Close()
_, stdout, stderr, err = buildImageFromContextWithStdoutStderr(name, ctx, true, "-q", "--force-rm", "--rm")
},
},
{
Name: "quiet_build_git_success",
BuildFunc: func(name string) {
git, err := newFakeGit("repo", map[string]string{
"Dockerfile": "FROM busybox",
}, true)
if err != nil {
c.Fatalf("Failed to create the git repo: %s", err.Error())
}
defer git.Close()
_, stdout, stderr, err = buildImageFromGitWithStdoutStderr(name, git, true, "-q", "--force-rm", "--rm")
},
},
}
for _, te := range tt {
te.BuildFunc(te.Name)
if err != nil {
c.Fatalf("Test %s shouldn't fail, but got the following error: %s", te.Name, err.Error())
}
if outRegexp.Find([]byte(stdout)) == nil {
c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, stdout)
}
if stderr != "" {
c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, stderr)
}
}
}
func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) {
// This test makes sure that -q works correctly when build fails by
// comparing between the stderr output in quiet mode and in stdout
// and stderr output in verbose mode
testRequires(c, Network)
testName := "quiet_build_not_exists_image"
buildCmd := "FROM busybox11"
_, _, qstderr, qerr := buildImageWithStdoutStderr(testName, buildCmd, false, "-q", "--force-rm", "--rm")
_, vstdout, vstderr, verr := buildImageWithStdoutStderr(testName, buildCmd, false, "--force-rm", "--rm")
if verr == nil || qerr == nil {
c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", testName))
}
if qstderr != vstdout+vstderr {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, qstderr, vstdout+vstderr))
}
}
func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) {
// This test makes sure that -q works correctly when build fails by
// comparing between the stderr output in quiet mode and in stdout
// and stderr output in verbose mode
tt := []struct {
TestName string
BuildCmds string
}{
{"quiet_build_no_from_at_the_beginning", "RUN whoami"},
{"quiet_build_unknown_instr", "FROMD busybox"},
}
for _, te := range tt {
_, _, qstderr, qerr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "-q", "--force-rm", "--rm")
_, vstdout, vstderr, verr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "--force-rm", "--rm")
if verr == nil || qerr == nil {
c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", te.TestName))
}
if qstderr != vstdout+vstderr {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", te.TestName, qstderr, vstdout+vstderr))
}
}
}
func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) {
// This test ensures that when given a wrong URL, stderr in quiet mode and
// stderr in verbose mode are identical.
// TODO(vdemeester) with cobra, stdout has one carriage return too many, so this test should not check stdout
URL := "http://something.invalid"
Name := "quiet_build_wrong_remote"
_, _, qstderr, qerr := buildImageWithStdoutStderr(Name, "", false, "-q", "--force-rm", "--rm", URL)
_, _, vstderr, verr := buildImageWithStdoutStderr(Name, "", false, "--force-rm", "--rm", URL)
if qerr == nil || verr == nil {
c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", Name))
}
if qstderr != vstderr {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", Name, qstderr, vstderr))
}
}
func (s *DockerSuite) TestBuildStderr(c *check.C) {
// This test just makes sure that no non-error output goes
// to stderr
name := "testbuildstderr"
_, _, stderr, err := buildImageWithStdoutStderr(name,
"FROM busybox\nRUN echo one", true)
if err != nil {
c.Fatal(err)
}
if runtime.GOOS == "windows" &&
daemonPlatform != "windows" {
// Windows to non-Windows should have a security warning
if !strings.Contains(stderr, "SECURITY WARNING:") {
c.Fatalf("Stderr contains unexpected output: %q", stderr)
}
} else {
// Other platform combinations should have no stderr written too
if stderr != "" {
c.Fatalf("Stderr should have been empty, instead it's: %q", stderr)
}
}
}
func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) {
testRequires(c, UnixCli) // test uses chown: not available on windows
testRequires(c, DaemonIsLinux)
name := "testbuildchownsinglefile"
ctx, err := fakeContext(`
FROM busybox
COPY test /
RUN ls -l /test
RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ]
`, map[string]string{
"test": "test",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil {
c.Fatal(err)
}
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
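// TestBuildSymlinkBreakout checks that a tar ADDed into the image cannot use symlinks
// pointing outside the build root to write files onto the host.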
func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) {
name := "testbuildsymlinkbreakout"
tmpdir, err := ioutil.TempDir("", name)
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpdir)
ctx := filepath.Join(tmpdir, "context")
if err := os.MkdirAll(ctx, 0755); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(`
from busybox
add symlink.tar /
add inject /symlink/
`), 0644); err != nil {
c.Fatal(err)
}
inject := filepath.Join(ctx, "inject")
if err := ioutil.WriteFile(inject, nil, 0644); err != nil {
c.Fatal(err)
}
f, err := os.Create(filepath.Join(ctx, "symlink.tar"))
if err != nil {
c.Fatal(err)
}
w := tar.NewWriter(f)
w.WriteHeader(&tar.Header{
Name: "symlink2",
Typeflag: tar.TypeSymlink,
Linkname: "/../../../../../../../../../../../../../../",
Uid: os.Getuid(),
Gid: os.Getgid(),
})
w.WriteHeader(&tar.Header{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: filepath.Join("symlink2", tmpdir),
Uid: os.Getuid(),
Gid: os.Getgid(),
})
w.Close()
f.Close()
if _, err := buildImageFromContext(name, fakeContextFromDir(ctx), false); err != nil {
c.Fatal(err)
}
if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil {
c.Fatal("symlink breakout - inject")
} else if !os.IsNotExist(err) {
c.Fatalf("unexpected error: %v", err)
}
}
func (s *DockerSuite) TestBuildXZHost(c *check.C) {
// /usr/local/sbin/xz gets permission denied for the user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildxzhost"
ctx, err := fakeContext(`
FROM busybox
ADD xz /usr/local/sbin/
RUN chmod 755 /usr/local/sbin/xz
ADD test.xz /
RUN [ ! -e /injected ]`,
map[string]string{
"test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" +
"\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" +
"\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21",
"xz": "#!/bin/sh\ntouch /injected",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) {
// /foo/file gets permission denied for the user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux) // TODO Windows: Issue #20127
var (
name = "testbuildvolumescontent"
expected = "some text"
volName = "/foo"
)
if daemonPlatform == "windows" {
volName = "C:/foo"
}
ctx, err := fakeContext(`
FROM busybox
COPY content /foo/file
VOLUME `+volName+`
CMD cat /foo/file`,
map[string]string{
"content": expected,
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, false); err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "--rm", name)
if out != expected {
c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out)
}
}
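// TestBuildRenamedDockerfile exercises -f/--file with relative and absolute Dockerfile
// paths, both inside and outside the build context.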
func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) {
ctx, err := fakeContext(`FROM busybox
RUN echo from Dockerfile`,
map[string]string{
"Dockerfile": "FROM busybox\nRUN echo from Dockerfile",
"files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile",
"files/dFile": "FROM busybox\nRUN echo from files/dFile",
"dFile": "FROM busybox\nRUN echo from dFile",
"files/dFile2": "FROM busybox\nRUN echo from files/dFile2",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".")
if err != nil {
c.Fatalf("Failed to build: %s\n%s", out, err)
}
if !strings.Contains(out, "from Dockerfile") {
c.Fatalf("test1 should have used Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".")
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "from files/Dockerfile") {
c.Fatalf("test2 should have used files/Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".")
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "from files/dFile") {
c.Fatalf("test3 should have used files/dFile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".")
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "from dFile") {
c.Fatalf("test4 should have used dFile, output:%s", out)
}
dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5")
c.Assert(err, check.IsNil)
nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile")
if _, err = os.Create(nonDockerfileFile); err != nil {
c.Fatal(err)
}
out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".")
if err == nil {
c.Fatalf("test5 was supposed to fail to find passwd")
}
if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", nonDockerfileFile); !strings.Contains(out, expected) {
c.Fatalf("wrong error message:%v\nexpected to contain=%v", out, expected)
}
out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..")
if err != nil {
c.Fatalf("test6 failed: %s", err)
}
if !strings.Contains(out, "from Dockerfile") {
c.Fatalf("test6 should have used root Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..")
if err != nil {
c.Fatalf("test7 failed: %s", err)
}
if !strings.Contains(out, "from files/Dockerfile") {
c.Fatalf("test7 should have used files Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".")
if err == nil || !strings.Contains(out, "must be within the build context") {
c.Fatalf("test8 should have failed with Dockerfile out of context: %s", err)
}
tmpDir := os.TempDir()
out, _, err = dockerCmdInDir(c, tmpDir, "build", "-t", "test9", ctx.Dir)
if err != nil {
c.Fatalf("test9 - failed: %s", err)
}
if !strings.Contains(out, "from Dockerfile") {
c.Fatalf("test9 should have used root Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".")
if err != nil {
c.Fatalf("test10 should have worked: %s", err)
}
if !strings.Contains(out, "from files/dFile2") {
c.Fatalf("test10 should have used files/dFile2, output:%s", out)
}
}
func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) {
testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows
testRequires(c, DaemonIsLinux)
ctx, err := fakeContext(`FROM busybox
RUN echo from dockerfile`,
map[string]string{
"dockerfile": "FROM busybox\nRUN echo from dockerfile",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".")
if err != nil {
c.Fatalf("Failed to build: %s\n%s", out, err)
}
if !strings.Contains(out, "from dockerfile") {
c.Fatalf("Missing proper output: %s", out)
}
}
func (s *DockerSuite) TestBuildWithTwoDockerfiles(c *check.C) {
testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows
testRequires(c, DaemonIsLinux)
ctx, err := fakeContext(`FROM busybox
RUN echo from Dockerfile`,
map[string]string{
"dockerfile": "FROM busybox\nRUN echo from dockerfile",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".")
if err != nil {
c.Fatalf("Failed to build: %s\n%s", out, err)
}
if !strings.Contains(out, "from Dockerfile") {
c.Fatalf("Missing proper output: %s", out)
}
}
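// TestBuildFromURLWithF checks that -f is ignored when building from a remote
// Dockerfile URL and the remote file is used instead of the local Dockerfile.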
func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) {
testRequires(c, DaemonIsLinux)
server, err := fakeStorage(map[string]string{"baz": `FROM busybox
RUN echo from baz
COPY * /tmp/
RUN find /tmp/`})
if err != nil {
c.Fatal(err)
}
defer server.Close()
ctx, err := fakeContext(`FROM busybox
RUN echo from Dockerfile`,
map[string]string{})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
// Make sure that -f is ignored and that we don't use the Dockerfile
// that's in the current dir
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz")
if err != nil {
c.Fatalf("Failed to build: %s\n%s", out, err)
}
if !strings.Contains(out, "from baz") ||
strings.Contains(out, "/tmp/baz") ||
!strings.Contains(out, "/tmp/Dockerfile") {
c.Fatalf("Missing proper output: %s", out)
}
}
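// TestBuildFromStdinWithF checks that -f is ignored when the Dockerfile is piped in
// over stdin.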
func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) {
testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why
ctx, err := fakeContext(`FROM busybox
RUN echo "from Dockerfile"`,
map[string]string{})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
// Make sure that -f is ignored and that we don't use the Dockerfile
// that's in the current dir
dockerCommand := exec.Command(dockerBinary, "build", "-f", "baz", "-t", "test1", "-")
dockerCommand.Dir = ctx.Dir
dockerCommand.Stdin = strings.NewReader(`FROM busybox
RUN echo "from baz"
COPY * /tmp/
RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`)
out, status, err := runCommandWithOutput(dockerCommand)
if err != nil || status != 0 {
c.Fatalf("Error building: %s", err)
}
if !strings.Contains(out, "from baz") ||
strings.Contains(out, "/tmp/baz") ||
!strings.Contains(out, "/tmp/Dockerfile") {
c.Fatalf("Missing proper output: %s", out)
}
}
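// TestBuildFromOfficialNames verifies that FROM accepts each of the
// equivalent official-library reference forms for busybox.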
func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) {
name := "testbuildfromofficial"
fromNames := []string{
"busybox",
"docker.io/busybox",
"index.docker.io/busybox",
"library/busybox",
"docker.io/library/busybox",
"index.docker.io/library/busybox",
}
for idx, fromName := range fromNames {
imgName := fmt.Sprintf("%s%d", name, idx)
_, err := buildImage(imgName, "FROM "+fromName, true)
if err != nil {
c.Errorf("Build failed using FROM %s: %s", fromName, err)
}
deleteImages(imgName)
}
}
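// TestBuildDockerfileOutsideContext verifies that -f paths (direct or via
// symlinks) that point outside the build context are rejected.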
func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) {
testRequires(c, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2)
testRequires(c, DaemonIsLinux)
name := "testbuilddockerfileoutsidecontext"
tmpdir, err := ioutil.TempDir("", name)
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpdir)
ctx := filepath.Join(tmpdir, "context")
if err := os.MkdirAll(ctx, 0755); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil {
c.Fatal(err)
}
wd, err := os.Getwd()
if err != nil {
c.Fatal(err)
}
defer os.Chdir(wd)
if err := os.Chdir(ctx); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil {
c.Fatal(err)
}
if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil {
c.Fatal(err)
}
if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil {
c.Fatal(err)
}
for _, dockerfilePath := range []string{
filepath.Join("..", "outsideDockerfile"),
filepath.Join(ctx, "dockerfile1"),
filepath.Join(ctx, "dockerfile2"),
} {
out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".")
if err == nil {
c.Fatalf("Expected error with %s. Out: %s", dockerfilePath, out)
}
if !strings.Contains(out, "must be within the build context") && !strings.Contains(out, "Cannot locate Dockerfile") {
c.Fatalf("Unexpected error with %s. Out: %s", dockerfilePath, out)
}
deleteImages(name)
}
os.Chdir(tmpdir)
// Path to Dockerfile should be resolved relative to working directory, not relative to context.
// There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail
out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx)
if err == nil {
c.Fatalf("Expected error. Out: %s", out)
}
}
func (s *DockerSuite) TestBuildSpaces(c *check.C) {
// Test to make sure that leading/trailing spaces on a command
// don't change the error msg we get
var (
err1 error
err2 error
)
name := "testspaces"
ctx, err := fakeContext("FROM busybox\nCOPY\n",
map[string]string{
"Dockerfile": "FROM busybox\nCOPY\n",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err1 = buildImageFromContext(name, ctx, false); err1 == nil {
c.Fatal("Build 1 was supposed to fail, but didn't")
}
ctx.Add("Dockerfile", "FROM busybox\nCOPY ")
if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil {
c.Fatal("Build 2 was supposed to fail, but didn't")
}
removeLogTimestamps := func(s string) string {
return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`)
}
// Skip over the times
e1 := removeLogTimestamps(err1.Error())
e2 := removeLogTimestamps(err2.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2)
}
ctx.Add("Dockerfile", "FROM busybox\n COPY")
if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil {
c.Fatal("Build 3 was supposed to fail, but didn't")
}
// Skip over the times
e1 = removeLogTimestamps(err1.Error())
e2 = removeLogTimestamps(err2.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2)
}
ctx.Add("Dockerfile", "FROM busybox\n COPY ")
if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil {
c.Fatal("Build 4 was supposed to fail, but didn't")
}
// Skip over the times
e1 = removeLogTimestamps(err1.Error())
e2 = removeLogTimestamps(err2.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2)
}
}
func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) {
testRequires(c, DaemonIsLinux)
// Test to make sure that spaces in quotes aren't lost
name := "testspacesquotes"
dockerfile := `FROM busybox
RUN echo " \
foo "`
_, out, err := buildImageWithOut(name, dockerfile, false)
if err != nil {
c.Fatal("Build failed:", err)
}
expecting := "\n foo \n"
if !strings.Contains(out, expecting) {
c.Fatalf("Bad output: %q expecting to contain %q", out, expecting)
}
}
// #4393
func (s *DockerSuite) TestBuildVolumeFileExistsInContainer(c *check.C) {
testRequires(c, DaemonIsLinux) // TODO Windows: This should error out
buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-errcreatevolumewithfile", "-")
buildCmd.Stdin = strings.NewReader(`
FROM busybox
RUN touch /foo
VOLUME /foo
`)
out, _, err := runCommandWithOutput(buildCmd)
if err == nil || !strings.Contains(out, "file exists") {
c.Fatalf("expected build to fail when file exists in container at requested volume path")
}
}
func (s *DockerSuite) TestBuildMissingArgs(c *check.C) {
// Test to make sure that all Dockerfile commands (except the ones listed
// in skipCmds) will generate an error if no args are provided.
// Note: INSERT is deprecated so we exclude it because of that.
skipCmds := map[string]struct{}{
"CMD": {},
"RUN": {},
"ENTRYPOINT": {},
"INSERT": {},
}
if daemonPlatform == "windows" {
skipCmds = map[string]struct{}{
"CMD": {},
"RUN": {},
"ENTRYPOINT": {},
"INSERT": {},
"STOPSIGNAL": {},
"ARG": {},
"USER": {},
"EXPOSE": {},
}
}
for cmd := range command.Commands {
cmd = strings.ToUpper(cmd)
if _, ok := skipCmds[cmd]; ok {
continue
}
var dockerfile string
if cmd == "FROM" {
dockerfile = cmd
} else {
// Add FROM to make sure we don't complain about it missing
dockerfile = "FROM busybox\n" + cmd
}
ctx, err := fakeContext(dockerfile, map[string]string{})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
var out string
if out, err = buildImageFromContext("args", ctx, true); err == nil {
c.Fatalf("%s was supposed to fail. Out:%s", cmd, out)
}
if !strings.Contains(err.Error(), cmd+" requires") {
c.Fatalf("%s returned the wrong type of error:%s", cmd, err)
}
}
}
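// TestBuildEmptyScratch verifies that building "FROM scratch" with no further
// instructions fails because no image layers are generated.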
func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) {
testRequires(c, DaemonIsLinux)
_, out, err := buildImageWithOut("sc", "FROM scratch", true)
if err == nil {
c.Fatalf("Build was supposed to fail")
}
if !strings.Contains(out, "No image was generated") {
c.Fatalf("Wrong error message: %v", out)
}
}
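// TestBuildDotDotFile verifies that a context file whose name starts with ".."
// does not break the build.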
func (s *DockerSuite) TestBuildDotDotFile(c *check.C) {
ctx, err := fakeContext("FROM busybox\n",
map[string]string{
"..gitme": "",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err = buildImageFromContext("sc", ctx, false); err != nil {
c.Fatalf("Build was supposed to work: %s", err)
}
}
func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) {
testRequires(c, DaemonIsLinux) // No hello-world Windows image
name := "testbuildrunonejson"
ctx, err := fakeContext(`FROM hello-world:frozen
RUN [ "/hello" ]`, map[string]string{})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "-t", name, ".")
if err != nil {
c.Fatalf("failed to build the image: %s, %v", out, err)
}
if !strings.Contains(out, "Hello from Docker") {
c.Fatalf("bad output: %s", out)
}
}
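// TestBuildEmptyStringVolume verifies that VOLUME rejects a value that
// expands to an empty string.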
func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) {
name := "testbuildemptystringvolume"
_, err := buildImage(name, `
FROM busybox
ENV foo=""
VOLUME $foo
`, false)
if err == nil {
c.Fatal("Should have failed to build")
}
}
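// TestBuildContainerWithCgroupParent verifies that --cgroup-parent is applied
// to the build containers by checking /proc/self/cgroup from a RUN step.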
func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) {
testRequires(c, SameHostDaemon)
testRequires(c, DaemonIsLinux)
cgroupParent := "test"
data, err := ioutil.ReadFile("/proc/self/cgroup")
if err != nil {
c.Fatalf("failed to read '/proc/self/cgroup - %v", err)
}
selfCgroupPaths := parseCgroupPaths(string(data))
_, found := selfCgroupPaths["memory"]
if !found {
c.Fatalf("unable to find self memory cgroup path. CgroupsPath: %v", selfCgroupPaths)
}
cmd := exec.Command(dockerBinary, "build", "--cgroup-parent", cgroupParent, "-")
cmd.Stdin = strings.NewReader(`
FROM busybox
RUN cat /proc/self/cgroup
`)
out, _, err := runCommandWithOutput(cmd)
if err != nil {
c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
}
m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), out)
c.Assert(err, check.IsNil)
if !m {
c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, out)
}
}
func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) {
// Check to make sure our build output prints the Dockerfile cmd
// properly - there was a bug that caused it to be duplicated on the
// Step X line
name := "testbuildnodupoutput"
_, out, err := buildImageWithOut(name, `
FROM busybox
RUN env`, false)
if err != nil {
c.Fatalf("Build should have worked: %q", err)
}
exp := "\nStep 2 : RUN env\n"
if !strings.Contains(out, exp) {
c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp)
}
}
// GH15826
func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) {
// Explicit check to ensure that build starts from step 1 rather than 0
name := "testbuildstartsfromone"
_, out, err := buildImageWithOut(name, `
FROM busybox`, false)
if err != nil {
c.Fatalf("Build should have worked: %q", err)
}
exp := "\nStep 1 : FROM busybox\n"
if !strings.Contains(out, exp) {
c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp)
}
}
func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) {
// Test to make sure the bad command is quoted with plain double quotes
// and not rendered as a Go []string
name := "testbuildbadrunerrmsg"
_, out, err := buildImageWithOut(name, `
FROM busybox
RUN badEXE a1 \& a2 a3`, false) // tab between a2 and a3
if err == nil {
c.Fatal("Should have failed to build")
}
shell := "/bin/sh -c"
exitCode := "127"
if daemonPlatform == "windows" {
shell = "cmd /S /C"
// architectural - Windows has to start the container to determine the exe is bad, Linux does not
exitCode = "1"
}
exp := `The command '` + shell + ` badEXE a1 \& a2 a3' returned a non-zero code: ` + exitCode
if !strings.Contains(out, exp) {
c.Fatalf("RUN doesn't have the correct output:\nGot:%s\nExpected:%s", out, exp)
}
}
func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) {
repoName := s.setupTrustedImage(c, "trusted-build")
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, repoName)
name := "testtrustedbuild"
buildCmd := buildImageCmd(name, dockerFile, true)
s.trustedCmd(buildCmd)
out, _, err := runCommandWithOutput(buildCmd)
if err != nil {
c.Fatalf("Error running trusted build: %s\n%s", err, out)
}
if !strings.Contains(out, fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7])) {
c.Fatalf("Unexpected output on trusted build:\n%s", out)
}
// We should also have a tag reference for the image.
if out, exitCode := dockerCmd(c, "inspect", repoName); exitCode != 0 {
c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out)
}
// We should now be able to remove the tag reference.
if out, exitCode := dockerCmd(c, "rmi", repoName); exitCode != 0 {
c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out)
}
}
func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL)
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, repoName)
name := "testtrustedbuilduntrustedtag"
buildCmd := buildImageCmd(name, dockerFile, true)
s.trustedCmd(buildCmd)
out, _, err := runCommandWithOutput(buildCmd)
if err == nil {
c.Fatalf("Expected error on trusted build with untrusted tag: %s\n%s", err, out)
}
if !strings.Contains(out, "does not have trust data for") {
c.Fatalf("Unexpected output on trusted build with untrusted tag:\n%s", out)
}
}
func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) {
testRequires(c, DaemonIsLinux)
tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tempDir)
// Make a real context directory in this temp directory with a simple
// Dockerfile.
realContextDirname := filepath.Join(tempDir, "context")
if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil {
c.Fatal(err)
}
if err = ioutil.WriteFile(
filepath.Join(realContextDirname, "Dockerfile"),
[]byte(`
FROM busybox
RUN echo hello world
`),
os.FileMode(0644),
); err != nil {
c.Fatal(err)
}
// Make a symlink to the real context directory.
contextSymlinkName := filepath.Join(tempDir, "context_link")
if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil {
c.Fatal(err)
}
// Executing the build with the symlink as the specified context should
// *not* fail.
if out, exitStatus := dockerCmd(c, "build", contextSymlinkName); exitStatus != 0 {
c.Fatalf("build failed with exit status %d: %s", exitStatus, out)
}
}
func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) {
testRequires(c, NotaryHosting)
latestTag := s.setupTrustedImage(c, "trusted-build-releases-role")
repoName := strings.TrimSuffix(latestTag, ":latest")
// Now create the releases role
s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public)
s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private)
s.notaryPublish(c, repoName)
// push a different tag to the releases role
otherTag := fmt.Sprintf("%s:other", repoName)
dockerCmd(c, "tag", "busybox", otherTag)
pushCmd := exec.Command(dockerBinary, "push", otherTag)
s.trustedCmd(pushCmd)
out, _, err := runCommandWithOutput(pushCmd)
c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out))
s.assertTargetInRoles(c, repoName, "other", "targets/releases")
s.assertTargetNotInRoles(c, repoName, "other", "targets")
out, status := dockerCmd(c, "rmi", otherTag)
c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out))
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, otherTag)
name := "testtrustedbuildreleasesrole"
buildCmd := buildImageCmd(name, dockerFile, true)
s.trustedCmd(buildCmd)
out, _, err = runCommandWithOutput(buildCmd)
c.Assert(err, check.IsNil, check.Commentf("Trusted build failed: %s", out))
c.Assert(out, checker.Contains, fmt.Sprintf("FROM %s@sha", repoName))
}
func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) {
testRequires(c, NotaryHosting)
latestTag := s.setupTrustedImage(c, "trusted-build-releases-role")
repoName := strings.TrimSuffix(latestTag, ":latest")
// Now create a non-releases delegation role
s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public)
s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private)
s.notaryPublish(c, repoName)
// push a different tag to the other role
otherTag := fmt.Sprintf("%s:other", repoName)
dockerCmd(c, "tag", "busybox", otherTag)
pushCmd := exec.Command(dockerBinary, "push", otherTag)
s.trustedCmd(pushCmd)
out, _, err := runCommandWithOutput(pushCmd)
c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out))
s.assertTargetInRoles(c, repoName, "other", "targets/other")
s.assertTargetNotInRoles(c, repoName, "other", "targets")
out, status := dockerCmd(c, "rmi", otherTag)
c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out))
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, otherTag)
name := "testtrustedbuildotherrole"
buildCmd := buildImageCmd(name, dockerFile, true)
s.trustedCmd(buildCmd)
out, _, err = runCommandWithOutput(buildCmd)
c.Assert(err, check.NotNil, check.Commentf("Trusted build expected to fail: %s", out))
}
// Issue #15634: COPY fails when path starts with "null"
func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) {
name := "testbuildnullstringinaddcopyvolume"
volName := "nullvolume"
if daemonPlatform == "windows" {
volName = `C:\\nullvolume`
}
ctx, err := fakeContext(`
FROM busybox
ADD null /
COPY nullfile /
VOLUME `+volName+`
`,
map[string]string{
"null": "test1",
"nullfile": "test2",
},
)
c.Assert(err, check.IsNil)
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, true)
c.Assert(err, check.IsNil)
}
func (s *DockerSuite) TestBuildStopSignal(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support STOPSIGNAL yet
imgName := "test_build_stop_signal"
_, err := buildImage(imgName,
`FROM busybox
STOPSIGNAL SIGKILL`,
true)
c.Assert(err, check.IsNil)
res := inspectFieldJSON(c, imgName, "Config.StopSignal")
if res != `"SIGKILL"` {
c.Fatalf("Signal %s, expected SIGKILL", res)
}
containerName := "test-container-stop-signal"
dockerCmd(c, "run", "-d", "--name", containerName, imgName, "top")
res = inspectFieldJSON(c, containerName, "Config.StopSignal")
if res != `"SIGKILL"` {
c.Fatalf("Signal %s, expected SIGKILL", res)
}
}
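// TestBuildBuildTimeArg verifies that a --build-arg value is visible to RUN
// during the build but is not persisted into containers started from the
// resulting image.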
func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
args := []string{"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)}
var dockerfile string
if daemonPlatform == "windows" {
// Bugs in Windows busybox port - use the default base image and native cmd stuff
dockerfile = fmt.Sprintf(`FROM `+minimalBaseImage()+`
ARG %s
RUN echo %%%s%%
CMD setlocal enableextensions && if defined %s (echo %%%s%%)`, envKey, envKey, envKey, envKey)
} else {
dockerfile = fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s
CMD echo $%s`, envKey, envKey, envKey)
}
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal)
}
containerName := "bldargCont"
out, _ := dockerCmd(c, "run", "--name", containerName, imgName)
out = strings.Trim(out, " \r\n'")
if out != "" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envDef := "bar1"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=%s`, envKey, envDef)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal)
}
out, _ := dockerCmd(c, "history", "--no-trunc", imgName)
outputTabs := strings.Split(out, "\n")[1]
if !strings.Contains(outputTabs, envDef) {
c.Fatalf("failed to find arg default in image history output: %q expected: %q", outputTabs, envDef)
}
}
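// TestBuildBuildTimeArgCacheHit verifies that rebuilding with identical
// --build-arg values produces a cache hit (same image ID).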
func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s`, envKey, envKey)
origImgID := ""
var err error
if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil {
c.Fatal(err)
}
imgNameCache := "bldargtestcachehit"
if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID != origImgID {
if err != nil {
c.Fatal(err)
}
c.Fatalf("build didn't use cache! expected image id: %q built image id: %q", origImgID, newImgID)
}
}
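// TestBuildBuildTimeArgCacheMissExtraArg verifies that supplying an
// additional declared --build-arg invalidates the build cache.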
func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
extraEnvKey := "foo1"
extraEnvVal := "bar1"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ARG %s
RUN echo $%s`, envKey, extraEnvKey, envKey)
origImgID := ""
var err error
if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil {
c.Fatal(err)
}
imgNameCache := "bldargtestcachemiss"
args = append(args, "--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal))
if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID {
if err != nil {
c.Fatal(err)
}
c.Fatalf("build used cache, expected a miss!")
}
}
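// TestBuildBuildTimeArgCacheMissSameArgDiffVal verifies that changing the
// value of a --build-arg invalidates the build cache.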
func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
newEnvVal := "bar1"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s`, envKey, envKey)
origImgID := ""
var err error
if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil {
c.Fatal(err)
}
imgNameCache := "bldargtestcachemiss"
args = []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal),
}
if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID {
if err != nil {
c.Fatal(err)
}
c.Fatalf("build used cache, expected a miss!")
}
}
func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOveride := "barOverride"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ENV %s %s
RUN echo $%s
CMD echo $%s
`, envKey, envKey, envValOveride, envKey, envKey)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOveride := "barOverride"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ENV %s %s
ARG %s
RUN echo $%s
CMD echo $%s
`, envKey, envValOveride, envKey, envKey, envKey)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldvarstest"
wdVar := "WDIR"
wdVal := "/tmp/"
addVar := "AFILE"
addVal := "addFile"
copyVar := "CFILE"
copyVal := "copyFile"
envVar := "foo"
envVal := "bar"
exposeVar := "EPORT"
exposeVal := "9999"
userVar := "USER"
userVal := "testUser"
volVar := "VOL"
volVal := "/testVol/"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal),
"--build-arg", fmt.Sprintf("%s=%s", addVar, addVal),
"--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal),
"--build-arg", fmt.Sprintf("%s=%s", envVar, envVal),
"--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal),
"--build-arg", fmt.Sprintf("%s=%s", userVar, userVal),
"--build-arg", fmt.Sprintf("%s=%s", volVar, volVal),
}
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
ARG %s
WORKDIR ${%s}
ARG %s
ADD ${%s} testDir/
ARG %s
COPY $%s testDir/
ARG %s
ENV %s=${%s}
ARG %s
EXPOSE $%s
ARG %s
USER $%s
ARG %s
VOLUME ${%s}`,
wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar,
envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar),
map[string]string{
addVal: "some stuff",
copyVal: "some stuff",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(imgName, ctx, true, args...); err != nil {
c.Fatal(err)
}
var resMap map[string]interface{}
var resArr []string
res := ""
res = inspectField(c, imgName, "Config.WorkingDir")
if res != filepath.ToSlash(filepath.Clean(wdVal)) {
c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.ToSlash(filepath.Clean(wdVal)), res)
}
inspectFieldAndMarshall(c, imgName, "Config.Env", &resArr)
found := false
for _, v := range resArr {
if fmt.Sprintf("%s=%s", envVar, envVal) == v {
found = true
break
}
}
if !found {
c.Fatalf("Config.Env value mismatch. Expected <key=value> to exist: %s=%s, got: %v",
envVar, envVal, resArr)
}
inspectFieldAndMarshall(c, imgName, "Config.ExposedPorts", &resMap)
if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok {
c.Fatalf("Config.ExposedPorts value mismatch. Expected exposed port: %s/tcp, got: %v", exposeVal, resMap)
}
res = inspectField(c, imgName, "Config.User")
if res != userVal {
c.Fatalf("Config.User value mismatch. Expected: %s, got: %s", userVal, res)
}
inspectFieldAndMarshall(c, imgName, "Config.Volumes", &resMap)
if _, ok := resMap[volVal]; !ok {
c.Fatalf("Config.Volumes value mismatch. Expected volume: %s, got: %v", volVal, resMap)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldvarstest"
envKey := "foo"
envVal := "bar"
envKey1 := "foo1"
envValOveride := "barOverride"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ENV %s %s
ENV %s ${%s}
RUN echo $%s
CMD echo $%s`, envKey, envKey, envValOveride, envKey1, envKey, envKey1, envKey1)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
ARG %s
CMD echo $%s`, envKey, envKey, envKey)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Contains(out, envVal) {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("able to access environment variable in output: %q expected to be missing", out)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support --build-arg
imgName := "bldargtest"
envKey := "HTTP_PROXY"
envVal := "bar"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
CMD echo $%s`, envKey, envKey)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOveride := "barOverride"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envValOveride),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=%s
ENV %s $%s
RUN echo $%s
CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 1 {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support --build-arg
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
CMD echo $%s`, envKey, envKey)
errStr := "One or more build-args"
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err == nil {
c.Fatalf("build succeeded, expected to fail. Output: %v", out)
} else if !strings.Contains(out, errStr) {
c.Fatalf("Unexpected error. output: %q, expected error: %q", out, errStr)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envKey1 := "foo1"
envKey2 := "foo2"
envKey3 := "foo3"
args := []string{}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=""
ARG %s=''
ARG %s="''"
ARG %s='""'
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]`, envKey, envKey1, envKey2, envKey3,
envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3,
envKey2, envKey3)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envKey1 := "foo1"
envKey2 := "foo2"
args := []string{}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=
ARG %s=""
ARG %s=''
RUN [ "$%s" == "$%s" ]
RUN [ "$%s" == "$%s" ]
RUN [ "$%s" == "$%s" ]`, envKey, envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgDefinitionWithNoEnvInjection(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
args := []string{}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN env`, envKey)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envKey) != 1 {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", out)
}
}
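// TestBuildNoNamedVolume verifies that data in a named volume is not
// available to RUN steps during build, so this build is expected to fail.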
func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) {
volName := "testname:/foo"
if daemonPlatform == "windows" {
volName = "testname:C:\\foo"
}
dockerCmd(c, "run", "-v", volName, "busybox", "sh", "-c", "touch /foo/oops")
dockerFile := `FROM busybox
VOLUME ` + volName + `
RUN ls /foo/oops
`
_, err := buildImage("test", dockerFile, false)
c.Assert(err, check.NotNil, check.Commentf("image build should have failed"))
}
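// TestBuildTagEvent verifies that building an image emits a "tag" event for
// the resulting image.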
func (s *DockerSuite) TestBuildTagEvent(c *check.C) {
since := daemonUnixTime(c)
dockerFile := `FROM busybox
RUN echo events
`
_, err := buildImage("test", dockerFile, false)
c.Assert(err, check.IsNil)
until := daemonUnixTime(c)
out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "type=image")
events := strings.Split(strings.TrimSpace(out), "\n")
actions := eventActionsByIDAndType(c, events, "test:latest", "image")
var foundTag bool
for _, a := range actions {
if a == "tag" {
foundTag = true
break
}
}
c.Assert(foundTag, checker.True, check.Commentf("No tag event found:\n%s", out))
}
// #15780
func (s *DockerSuite) TestBuildMultipleTags(c *check.C) {
dockerfile := `
FROM busybox
MAINTAINER test-15780
`
cmd := exec.Command(dockerBinary, "build", "-t", "tag1", "-t", "tag2:v2",
"-t", "tag1:latest", "-t", "tag1", "--no-cache", "-")
cmd.Stdin = strings.NewReader(dockerfile)
_, err := runCommand(cmd)
c.Assert(err, check.IsNil)
id1, err := getIDByName("tag1")
c.Assert(err, check.IsNil)
id2, err := getIDByName("tag2:v2")
c.Assert(err, check.IsNil)
c.Assert(id1, check.Equals, id2)
}
// #17290
func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildbrokensymlink"
ctx, err := fakeContext(`
FROM busybox
COPY . ./`,
map[string]string{
"foo": "bar",
})
c.Assert(err, checker.IsNil)
defer ctx.Close()
err = os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
// warm up cache
_, err = buildImageFromContext(name, ctx, true)
c.Assert(err, checker.IsNil)
// add new file to context, should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644)
c.Assert(err, checker.IsNil)
_, out, err := buildImageFromContextWithOut(name, ctx, true)
c.Assert(err, checker.IsNil)
c.Assert(out, checker.Not(checker.Contains), "Using cache")
}
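// TestBuildFollowSymlinkToFile verifies that COPY follows a symlink to a file
// and that modifying the target file invalidates the cache.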
func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildbrokensymlink"
ctx, err := fakeContext(`
FROM busybox
COPY asymlink target`,
map[string]string{
"foo": "bar",
})
c.Assert(err, checker.IsNil)
defer ctx.Close()
err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
id, err := buildImageFromContext(name, ctx, true)
c.Assert(err, checker.IsNil)
out, _ := dockerCmd(c, "run", "--rm", id, "cat", "target")
c.Assert(out, checker.Matches, "bar")
// change target file should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644)
c.Assert(err, checker.IsNil)
id, out, err = buildImageFromContextWithOut(name, ctx, true)
c.Assert(err, checker.IsNil)
c.Assert(out, checker.Not(checker.Contains), "Using cache")
out, _ = dockerCmd(c, "run", "--rm", id, "cat", "target")
c.Assert(out, checker.Matches, "baz")
}
func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildbrokensymlink"
ctx, err := fakeContext(`
FROM busybox
COPY asymlink /`,
map[string]string{
"foo/abc": "bar",
"foo/def": "baz",
})
c.Assert(err, checker.IsNil)
defer ctx.Close()
err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
id, err := buildImageFromContext(name, ctx, true)
c.Assert(err, checker.IsNil)
out, _ := dockerCmd(c, "run", "--rm", id, "cat", "abc", "def")
c.Assert(out, checker.Matches, "barbaz")
// change target file should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644)
c.Assert(err, checker.IsNil)
id, out, err = buildImageFromContextWithOut(name, ctx, true)
c.Assert(err, checker.IsNil)
c.Assert(out, checker.Not(checker.Contains), "Using cache")
out, _ = dockerCmd(c, "run", "--rm", id, "cat", "abc", "def")
c.Assert(out, checker.Matches, "barbax")
}
// TestBuildSymlinkBasename tests that the copied file is named after the
// symlink, not after the symlink's target file.
func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildbrokensymlink"
ctx, err := fakeContext(`
FROM busybox
COPY asymlink /`,
map[string]string{
"foo": "bar",
})
c.Assert(err, checker.IsNil)
defer ctx.Close()
err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
id, err := buildImageFromContext(name, ctx, true)
c.Assert(err, checker.IsNil)
out, _ := dockerCmd(c, "run", "--rm", id, "cat", "asymlink")
c.Assert(out, checker.Matches, "bar")
}
// #17827
func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) {
name := "testbuildrootsource"
ctx, err := fakeContext(`
FROM busybox
COPY / /data`,
map[string]string{
"foo": "bar",
})
c.Assert(err, checker.IsNil)
defer ctx.Close()
// warm up cache
_, err = buildImageFromContext(name, ctx, true)
c.Assert(err, checker.IsNil)
// change file, should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644)
c.Assert(err, checker.IsNil)
_, out, err := buildImageFromContextWithOut(name, ctx, true)
c.Assert(err, checker.IsNil)
c.Assert(out, checker.Not(checker.Contains), "Using cache")
}
// #19375
func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) {
cmd := exec.Command(dockerBinary, "build", "github.com/docker/v1.10-migrator.git")
cmd.Env = append(cmd.Env, "PATH=")
out, _, err := runCommandWithOutput(cmd)
c.Assert(err, checker.NotNil)
c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ")
cmd = exec.Command(dockerBinary, "build", "https://github.com/docker/v1.10-migrator.git")
cmd.Env = append(cmd.Env, "PATH=")
out, _, err = runCommandWithOutput(cmd)
c.Assert(err, checker.NotNil)
c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ")
}
// TestBuildWorkdirWindowsPath tests that a Windows style path works as a workdir
func (s *DockerSuite) TestBuildWorkdirWindowsPath(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildworkdirwindowspath"
_, err := buildImage(name, `
FROM `+WindowsBaseImage+`
RUN mkdir C:\\work
WORKDIR C:\\work
RUN if "%CD%" NEQ "C:\work" exit -1
`, true)
if err != nil {
c.Fatal(err)
}
}
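// TestBuildLabel verifies that a label passed via --label shows up in the
// built image's Config.Labels.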
func (s *DockerSuite) TestBuildLabel(c *check.C) {
name := "testbuildlabel"
testLabel := "foo"
_, err := buildImage(name, `
FROM `+minimalBaseImage()+`
LABEL default foo
`, false, "--label", testLabel)
c.Assert(err, checker.IsNil)
res := inspectFieldJSON(c, name, "Config.Labels")
var labels map[string]string
if err := json.Unmarshal([]byte(res), &labels); err != nil {
c.Fatal(err)
}
if _, ok := labels[testLabel]; !ok {
c.Fatal("label not found in image")
}
}
func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) {
name := "testbuildlabel"
_, err := buildImage(name, "FROM busybox", false, "--label", "foo=bar")
c.Assert(err, checker.IsNil)
res, err := inspectImage(name, "json .Config.Labels")
c.Assert(err, checker.IsNil)
var labels map[string]string
if err := json.Unmarshal([]byte(res), &labels); err != nil {
c.Fatal(err)
}
v, ok := labels["foo"]
if !ok {
c.Fatal("label `foo` not found in image")
}
c.Assert(v, checker.Equals, "bar")
}
func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) {
name := "testbuildlabelcachecommit"
testLabel := "foo"
if _, err := buildImage(name, `
FROM `+minimalBaseImage()+`
LABEL default foo
`, false); err != nil {
c.Fatal(err)
}
_, err := buildImage(name, `
FROM `+minimalBaseImage()+`
LABEL default foo
`, true, "--label", testLabel)
c.Assert(err, checker.IsNil)
res := inspectFieldJSON(c, name, "Config.Labels")
var labels map[string]string
if err := json.Unmarshal([]byte(res), &labels); err != nil {
c.Fatal(err)
}
if _, ok := labels[testLabel]; !ok {
c.Fatal("label not found in image")
}
}
func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) {
name := "testbuildlabelmultiple"
testLabels := map[string]string{
"foo": "bar",
"123": "456",
}
labelArgs := []string{}
for k, v := range testLabels {
labelArgs = append(labelArgs, "--label", k+"="+v)
}
_, err := buildImage(name, `
FROM `+minimalBaseImage()+`
LABEL default foo
`, false, labelArgs...)
if err != nil {
c.Fatal("error building image with labels", err)
}
res := inspectFieldJSON(c, name, "Config.Labels")
var labels map[string]string
if err := json.Unmarshal([]byte(res), &labels); err != nil {
c.Fatal(err)
}
for k, v := range testLabels {
if x, ok := labels[k]; !ok || x != v {
c.Fatalf("label %s=%s not found in image", k, v)
}
}
}
func (s *DockerSuite) TestBuildLabelOverwrite(c *check.C) {
name := "testbuildlabeloverwrite"
testLabel := "foo"
testValue := "bar"
_, err := buildImage(name, `
FROM `+minimalBaseImage()+`
LABEL `+testLabel+`+ foo
`, false, []string{"--label", testLabel + "=" + testValue}...)
if err != nil {
c.Fatal("error building image with labels", err)
}
res := inspectFieldJSON(c, name, "Config.Labels")
var labels map[string]string
if err := json.Unmarshal([]byte(res), &labels); err != nil {
c.Fatal(err)
}
v, ok := labels[testLabel]
if !ok {
c.Fatal("label not found in image")
}
if v != testValue {
c.Fatal("label not overwritten")
}
}
func (s *DockerRegistryAuthHtpasswdSuite) TestBuildFromAuthenticatedRegistry(c *check.C) {
dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL)
baseImage := privateRegistryURL + "/baseimage"
_, err := buildImage(baseImage, `
FROM busybox
ENV env1 val1
`, true)
c.Assert(err, checker.IsNil)
dockerCmd(c, "push", baseImage)
dockerCmd(c, "rmi", baseImage)
_, err = buildImage(baseImage, fmt.Sprintf(`
FROM %s
ENV env2 val2
`, baseImage), true)
c.Assert(err, checker.IsNil)
}
func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) {
osPath := os.Getenv("PATH")
defer os.Setenv("PATH", osPath)
workingDir, err := os.Getwd()
c.Assert(err, checker.IsNil)
absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth"))
c.Assert(err, checker.IsNil)
testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute)
os.Setenv("PATH", testPath)
repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL)
tmp, err := ioutil.TempDir("", "integration-cli-")
c.Assert(err, checker.IsNil)
externalAuthConfig := `{ "credsStore": "shell-test" }`
configPath := filepath.Join(tmp, "config.json")
err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644)
c.Assert(err, checker.IsNil)
dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL)
b, err := ioutil.ReadFile(configPath)
c.Assert(err, checker.IsNil)
c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":")
dockerCmd(c, "--config", tmp, "tag", "busybox", repoName)
dockerCmd(c, "--config", tmp, "push", repoName)
// make sure the image is pulled when building
dockerCmd(c, "rmi", repoName)
buildCmd := exec.Command(dockerBinary, "--config", tmp, "build", "-")
buildCmd.Stdin = strings.NewReader(fmt.Sprintf("FROM %s", repoName))
out, _, err := runCommandWithOutput(buildCmd)
c.Assert(err, check.IsNil, check.Commentf(out))
}
// Test cases in #22036
func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) {
testRequires(c, DaemonIsLinux)
// Command line option labels will always override
name := "scratchy"
expected := `{"bar":"from-flag","foo":"from-flag"}`
_, err := buildImage(name,
`FROM scratch
LABEL foo=from-dockerfile`,
true, "--label", "foo=from-flag", "--label", "bar=from-flag")
c.Assert(err, check.IsNil)
res := inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
name = "from"
expected = `{"foo":"from-dockerfile"}`
_, err = buildImage(name,
`FROM scratch
LABEL foo from-dockerfile`,
true)
c.Assert(err, check.IsNil)
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option label will override even via `FROM`
name = "new"
expected = `{"bar":"from-dockerfile2","foo":"new"}`
_, err = buildImage(name,
`FROM from
LABEL bar from-dockerfile2`,
true, "--label", "foo=new")
c.Assert(err, check.IsNil)
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option without a value set (--label foo, --label bar=)
// will be treated as --label foo="", --label bar=""
name = "scratchy2"
expected = `{"bar":"","foo":""}`
_, err = buildImage(name,
`FROM scratch
LABEL foo=from-dockerfile`,
true, "--label", "foo", "--label", "bar=")
c.Assert(err, check.IsNil)
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option without a value set (--label foo, --label bar=)
// will be treated as --label foo="", --label bar=""
// This time is for inherited images
name = "new2"
expected = `{"bar":"","foo":""}`
_, err = buildImage(name,
`FROM from
LABEL bar from-dockerfile2`,
true, "--label", "foo=", "--label", "bar")
c.Assert(err, check.IsNil)
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option labels with only `FROM`
name = "scratchy"
expected = `{"bar":"from-flag","foo":"from-flag"}`
_, err = buildImage(name,
`FROM scratch`,
true, "--label", "foo=from-flag", "--label", "bar=from-flag")
c.Assert(err, check.IsNil)
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
}
// Test case for #22855
func (s *DockerSuite) TestBuildDeleteCommittedFile(c *check.C) {
name := "test-delete-committed-file"
_, err := buildImage(name,
`FROM busybox
RUN echo test > file
RUN test -e file
RUN rm file
RUN sh -c "! test -e file"`, false)
if err != nil {
c.Fatal(err)
}
}
// #20083
func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) {
// TODO Windows: Figure out why this test is flaky on TP5. If you add
// something like RUN sleep 5, or even RUN ls /tmp after the ADD line,
// it is more reliable, but that's not a good fix.
testRequires(c, DaemonIsLinux)
name := "testbuilddockerignorecleanpaths"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "(ls -la /tmp/#1)"
RUN sh -c "(! ls -la /tmp/#2)"
RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (ls /tmp/dir1/foo)"`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "foo",
"foo2": "foo2",
"dir1/foo": "foo in dir1",
"#1": "# file 1",
"#2": "# file 2",
".dockerignore": `# Visual C++ cache files
# because we have git ;-)
# The above comment is from #20083
foo
#dir1/foo
foo2
# The following is considered as comment as # is at the beginning
#1
# The following is not considered as comment as # is not at the beginning
#2
`,
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// Test case for #23221
func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) {
name := "test-with-utf8-bom"
dockerfile := []byte(`FROM busybox`)
bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...)
ctx, err := fakeContextFromNewTempDir()
c.Assert(err, check.IsNil)
defer ctx.Close()
err = ctx.addFile("Dockerfile", bomDockerfile)
c.Assert(err, check.IsNil)
_, err = buildImageFromContext(name, ctx, true)
c.Assert(err, check.IsNil)
}
// Test case for UTF-8 BOM in .dockerignore, related to #23221
func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) {
name := "test-with-utf8-bom-dockerignore"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN ls -la /tmp
RUN sh -c "! ls /tmp/Dockerfile"
RUN ls /tmp/.dockerignore`
dockerignore := []byte("./Dockerfile\n")
bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...)
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": dockerfile,
})
c.Assert(err, check.IsNil)
defer ctx.Close()
err = ctx.addFile(".dockerignore", bomDockerignore)
c.Assert(err, check.IsNil)
_, err = buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
}
// #22489 Shell test to confirm config gets updated correctly
func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) {
name := "testbuildshellupdatesconfig"
expected := `["foo","-bar","#(nop) ","SHELL [foo -bar]"]`
_, err := buildImage(name,
`FROM `+minimalBaseImage()+`
SHELL ["foo", "-bar"]`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "ContainerConfig.Cmd")
if res != expected {
c.Fatalf("%s, expected %s", res, expected)
}
res = inspectFieldJSON(c, name, "ContainerConfig.Shell")
if res != `["foo","-bar"]` {
c.Fatalf(`%s, expected ["foo","-bar"]`, res)
}
}
// #22489 Changing the shell multiple times and CMD after.
func (s *DockerSuite) TestBuildShellMultiple(c *check.C) {
name := "testbuildshellmultiple"
_, out, _, err := buildImageWithStdoutStderr(name,
`FROM busybox
RUN echo defaultshell
SHELL ["echo"]
RUN echoshell
SHELL ["ls"]
RUN -l
CMD -l`,
true)
if err != nil {
c.Fatal(err)
}
// Must contain 'defaultshell' twice
if len(strings.Split(out, "defaultshell")) != 3 {
c.Fatalf("defaultshell should have appeared twice in %s", out)
}
// Must contain 'echoshell' twice
if len(strings.Split(out, "echoshell")) != 3 {
c.Fatalf("echoshell should have appeared twice in %s", out)
}
// Must contain "total " (part of ls -l)
if !strings.Contains(out, "total ") {
c.Fatalf("%s should have contained 'total '", out)
}
// A container started from the image uses the shell-form CMD.
// Last shell is ls. CMD is -l. So should contain 'total '.
outrun, _ := dockerCmd(c, "run", "--rm", name)
if !strings.Contains(outrun, "total ") {
c.Fatalf("Expected started container to run ls -l. %s", outrun)
}
}
// #22489. Changed SHELL with ENTRYPOINT
func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) {
name := "testbuildshellentrypoint"
_, err := buildImage(name,
`FROM busybox
SHELL ["ls"]
ENTRYPOINT -l`,
true)
if err != nil {
c.Fatal(err)
}
// A container started from the image uses the shell-form ENTRYPOINT.
// Shell is ls. ENTRYPOINT is -l. So should contain 'total '.
outrun, _ := dockerCmd(c, "run", "--rm", name)
if !strings.Contains(outrun, "total ") {
c.Fatalf("Expected started container to run ls -l. %s", outrun)
}
}
// #22489 Shell test to confirm shell is inherited in a subsequent build
func (s *DockerSuite) TestBuildShellInherited(c *check.C) {
name1 := "testbuildshellinherited1"
_, err := buildImage(name1,
`FROM busybox
SHELL ["ls"]`,
true)
if err != nil {
c.Fatal(err)
}
name2 := "testbuildshellinherited2"
_, out, _, err := buildImageWithStdoutStderr(name2,
`FROM `+name1+`
RUN -l`,
true)
if err != nil {
c.Fatal(err)
}
// ls -l has "total " followed by some number in it, ls without -l does not.
if !strings.Contains(out, "total ") {
c.Fatalf("Should have seen total in 'ls -l'.\n%s", out)
}
}
// #22489 Shell test to confirm non-JSON doesn't work
func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) {
name := "testbuildshellnotjson"
_, err := buildImage(name,
`FROM `+minimalBaseImage()+`
sHeLl exec -form`, // Casing explicit to ensure error is upper-cased.
true)
if err == nil {
c.Fatal("Image build should have failed")
}
if !strings.Contains(err.Error(), "SHELL requires the arguments to be in JSON form") {
c.Fatal("Error didn't indicate that arguments must be in JSON form")
}
}
// #22489 Windows shell test to confirm native is powershell if executing a PS command
// This would error if the default shell were still cmd.
func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildshellpowershell"
_, out, err := buildImageWithOut(name,
`FROM `+minimalBaseImage()+`
SHELL ["powershell", "-command"]
RUN Write-Host John`,
true)
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "\nJohn\n") {
c.Fatalf("Line with 'John' not found in output %q", out)
}
}
// #22868. Make sure shell-form CMD is marked as escaped in the config of the image
func (s *DockerSuite) TestBuildCmdShellArgsEscaped(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildcmdshellescaped"
_, err := buildImage(name, `
FROM `+minimalBaseImage()+`
CMD "ipconfig"
`, true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.ArgsEscaped")
if res != "true" {
c.Fatalf("CMD did not update Config.ArgsEscaped on image: %v", res)
}
dockerCmd(c, "run", "--name", "inspectme", name)
dockerCmd(c, "wait", "inspectme")
res = inspectFieldJSON(c, name, "Config.Cmd")
if res != `["cmd","/S","/C","\"ipconfig\""]` {
c.Fatalf("CMD was not escaped Config.Cmd: got %v", res)
}
}
| ["\"PATH\""] | [] | ["PATH"] | [] | ["PATH"] | go | 1 | 0 |
hello_fastapi_project/hello_fastapi/backend/app/alembic/env.py | from __future__ import with_statement
import os
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# target_metadata = None
from app.db.base import Base # noqa
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
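# get_url builds the SQLAlchemy connection URL from environment variables,
# falling back to local-development defaults.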
def get_url():
user = os.getenv("POSTGRES_USER", "postgres")
password = os.getenv("POSTGRES_PASSWORD", "1234")
server = os.getenv("POSTGRES_SERVER", "127.0.0.1:5432")
db = os.getenv("POSTGRES_DB", "hello_fastapi_db")
return f"postgresql://{user}:{password}@{server}/{db}"
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = get_url()
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True, compare_type=True
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
configuration = config.get_section(config.config_ini_section)
configuration["sqlalchemy.url"] = get_url()
connectable = engine_from_config(
configuration, prefix="sqlalchemy.", poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata, compare_type=True
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| [] | [] | ["POSTGRES_SERVER", "POSTGRES_PASSWORD", "POSTGRES_USER", "POSTGRES_DB"] | [] | ["POSTGRES_SERVER", "POSTGRES_PASSWORD", "POSTGRES_USER", "POSTGRES_DB"] | python | 4 | 0 |
gateway/gateway.go | package gateway
import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
"mime"
"net/http"
"os"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/rakyll/statik/fs"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"github.com/mrisney/grpc-gateway-boilerplate/insecure"
pbExample "github.com/mrisney/grpc-gateway-boilerplate/proto"
// Static files
_ "github.com/mrisney/grpc-gateway-boilerplate/statik"
)
// getOpenAPIHandler serves an OpenAPI UI.
// Adapted from https://github.com/philips/grpc-gateway-example/blob/a269bcb5931ca92be0ceae6130ac27ae89582ecc/cmd/serve.go#L63
func getOpenAPIHandler() http.Handler {
mime.AddExtensionType(".svg", "image/svg+xml")
statikFS, err := fs.New()
if err != nil {
// Panic since this is a permanent error.
panic("creating OpenAPI filesystem: " + err.Error())
}
return http.FileServer(statikFS)
}
// Run runs the gRPC-Gateway, dialling the provided address.
func Run(dialAddr string) error {
// Adds gRPC internal logs. This is quite verbose, so adjust as desired!
log := grpclog.NewLoggerV2(os.Stdout, ioutil.Discard, ioutil.Discard)
grpclog.SetLoggerV2(log)
// Create a client connection to the gRPC Server we just started.
// This is where the gRPC-Gateway proxies the requests.
conn, err := grpc.DialContext(
context.Background(),
dialAddr,
grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(insecure.CertPool, "")),
grpc.WithBlock(),
)
if err != nil {
return fmt.Errorf("failed to dial server: %w", err)
}
gwmux := runtime.NewServeMux()
err = pbExample.RegisterUserServiceHandler(context.Background(), gwmux, conn)
if err != nil {
return fmt.Errorf("failed to register gateway: %w", err)
}
oa := getOpenAPIHandler()
port := os.Getenv("PORT")
if port == "" {
port = "11000"
}
gatewayAddr := "0.0.0.0:" + port
gwServer := &http.Server{
Addr: gatewayAddr,
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.HasPrefix(r.URL.Path, "/api") {
gwmux.ServeHTTP(w, r)
return
}
oa.ServeHTTP(w, r)
}),
}
// Empty parameters mean use the TLS Config specified with the server.
if strings.ToLower(os.Getenv("SERVE_HTTP")) == "true" {
log.Info("Serving gRPC-Gateway and OpenAPI Documentation on http://", gatewayAddr)
return fmt.Errorf("serving gRPC-Gateway server: %w", gwServer.ListenAndServe())
}
gwServer.TLSConfig = &tls.Config{
Certificates: []tls.Certificate{insecure.Cert},
}
log.Info("Serving gRPC-Gateway and OpenAPI Documentation on https://", gatewayAddr)
return fmt.Errorf("serving gRPC-Gateway server: %w", gwServer.ListenAndServeTLS("", ""))
}
| [
"\"PORT\"",
"\"SERVE_HTTP\""
]
| []
| [
"PORT",
"SERVE_HTTP"
]
| [] | ["PORT", "SERVE_HTTP"] | go | 2 | 0 | |
ivy_tests/test_ivy/conftest.py | # global
import os
import pytest
from typing import Dict
from hypothesis import settings
settings.register_profile("default", max_examples=100, deadline=None)
settings.load_profile("default")
# local
from ivy_tests.test_ivy import helpers
from ivy import clear_framework_stack, DefaultDevice
FW_STRS = ["numpy", "jax", "tensorflow", "torch", "mxnet"]
TEST_FRAMEWORKS: Dict[str, callable] = {
"numpy": lambda: helpers.get_ivy_numpy(),
"jax": lambda: helpers.get_ivy_jax(),
"tensorflow": lambda: helpers.get_ivy_tensorflow(),
"torch": lambda: helpers.get_ivy_torch(),
"mxnet": lambda: helpers.get_ivy_mxnet(),
}
TEST_CALL_METHODS: Dict[str, callable] = {
"numpy": helpers.np_call,
"jax": helpers.jnp_call,
"tensorflow": helpers.tf_call,
"torch": helpers.torch_call,
"mxnet": helpers.mx_call,
}
if "ARRAY_API_TESTS_MODULE" not in os.environ:
os.environ["ARRAY_API_TESTS_MODULE"] = "ivy.functional.backends.numpy"
@pytest.fixture(autouse=True)
def run_around_tests(device, f, compile_graph, implicit, call, fw):
if "gpu" in device and call is helpers.np_call:
# Numpy does not support GPU
pytest.skip()
clear_framework_stack()
with f.use:
with DefaultDevice(device):
yield
def pytest_generate_tests(metafunc):
# device
raw_value = metafunc.config.getoption("--device")
if raw_value == "all":
devices = ["cpu", "gpu:0", "tpu:0"]
else:
devices = raw_value.split(",")
# framework
raw_value = metafunc.config.getoption("--framework")
if raw_value == "all":
f_strs = TEST_FRAMEWORKS.keys()
else:
f_strs = raw_value.split(",")
# compile_graph
raw_value = metafunc.config.getoption("--compile_graph")
if raw_value == "both":
compile_modes = [True, False]
elif raw_value == "true":
compile_modes = [True]
else:
compile_modes = [False]
# implicit
raw_value = metafunc.config.getoption("--with_implicit")
if raw_value == "true":
implicit_modes = [True, False]
else:
implicit_modes = [False]
# create test configs
configs = list()
for f_str in f_strs:
for device in devices:
for compile_graph in compile_modes:
for implicit in implicit_modes:
configs.append(
(
device,
TEST_FRAMEWORKS[f_str](),
compile_graph,
implicit,
TEST_CALL_METHODS[f_str],
f_str,
)
)
metafunc.parametrize("device,f,compile_graph,implicit,call,fw", configs)
def pytest_addoption(parser):
parser.addoption("--device", action="store", default="cpu")
parser.addoption(
"--framework", action="store", default="jax,numpy,tensorflow,torch"
)
parser.addoption("--compile_graph", action="store", default="true")
parser.addoption("--with_implicit", action="store", default="false")
| []
| []
| [
"ARRAY_API_TESTS_MODULE"
]
| [] | ["ARRAY_API_TESTS_MODULE"] | python | 1 | 0 | |
agent/app/balena/operator.py | # -*- coding: utf-8 -*-
import os
from dataclasses import dataclass
from typing import Optional
import requests
@dataclass
class Operator:
"""The Operator is used to interact with the Balena Supervisor API.
The operator is connected to the Supervisor API via REST, and is
used to fetch and send data to the API.
Attributes:
supervisor_address: The base url of the Supervisor API.
host_config_endpoint: The Supervisor endpoint to access the host config.
reboot_endpoint: The Supervisor endpoint to reboot the device.
supervisor_api_key: The API key to authenticate against the Supervisor.
host_config_url: The full url to access the host config.
reboot_url: The full url to reboot the device.
session: A session to improve performance.
Methods:
get_hostname: Get the hostname.
set_hostname: Set the hostname and reboot.
"""
supervisor_address: Optional[str] = os.getenv("BALENA_SUPERVISOR_ADDRESS")
host_config_endpoint: str = "/v1/device/host-config?apikey="
reboot_endpoint: str = "/v1/reboot?apikey="
supervisor_api_key: Optional[str] = os.getenv("BALENA_SUPERVISOR_API_KEY")
host_config_url: str = ""
reboot_url: str = ""
session: requests.Session = requests.Session()
def __post_init__(self) -> None:
"""Initialize the Operator.
Setup the base urls of the API and open-up a session.
"""
self.host_config_url = f"{self.supervisor_address}{self.host_config_endpoint}{self.supervisor_api_key}"
self.reboot_url = (
f"{self.supervisor_address}{self.reboot_endpoint}{self.supervisor_api_key}"
)
def get_hostname(self) -> Optional[str]:
"""Fetch hostname from the Supervisor API.
It is used to fetch the current hostname coming from
the Supervisor API in order to check if the hostname is correct.
Returns:
The current hostname
"""
return (
self.session.get(url=self.host_config_url)
.json()
.get("network")
.get("hostname")
)
def set_hostname(self, hostname: str) -> None:
"""Send the hostname to the Supervisor API.
It is used to update the hostname of the device.
If need be, it will also reboot the device for the hostname to be flushed.
Args:
hostname:
The hostname to be set on the device.
"""
if not hostname == self.get_hostname():
print(f"setting hostname to {hostname} and rebooting...")
data: dict = {"network": {"hostname": hostname}}
headers: dict = {"Content-Type": "application/json"}
self.session.patch(url=self.host_config_url, json=data, headers=headers)
self.session.post(url=self.reboot_url, headers=headers)
else:
print(f"hostname already set to {hostname}. skipping... ")
| []
| []
| [
"BALENA_SUPERVISOR_ADDRESS",
"BALENA_SUPERVISOR_API_KEY"
]
| [] | ["BALENA_SUPERVISOR_ADDRESS", "BALENA_SUPERVISOR_API_KEY"] | python | 2 | 0 | |
examples/external_models/turicreate/run_turicreate.py | import os
os.environ["OMP_NUM_THREADS"] = "10"
import sys
import pandas as pd
import numpy as np
import turicreate as tc
for i in range(1, 14):
print("running batch %d" % i)
batch = pd.read_csv("batches/batch_%d_train.dat" % i)
test_users = pd.read_csv("batches/batch_%d_test.dat" % i)
model = tc.ranking_factorization_recommender.create(
tc.SFrame(batch),
'user',
'item',
num_factors=10,
verbose=True,
solver='ials',
max_iterations=50,
ials_confidence_scaling_factor=30
)
results = model.recommend(users=test_users.user.values, k=100, exclude_known=True, verbose=False)
    results.to_dataframe()[['user', 'item', 'rank']].to_csv('batches/batch_%d_predictions.dat' % i, sep=' ', header=False, index=False)
| []
| []
| [
"OMP_NUM_THREADS"
]
| [] | ["OMP_NUM_THREADS"] | python | 1 | 0 |