filename (string) | content (string) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string) | constargjson (string) | lang (string) | constargcount (float64) | variableargcount (float64) | sentence (string) |
---|---|---|---|---|---|---|---|---|---|---|
test/run_test.py | #!/usr/bin/env python3
import argparse
import copy
from datetime import datetime
from distutils.util import strtobool
from distutils.version import LooseVersion
import functools
import os
import pathlib
import shutil
import signal
import subprocess
import sys
import tempfile
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import (
FILE_SCHEMA,
IS_IN_CI,
TEST_WITH_ROCM,
shell,
set_cwd,
parser as common_parser,
)
import torch.distributed as dist
from typing import Dict, Optional, List
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent
try:
# using tools/ to optimize test run.
sys.path.append(str(REPO_ROOT))
from tools.testing.test_selections import (
export_S3_test_times,
get_shard_based_on_S3,
# NS: Disable target determination
# get_slow_tests_based_on_S3,
get_specified_test_cases,
get_reordered_tests,
get_test_case_configs,
)
# NS: Disable target determination
# from tools.testing.modulefinder_determinator import (
# should_run_test,
# TARGET_DET_LIST,
# )
HAVE_TEST_SELECTION_TOOLS = True
except ImportError:
HAVE_TEST_SELECTION_TOOLS = False
print(
"Unable to import test_selections from tools/testing. Running without test selection stats..."
)
def discover_tests(
base_dir: Optional[pathlib.Path] = None,
blocklisted_patterns: Optional[List[str]] = None,
blocklisted_tests: Optional[List[str]] = None,
extra_tests: Optional[List[str]] = None) -> List[str]:
"""
Searches for all Python files starting with test_, excluding those matched by the blocklist patterns or tests
"""
def skip_test_p(name: str) -> bool:
rc = False
if blocklisted_patterns is not None:
rc |= any(name.startswith(pattern) for pattern in blocklisted_patterns)
if blocklisted_tests is not None:
rc |= name in blocklisted_tests
return rc
cwd = pathlib.Path(__file__).resolve().parent if base_dir is None else base_dir
all_py_files = list(cwd.glob('**/test_*.py'))
rc = [str(fname.relative_to(cwd))[:-3] for fname in all_py_files]
# Invert slashes on Windows
if sys.platform == "win32":
rc = [name.replace('\\', '/') for name in rc]
rc = [test for test in rc if not skip_test_p(test)]
if extra_tests is not None:
rc += extra_tests
return sorted(rc)
TESTS = discover_tests(
blocklisted_patterns=[
'ao',
'bottleneck_test',
'custom_backend',
'custom_operator',
'fx', # executed by test_fx.py
'jit', # executed by test_jit.py
'mobile',
'onnx',
'package', # executed by test_package.py
'quantization', # executed by test_quantization.py
'autograd', # executed by test_autograd.py
],
blocklisted_tests=[
'test_bundled_images',
'test_cpp_extensions_aot',
'test_determination',
'test_jit_fuser',
'test_jit_simple',
'test_jit_string',
'test_kernel_launch_checks',
'test_metal',
'test_nnapi',
'test_segment_reductions',
'test_static_runtime',
'test_throughput_benchmark',
'test_typing',
"distributed/algorithms/ddp_comm_hooks/test_ddp_hooks",
"distributed/algorithms/quantization/test_quantization",
"distributed/bin/test_script",
"distributed/elastic/multiprocessing/bin/test_script",
"distributed/launcher/bin/test_script",
"distributed/launcher/bin/test_script_init_method",
"distributed/launcher/bin/test_script_is_torchelastic_launched",
"distributed/launcher/bin/test_script_local_rank",
"distributed/test_c10d_spawn",
'distributions/test_transforms',
'distributions/test_utils',
],
extra_tests=[
"test_cpp_extensions_aot_ninja",
"test_cpp_extensions_aot_no_ninja",
"distributed/elastic/timer/api_test",
"distributed/elastic/timer/local_timer_example",
"distributed/elastic/timer/local_timer_test",
"distributed/elastic/events/lib_test",
"distributed/elastic/metrics/api_test",
"distributed/elastic/utils/logging_test",
"distributed/elastic/utils/util_test",
"distributed/elastic/utils/distributed_test",
"distributed/elastic/multiprocessing/api_test",
"test_deploy",
]
)
FSDP_TEST = [test for test in TESTS if test.startswith("distributed/fsdp")]
# Tests that need to be run with pytest.
USE_PYTEST_LIST = [
"distributed/pipeline/sync/skip/test_api",
"distributed/pipeline/sync/skip/test_gpipe",
"distributed/pipeline/sync/skip/test_inspect_skip_layout",
"distributed/pipeline/sync/skip/test_leak",
"distributed/pipeline/sync/skip/test_portal",
"distributed/pipeline/sync/skip/test_stash_pop",
"distributed/pipeline/sync/skip/test_tracker",
"distributed/pipeline/sync/skip/test_verify_skippables",
"distributed/pipeline/sync/test_balance",
"distributed/pipeline/sync/test_bugs",
"distributed/pipeline/sync/test_checkpoint",
"distributed/pipeline/sync/test_copy",
"distributed/pipeline/sync/test_deferred_batch_norm",
"distributed/pipeline/sync/test_dependency",
"distributed/pipeline/sync/test_inplace",
"distributed/pipeline/sync/test_microbatch",
"distributed/pipeline/sync/test_phony",
"distributed/pipeline/sync/test_pipe",
"distributed/pipeline/sync/test_pipeline",
"distributed/pipeline/sync/test_stream",
"distributed/pipeline/sync/test_transparency",
"distributed/pipeline/sync/test_worker",
"distributions/test_constraints",
"distributions/test_transforms",
"distributions/test_utils",
"test_typing",
"distributed/elastic/events/lib_test",
"distributed/elastic/agent/server/test/api_test",
"test_deploy",
]
WINDOWS_BLOCKLIST = [
"distributed/nn/jit/test_instantiator",
"distributed/rpc/test_faulty_agent",
"distributed/rpc/test_tensorpipe_agent",
"distributed/rpc/test_share_memory",
"distributed/rpc/cuda/test_tensorpipe_agent",
"distributed/pipeline/sync/skip/test_api",
"distributed/pipeline/sync/skip/test_gpipe",
"distributed/pipeline/sync/skip/test_inspect_skip_layout",
"distributed/pipeline/sync/skip/test_leak",
"distributed/pipeline/sync/skip/test_portal",
"distributed/pipeline/sync/skip/test_stash_pop",
"distributed/pipeline/sync/skip/test_tracker",
"distributed/pipeline/sync/skip/test_verify_skippables",
"distributed/pipeline/sync/test_balance",
"distributed/pipeline/sync/test_bugs",
"distributed/pipeline/sync/test_checkpoint",
"distributed/pipeline/sync/test_copy",
"distributed/pipeline/sync/test_deferred_batch_norm",
"distributed/pipeline/sync/test_dependency",
"distributed/pipeline/sync/test_inplace",
"distributed/pipeline/sync/test_microbatch",
"distributed/pipeline/sync/test_phony",
"distributed/pipeline/sync/test_pipe",
"distributed/pipeline/sync/test_pipeline",
"distributed/pipeline/sync/test_stream",
"distributed/pipeline/sync/test_transparency",
"distributed/pipeline/sync/test_worker",
"distributed/elastic/agent/server/test/api_test",
"distributed/elastic/multiprocessing/api_test",
"distributed/_shard/checkpoint/test_checkpoint"
"distributed/_shard/checkpoint/test_file_system_checkpoint"
"distributed/_shard/sharding_spec/test_sharding_spec",
"distributed/_shard/sharding_plan/test_sharding_plan",
"distributed/_shard/sharded_tensor/test_megatron_prototype",
"distributed/_shard/sharded_tensor/test_sharded_tensor",
"distributed/_shard/sharded_tensor/test_sharded_tensor_reshard",
"distributed/_shard/sharded_tensor/ops/test_chunk",
"distributed/_shard/sharded_tensor/ops/test_elementwise_ops",
"distributed/_shard/sharded_tensor/ops/test_embedding",
"distributed/_shard/sharded_tensor/ops/test_embedding_bag",
"distributed/_shard/sharded_tensor/ops/test_binary_cmp",
"distributed/_shard/sharded_tensor/ops/test_init",
"distributed/_shard/sharded_tensor/ops/test_linear",
"distributed/_shard/sharded_tensor/ops/test_math_ops",
"distributed/_shard/sharded_tensor/ops/test_matrix_ops",
"distributed/_shard/sharded_tensor/ops/test_softmax",
"distributed/_shard/sharded_optim/test_sharded_optim",
"distributed/_shard/test_partial_tensor",
"distributed/_shard/test_replicated_tensor",
] + FSDP_TEST
ROCM_BLOCKLIST = [
"distributed/nn/jit/test_instantiator",
"distributed/rpc/test_faulty_agent",
"distributed/rpc/test_tensorpipe_agent",
"distributed/rpc/test_share_memory",
"distributed/rpc/cuda/test_tensorpipe_agent",
"distributed/_shard/checkpoint/test_checkpoint"
"distributed/_shard/checkpoint/test_file_system_checkpoint"
"distributed/_shard/sharding_spec/test_sharding_spec",
"distributed/_shard/sharding_plan/test_sharding_plan",
"distributed/_shard/sharded_tensor/test_megatron_prototype",
"distributed/_shard/sharded_tensor/test_sharded_tensor",
"distributed/_shard/sharded_tensor/test_sharded_tensor_reshard",
"distributed/_shard/sharded_tensor/ops/test_chunk",
"distributed/_shard/sharded_tensor/ops/test_elementwise_ops",
"distributed/_shard/sharded_tensor/ops/test_embedding",
"distributed/_shard/sharded_tensor/ops/test_embedding_bag",
"distributed/_shard/sharded_tensor/ops/test_binary_cmp",
"distributed/_shard/sharded_tensor/ops/test_init",
"distributed/_shard/sharded_tensor/ops/test_linear",
"distributed/_shard/sharded_tensor/ops/test_math_ops",
"distributed/_shard/sharded_tensor/ops/test_matrix_ops",
"distributed/_shard/sharded_tensor/ops/test_softmax",
"distributed/_shard/sharded_optim/test_sharded_optim",
"distributed/_shard/test_partial_tensor",
"distributed/_shard/test_replicated_tensor",
"test_determination",
"test_jit_legacy",
"test_type_hints",
"test_openmp",
]
RUN_PARALLEL_BLOCKLIST = [
"test_cpp_extensions_jit",
"test_jit_disabled",
"test_mobile_optimizer",
"test_multiprocessing",
"test_multiprocessing_spawn",
"test_namedtuple_return_api",
"test_overrides",
"test_show_pickle",
"test_tensorexpr",
"test_cuda_primary_ctx",
] + FSDP_TEST
WINDOWS_COVERAGE_BLOCKLIST = []
# A subset of our TESTS list that validates that PyTorch's ops, modules, and autograd functions work as expected
CORE_TEST_LIST = [
"test_autograd",
"test_modules",
"test_nn",
"test_ops",
"test_ops_gradients",
"test_ops_jit",
"test_torch"
]
# the JSON file to store the S3 test stats
TEST_TIMES_FILE = ".pytorch-test-times.json"
# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST
SLOW_TEST_THRESHOLD = 300
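# Maps each available distributed backend to the environment variables set when running its tests.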
DISTRIBUTED_TESTS_CONFIG = {}
if dist.is_available():
DISTRIBUTED_TESTS_CONFIG["test"] = {"WORLD_SIZE": "1"}
if not TEST_WITH_ROCM and dist.is_mpi_available():
DISTRIBUTED_TESTS_CONFIG["mpi"] = {
"WORLD_SIZE": "3",
"TEST_REPORT_SOURCE_OVERRIDE": "dist-mpi",
}
if dist.is_nccl_available():
DISTRIBUTED_TESTS_CONFIG["nccl"] = {
"WORLD_SIZE": "2" if torch.cuda.device_count() == 2 else "3",
"TEST_REPORT_SOURCE_OVERRIDE": "dist-nccl",
}
if dist.is_gloo_available():
DISTRIBUTED_TESTS_CONFIG["gloo"] = {
"WORLD_SIZE": "2" if torch.cuda.device_count() == 2 else "3",
"TEST_REPORT_SOURCE_OVERRIDE": "dist-gloo",
}
# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
SIGNALS_TO_NAMES_DICT = {
getattr(signal, n): n for n in dir(signal) if n.startswith("SIG") and "_" not in n
}
CPP_EXTENSIONS_ERROR = """
Ninja (https://ninja-build.org) is required for some of the C++ extensions
tests, but it could not be found. Install ninja with `pip install ninja`
or `conda install ninja`. Alternatively, disable said tests with
`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.
"""
PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))
ENABLE_PR_HISTORY_REORDERING = bool(
os.environ.get("ENABLE_PR_HISTORY_REORDERING", "0") == "1"
)
JIT_EXECUTOR_TESTS = [
"test_jit_profiling",
"test_jit_legacy",
"test_jit_fuser_legacy",
]
DISTRIBUTED_TESTS = [test for test in TESTS if test.startswith("distributed")]
TESTS_REQUIRING_LAPACK = [
"distributions/test_constraints",
"distributions/test_distributions",
]
# Dictionary matching test modules (in TESTS) to lists of test cases (within that test_module) that would be run when
# options.run_specified_test_cases is enabled.
# For example:
# {
# "test_nn": ["test_doubletensor_avg_pool3d", "test_share_memory", "test_hook_requires_grad"],
# ...
# }
# then for test_nn.py, we would ONLY run test_doubletensor_avg_pool3d, test_share_memory, and test_hook_requires_grad.
SPECIFIED_TEST_CASES_DICT: Dict[str, List[str]] = {}
# The file from which the SPECIFIED_TEST_CASES_DICT will be filled, a CSV of test cases that would be run when
# options.run_specified_test_cases is enabled.
SPECIFIED_TEST_CASES_FILE: str = ".pytorch_specified_test_cases.csv"
def print_to_stderr(message):
print(message, file=sys.stderr)
def get_test_case_args(test_module, using_pytest) -> List[str]:
args = []
# if test_module not specified or specified with '__all__' then run all tests
if (
test_module not in SPECIFIED_TEST_CASES_DICT
or "__all__" in SPECIFIED_TEST_CASES_DICT[test_module]
):
return args
if using_pytest:
args.append("-k")
args.append(" or ".join(SPECIFIED_TEST_CASES_DICT[test_module]))
else:
for test in SPECIFIED_TEST_CASES_DICT[test_module]:
args.append("-k")
args.append(test)
return args
def get_executable_command(options, allow_pytest, disable_coverage=False):
if options.coverage and not disable_coverage:
executable = ["coverage", "run", "--parallel-mode", "--source=torch"]
else:
executable = [sys.executable]
if options.pytest:
if allow_pytest:
executable += ["-m", "pytest"]
else:
print_to_stderr(
"Pytest cannot be used for this test. Falling back to unittest."
)
return executable
def run_test(
test_module, test_directory, options, launcher_cmd=None, extra_unittest_args=None
):
unittest_args = options.additional_unittest_args.copy()
if options.verbose:
unittest_args.append(f'-{"v"*options.verbose}') # in case of pytest
if test_module in RUN_PARALLEL_BLOCKLIST:
unittest_args = [
arg for arg in unittest_args if not arg.startswith("--run-parallel")
]
if extra_unittest_args:
assert isinstance(extra_unittest_args, list)
unittest_args.extend(extra_unittest_args)
# If using pytest, replace -f with equivalent -x
if options.pytest:
unittest_args = [arg if arg != "-f" else "-x" for arg in unittest_args]
elif IS_IN_CI:
# use the downloaded test cases configuration, not supported in pytest
unittest_args.extend(["--import-slow-tests", "--import-disabled-tests"])
# Multiprocessing related tests cannot run with coverage.
# Tracking issue: https://github.com/pytorch/pytorch/issues/50661
disable_coverage = (
sys.platform == "win32" and test_module in WINDOWS_COVERAGE_BLOCKLIST
)
# Extra arguments are not supported with pytest
executable = get_executable_command(
options, allow_pytest=not extra_unittest_args, disable_coverage=disable_coverage
)
# TODO: move this logic into common_utils.py instead of passing in "-k" individually
# The following logic for running specified tests will only run for non-distributed tests, as those are dispatched
# to test_distributed and not run_test (this function)
if options.run_specified_test_cases:
unittest_args.extend(get_test_case_args(test_module, "pytest" in executable))
# Can't call `python -m unittest test_*` here because it doesn't run code
# in `if __name__ == '__main__': `. So call `python test_*.py` instead.
argv = [test_module + ".py"] + unittest_args
command = (launcher_cmd or []) + executable + argv
print_to_stderr("Executing {} ... [{}]".format(command, datetime.now()))
return shell(command, test_directory)
def test_cuda_primary_ctx(test_module, test_directory, options):
return run_test(
test_module, test_directory, options, extra_unittest_args=["--subprocess"]
)
run_test_with_subprocess = functools.partial(run_test, extra_unittest_args=["--subprocess"])
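# Returns a handler that forces the test module's cases to run in separate subprocesses (used by CUSTOM_HANDLERS below).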
def get_run_test_with_subprocess_fn():
return lambda test_module, test_directory, options: run_test_with_subprocess(test_module, test_directory, options)
def _test_cpp_extensions_aot(test_directory, options, use_ninja):
if use_ninja:
try:
cpp_extension.verify_ninja_availability()
except RuntimeError:
print(CPP_EXTENSIONS_ERROR)
return 1
# Wipe the build folder, if it exists already
cpp_extensions_test_dir = os.path.join(test_directory, "cpp_extensions")
cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, "build")
if os.path.exists(cpp_extensions_test_build_dir):
shutil.rmtree(cpp_extensions_test_build_dir)
# Build the test cpp extensions modules
shell_env = os.environ.copy()
shell_env["USE_NINJA"] = str(1 if use_ninja else 0)
cmd = [sys.executable, "setup.py", "install", "--root", "./install"]
return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)
if return_code != 0:
return return_code
if sys.platform != "win32":
return_code = shell(
cmd,
cwd=os.path.join(cpp_extensions_test_dir, "no_python_abi_suffix_test"),
env=shell_env,
)
if return_code != 0:
return return_code
# "install" the test modules and run tests
python_path = os.environ.get("PYTHONPATH", "")
from shutil import copyfile
test_module = "test_cpp_extensions_aot" + ("_ninja" if use_ninja else "_no_ninja")
copyfile(
test_directory + "/test_cpp_extensions_aot.py",
test_directory + "/" + test_module + ".py",
)
try:
cpp_extensions = os.path.join(test_directory, "cpp_extensions")
install_directory = ""
# install directory is the one that is named site-packages
for root, directories, _ in os.walk(os.path.join(cpp_extensions, "install")):
for directory in directories:
if "-packages" in directory:
install_directory = os.path.join(root, directory)
assert install_directory, "install_directory must not be empty"
os.environ["PYTHONPATH"] = os.pathsep.join([install_directory, python_path])
return run_test(test_module, test_directory, options)
finally:
os.environ["PYTHONPATH"] = python_path
if os.path.exists(test_directory + "/" + test_module + ".py"):
os.remove(test_directory + "/" + test_module + ".py")
def test_cpp_extensions_aot_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot(test_directory, options, use_ninja=True)
def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot(test_directory, options, use_ninja=False)
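# Runs the distributed test module once per available backend and init method, exporting TEMP_DIR, BACKEND, WORLD_SIZE and INIT_METHOD for each run.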
def test_distributed(test_module, test_directory, options):
# MPI tests are broken with Python-3.9
mpi_available = subprocess.call(
"command -v mpiexec", shell=True
) == 0 and sys.version_info < (3, 9)
if options.verbose and not mpi_available:
print_to_stderr("MPI not available -- MPI backend tests will be skipped")
config = DISTRIBUTED_TESTS_CONFIG
for backend, env_vars in config.items():
if sys.platform == "win32" and backend != "gloo":
continue
if backend == "mpi" and not mpi_available:
continue
for with_init_file in {True, False}:
if sys.platform == "win32" and not with_init_file:
continue
tmp_dir = tempfile.mkdtemp()
if options.verbose:
init_str = "with {} init_method"
with_init = init_str.format("file" if with_init_file else "env")
print_to_stderr(
"Running distributed tests for the {} backend {}".format(
backend, with_init
)
)
old_environ = dict(os.environ)
os.environ["TEMP_DIR"] = tmp_dir
os.environ["BACKEND"] = backend
os.environ["INIT_METHOD"] = "env://"
os.environ.update(env_vars)
if with_init_file:
if test_module == "test_distributed_spawn":
init_method = f"{FILE_SCHEMA}{tmp_dir}/"
else:
init_method = f"{FILE_SCHEMA}{tmp_dir}/shared_init_file"
os.environ["INIT_METHOD"] = init_method
try:
os.mkdir(os.path.join(tmp_dir, "barrier"))
os.mkdir(os.path.join(tmp_dir, "test_dir"))
if backend == "mpi":
# test mpiexec for --noprefix option
with open(os.devnull, "w") as devnull:
allowrunasroot_opt = (
"--allow-run-as-root"
if subprocess.call(
'mpiexec --allow-run-as-root -n 1 bash -c ""',
shell=True,
stdout=devnull,
stderr=subprocess.STDOUT,
)
== 0
else ""
)
noprefix_opt = (
"--noprefix"
if subprocess.call(
f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c ""',
shell=True,
stdout=devnull,
stderr=subprocess.STDOUT,
)
== 0
else ""
)
mpiexec = ["mpiexec", "-n", "3", noprefix_opt, allowrunasroot_opt]
return_code = run_test(
test_module, test_directory, options, launcher_cmd=mpiexec
)
else:
return_code = run_test(test_module, test_directory, options, extra_unittest_args=["--subprocess"])
if return_code != 0:
return return_code
finally:
shutil.rmtree(tmp_dir)
os.environ.clear()
os.environ.update(old_environ)
return 0
CUSTOM_HANDLERS = {
"test_cuda_primary_ctx": test_cuda_primary_ctx,
"test_cpp_extensions_aot_no_ninja": test_cpp_extensions_aot_no_ninja,
"test_cpp_extensions_aot_ninja": test_cpp_extensions_aot_ninja,
"distributed/test_distributed_spawn": test_distributed,
"distributed/test_c10d_nccl": get_run_test_with_subprocess_fn(),
"distributed/test_c10d_gloo": get_run_test_with_subprocess_fn(),
"distributed/test_c10d_common": get_run_test_with_subprocess_fn(),
"distributed/test_c10d_spawn_gloo": get_run_test_with_subprocess_fn(),
"distributed/test_c10d_spawn_nccl": get_run_test_with_subprocess_fn(),
"distributed/test_store": get_run_test_with_subprocess_fn(),
"distributed/test_pg_wrapper": get_run_test_with_subprocess_fn(),
"distributed/rpc/test_faulty_agent": get_run_test_with_subprocess_fn(),
"distributed/rpc/test_tensorpipe_agent": get_run_test_with_subprocess_fn(),
"distributed/rpc/test_share_memory": get_run_test_with_subprocess_fn(),
"distributed/rpc/cuda/test_tensorpipe_agent": get_run_test_with_subprocess_fn(),
}
def parse_test_module(test):
return test.split(".")[0]
class TestChoices(list):
def __init__(self, *args, **kwargs):
super(TestChoices, self).__init__(args[0])
def __contains__(self, item):
return list.__contains__(self, parse_test_module(item))
def parse_args():
parser = argparse.ArgumentParser(
description="Run the PyTorch unit test suite",
epilog="where TESTS is any of: {}".format(", ".join(TESTS)),
formatter_class=argparse.RawTextHelpFormatter,
parents=[common_parser]
)
parser.add_argument(
"-v",
"--verbose",
action="count",
default=0,
help="print verbose information and test-by-test results",
)
parser.add_argument("--jit", "--jit", action="store_true", help="run all jit tests")
parser.add_argument(
"--distributed-tests",
"--distributed-tests",
action="store_true",
help="run all distributed tests",
)
parser.add_argument(
"-core",
"--core",
action="store_true",
help="Only run core tests, or tests that validate PyTorch's ops, modules,"
"and autograd. They are defined by CORE_TEST_LIST."
)
parser.add_argument(
"-pt",
"--pytest",
action="store_true",
help="If true, use `pytest` to execute the tests. E.g., this runs "
"TestTorch with pytest in verbose and coverage mode: "
"python run_test.py -vci torch -pt",
)
parser.add_argument(
"-c",
"--coverage",
action="store_true",
help="enable coverage",
default=PYTORCH_COLLECT_COVERAGE,
)
parser.add_argument(
"-i",
"--include",
nargs="+",
choices=TestChoices(TESTS),
default=TESTS,
metavar="TESTS",
help="select a set of tests to include (defaults to ALL tests)."
" tests must be a part of the TESTS list defined in run_test.py",
)
parser.add_argument(
"-x",
"--exclude",
nargs="+",
choices=TESTS,
metavar="TESTS",
default=[],
help="select a set of tests to exclude",
)
parser.add_argument(
"-f",
"--first",
choices=TESTS,
metavar="TESTS",
help="select the test to start from (excludes previous tests)",
)
parser.add_argument(
"-l",
"--last",
choices=TESTS,
metavar="TESTS",
help="select the last test to run (excludes following tests)",
)
parser.add_argument(
"--bring-to-front",
nargs="+",
choices=TestChoices(TESTS),
default=[],
metavar="TESTS",
help="select a set of tests to run first. This can be used in situations"
" where you want to run all tests, but care more about some set, "
"e.g. after making a change to a specific component",
)
parser.add_argument(
"--ignore-win-blocklist",
action="store_true",
help="always run blocklisted windows tests",
)
# NS: Disable target determination until it can be made more reliable
# parser.add_argument(
# "--determine-from",
# help="File of affected source filenames to determine which tests to run.",
# )
parser.add_argument(
"--continue-through-error",
action="store_true",
help="Runs the full test suite despite one of the tests failing",
default=strtobool(os.environ.get("CONTINUE_THROUGH_ERROR", "False")),
)
parser.add_argument(
"additional_unittest_args",
nargs="*",
help="additional arguments passed through to unittest, e.g., "
"python run_test.py -i sparse -- TestSparse.test_factory_size_check",
)
parser.add_argument(
"--export-past-test-times",
nargs="?",
type=str,
const=TEST_TIMES_FILE,
help="dumps test times from previous S3 stats into a file, format JSON",
)
parser.add_argument(
"--shard",
nargs=2,
type=int,
help="runs a shard of the tests (taking into account other selections), e.g., "
"--shard 2 3 will break up the selected tests into 3 shards and run the tests "
"in the 2nd shard (the first number should not exceed the second)",
)
parser.add_argument(
"--exclude-jit-executor",
action="store_true",
help="exclude tests that are run for a specific jit config",
)
parser.add_argument(
"--exclude-distributed-tests",
action="store_true",
help="exclude distributed tests",
)
parser.add_argument(
"--run-specified-test-cases",
nargs="?",
type=str,
const=SPECIFIED_TEST_CASES_FILE,
help="load specified test cases file dumped from previous OSS CI stats, format CSV. "
" If all test cases should run for a <test_module> please add a single row: \n"
" test_filename,test_case_name\n"
" ...\n"
" <test_module>,__all__\n"
" ...\n"
'how we use the stats will be based on option "--use-specified-test-cases-by".',
)
parser.add_argument(
"--use-specified-test-cases-by",
type=str,
choices=["include", "bring-to-front"],
default="include",
help='used together with option "--run-specified-test-cases". When specified test case '
"file is set, this option allows the user to control whether to only run the specified test "
"modules or to simply bring the specified modules to front and also run the remaining "
"modules. Note: regardless of this option, we will only run the specified test cases "
" within a specified test module. For unspecified test modules with the bring-to-front "
"option, all test cases will be run, as one may expect.",
)
parser.add_argument(
"--dry-run",
action="store_true",
help="Only list the test that will run.",
)
return parser.parse_args()
def find_test_index(test, selected_tests, find_last_index=False):
"""Find the index of the first or last occurrence of a given test/test module in the list of selected tests.
This function is used to determine the indices when slicing the list of selected tests when
``options.first``(:attr:`find_last_index`=False) and/or ``options.last``(:attr:`find_last_index`=True) are used.
:attr:`selected_tests` can be a list that contains multiple consequent occurrences of tests
as part of the same test module, e.g.:
```
selected_tests = ['autograd', 'cuda', **'torch.TestTorch.test_acos',
'torch.TestTorch.test_tan', 'torch.TestTorch.test_add'**, 'utils']
```
If :attr:`test`='torch' and :attr:`find_last_index`=False, result should be **2**.
If :attr:`test`='torch' and :attr:`find_last_index`=True, result should be **4**.
Args:
test (str): Name of test to lookup
selected_tests (list): List of tests
find_last_index (bool, optional): should we lookup the index of first or last
occurrence (first is default)
Returns:
index of the first or last occurrence of the given test
"""
idx = 0
found_idx = -1
for t in selected_tests:
if t.startswith(test):
found_idx = idx
if not find_last_index:
break
idx += 1
return found_idx
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
for exclude_test in exclude_list:
tests_copy = selected_tests[:]
for test in tests_copy:
if test.startswith(exclude_test):
if exclude_message is not None:
print_to_stderr("Excluding {} {}".format(test, exclude_message))
selected_tests.remove(test)
return selected_tests
def get_selected_tests(options):
# First make sure run specific test cases options are processed.
if options.run_specified_test_cases:
if options.use_specified_test_cases_by == "include":
options.include = list(SPECIFIED_TEST_CASES_DICT.keys())
elif options.use_specified_test_cases_by == "bring-to-front":
options.bring_to_front = list(SPECIFIED_TEST_CASES_DICT.keys())
selected_tests = options.include
# filter if there's JIT only and distributed only test options
if options.jit:
selected_tests = list(
filter(lambda test_name: "jit" in test_name, selected_tests)
)
if options.distributed_tests:
selected_tests = list(
filter(lambda test_name: test_name in DISTRIBUTED_TESTS, selected_tests)
)
# Filter to only run core tests when --core option is specified
if options.core:
selected_tests = list(
filter(lambda test_name: test_name in CORE_TEST_LIST, selected_tests)
)
# process reordering
if options.bring_to_front:
to_front = set(options.bring_to_front)
selected_tests = options.bring_to_front + list(
filter(lambda name: name not in to_front, selected_tests)
)
if options.first:
first_index = find_test_index(options.first, selected_tests)
selected_tests = selected_tests[first_index:]
if options.last:
last_index = find_test_index(options.last, selected_tests, find_last_index=True)
selected_tests = selected_tests[: last_index + 1]
# process exclusion
if options.exclude_jit_executor:
options.exclude.extend(JIT_EXECUTOR_TESTS)
if options.exclude_distributed_tests:
options.exclude.extend(DISTRIBUTED_TESTS)
# These tests fail with CUDA 11.6; disabling them temporarily. Issue: https://github.com/pytorch/pytorch/issues/75375
if torch.version.cuda is not None and LooseVersion(torch.version.cuda) == "11.6":
options.exclude.extend(["distributions/test_constraints"])
selected_tests = exclude_tests(options.exclude, selected_tests)
if sys.platform == "win32" and not options.ignore_win_blocklist:
target_arch = os.environ.get("VSCMD_ARG_TGT_ARCH")
if target_arch != "x64":
WINDOWS_BLOCKLIST.append("cpp_extensions_aot_no_ninja")
WINDOWS_BLOCKLIST.append("cpp_extensions_aot_ninja")
WINDOWS_BLOCKLIST.append("cpp_extensions_jit")
WINDOWS_BLOCKLIST.append("jit")
WINDOWS_BLOCKLIST.append("jit_fuser")
# This is an exception caused by https://github.com/pytorch/pytorch/issues/69460
# The code below should be removed once that issue is resolved
if torch.version.cuda is not None and LooseVersion(torch.version.cuda) >= "11.5":
WINDOWS_BLOCKLIST.append("test_cpp_extensions_aot")
WINDOWS_BLOCKLIST.append("test_cpp_extensions_aot_ninja")
WINDOWS_BLOCKLIST.append("test_cpp_extensions_aot_no_ninja")
selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, "on Windows")
elif TEST_WITH_ROCM:
selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, "on ROCm")
# sharding
if options.shard:
assert len(options.shard) == 2, "Unexpected shard format"
assert min(options.shard) > 0, "Shards must be positive numbers"
which_shard, num_shards = options.shard
assert (
which_shard <= num_shards
), "Selected shard must be less than or equal to total number of shards"
assert num_shards <= len(
selected_tests
), f"Number of shards must be less than {len(selected_tests)}"
# TODO: fix this to use test_times_filename, but currently this is not working
# because setting the export arg immediately halts the test execution.
selected_tests = get_shard_based_on_S3(
which_shard, num_shards, selected_tests, TEST_TIMES_FILE
)
# skip all distributed tests if distributed package is not available.
if not dist.is_available():
selected_tests = exclude_tests(DISTRIBUTED_TESTS, selected_tests,
"PyTorch is built without distributed support.")
# skip tests that require LAPACK when it's not available
if not torch._C.has_lapack:
selected_tests = exclude_tests(TESTS_REQUIRING_LAPACK, selected_tests,
"PyTorch is built without LAPACK support.")
return selected_tests
def run_test_module(test: str, test_directory: str, options) -> Optional[str]:
test_module = parse_test_module(test)
# Printing the date here can help diagnose which tests are slow
print_to_stderr("Running {} ... [{}]".format(test, datetime.now()))
handler = CUSTOM_HANDLERS.get(test_module, run_test)
return_code = handler(test_module, test_directory, options)
assert isinstance(return_code, int) and not isinstance(
return_code, bool
), "Return code should be an integer"
if return_code == 0:
return None
message = f"{test} failed!"
if return_code < 0:
# subprocess.Popen returns the child process' exit signal as
# return code -N, where N is the signal number.
signal_name = SIGNALS_TO_NAMES_DICT[-return_code]
message += f" Received signal: {signal_name}"
return message
def main():
options = parse_args()
# TODO: move this export & download function in tools/ folder
test_times_filename = options.export_past_test_times
if test_times_filename:
print(
f"Exporting past test times from S3 to {test_times_filename}, no tests will be run."
)
export_S3_test_times(test_times_filename)
return
specified_test_cases_filename = options.run_specified_test_cases
if specified_test_cases_filename:
print(
f"Loading specified test cases to run from {specified_test_cases_filename}."
)
global SPECIFIED_TEST_CASES_DICT
SPECIFIED_TEST_CASES_DICT = get_specified_test_cases(
specified_test_cases_filename, TESTS
)
test_directory = str(REPO_ROOT / "test")
selected_tests = get_selected_tests(options)
if options.verbose:
print_to_stderr("Selected tests:\n {}".format("\n ".join(selected_tests)))
if options.dry_run:
return
if options.coverage and not PYTORCH_COLLECT_COVERAGE:
shell(["coverage", "erase"])
# NS: Disable target determination until it can be made more reliable
# if options.determine_from is not None and os.path.exists(options.determine_from):
# slow_tests = get_slow_tests_based_on_S3(
# TESTS, TARGET_DET_LIST, SLOW_TEST_THRESHOLD
# )
# print_to_stderr(
# "Added the following tests to target_det tests as calculated based on S3:"
# )
# print_to_stderr(slow_tests)
# with open(options.determine_from, "r") as fh:
# touched_files = [
# os.path.normpath(name.strip())
# for name in fh.read().split("\n")
# if len(name.strip()) > 0
# ]
# # HACK: Ensure the 'test' paths can be traversed by Modulefinder
# sys.path.append(test_directory)
# selected_tests = [
# test
# for test in selected_tests
# if should_run_test(
# TARGET_DET_LIST + slow_tests, test, touched_files, options
# )
# ]
# sys.path.remove(test_directory)
if IS_IN_CI:
selected_tests = get_reordered_tests(
selected_tests, ENABLE_PR_HISTORY_REORDERING
)
# downloading test cases configuration to local environment
get_test_case_configs(dirpath=test_directory)
has_failed = False
failure_messages = []
try:
for test in selected_tests:
options_clone = copy.deepcopy(options)
if test in USE_PYTEST_LIST:
options_clone.pytest = True
err_message = run_test_module(test, test_directory, options_clone)
if err_message is None:
continue
has_failed = True
failure_messages.append(err_message)
if not options_clone.continue_through_error:
raise RuntimeError(err_message)
print_to_stderr(err_message)
finally:
if options.coverage:
from coverage import Coverage
with set_cwd(test_directory):
cov = Coverage()
if PYTORCH_COLLECT_COVERAGE:
cov.load()
cov.combine(strict=False)
cov.save()
if not PYTORCH_COLLECT_COVERAGE:
cov.html_report()
if options.continue_through_error and has_failed:
for err in failure_messages:
print_to_stderr(err)
sys.exit(1)
if __name__ == "__main__":
main()
| [] | [] | ["TEMP_DIR", "INIT_METHOD", "BACKEND", "CONTINUE_THROUGH_ERROR", "VSCMD_ARG_TGT_ARCH", "ENABLE_PR_HISTORY_REORDERING", "PYTORCH_COLLECT_COVERAGE", "PYTHONPATH"] | [] | ["TEMP_DIR", "INIT_METHOD", "BACKEND", "CONTINUE_THROUGH_ERROR", "VSCMD_ARG_TGT_ARCH", "ENABLE_PR_HISTORY_REORDERING", "PYTORCH_COLLECT_COVERAGE", "PYTHONPATH"] | python | 8 | 0 | |
src/net/java/sip/communicator/plugin/thunderbird/ThunderbirdConfigForm.java | /*
* Jitsi, the OpenSource Java VoIP and Instant Messaging client.
*
* Copyright @ 2015 Atlassian Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.java.sip.communicator.plugin.thunderbird;
import static net.java.sip.communicator.plugin.thunderbird
.ThunderbirdContactSourceService.*;
import java.awt.*;
import java.awt.event.*;
import java.io.*;
import java.util.List;
import javax.swing.*;
import javax.swing.event.*;
import org.jitsi.service.configuration.*;
import org.jitsi.service.resources.*;
import org.jitsi.util.*;
import net.java.sip.communicator.service.gui.*;
import net.java.sip.communicator.plugin.desktoputil.*;
/**
* This ConfigurationForm shows the list of Thunderbird address books
* and allow users to manage them.
*
* @author Ingo Bauersachs
*/
public class ThunderbirdConfigForm
extends TransparentPanel
implements ConfigurationForm,
ActionListener,
DocumentListener
{
/** Serial version UID. */
private static final long serialVersionUID = 0L;
/** Resource service */
private ResourceManagementService R = ThunderbirdActivator.getResources();
private JTextField txtFilename;
private JTextField txtPrefix;
private JButton cmdBrowse;
private JCheckBox chkEnabled;
/**
* Creates a new instance of this class.
*/
public ThunderbirdConfigForm()
{
super(new BorderLayout());
this.initComponents();
}
/**
* Inits the swing components
*/
private void initComponents()
{
JPanel pnl = new TransparentPanel();
pnl.setLayout(new GridLayout(0, 2));
add(pnl, BorderLayout.NORTH);
chkEnabled = new SIPCommCheckBox(
R.getI18NString("plugin.thunderbird.ENABLED"));
pnl.add(chkEnabled);
pnl.add(new JLabel("")); //empty to wrap the grid to the next line
txtFilename = new JTextField();
txtFilename.setEditable(false);
pnl.add(txtFilename);
cmdBrowse = new JButton(R.getI18NString("service.gui.BROWSE") + "...");
pnl.add(cmdBrowse);
JLabel lblPrefix = new JLabel(
R.getI18NString("plugin.thunderbird.PHONE_PREFIX"));
pnl.add(lblPrefix);
txtPrefix = new JTextField();
txtPrefix.getDocument().addDocumentListener(this);
pnl.add(txtPrefix);
List<ThunderbirdContactSourceService> activeServices
= ThunderbirdActivator.getActiveServices();
if (activeServices.size() > 0)
{
chkEnabled.setSelected(true);
ThunderbirdContactSourceService service = activeServices.get(0);
txtFilename.setText(service.getFilename());
txtPrefix.setText(service.getPhoneNumberPrefix());
}
else
{
chkEnabled.setSelected(false);
}
updateStates();
chkEnabled.addActionListener(this);
txtFilename.getDocument().addDocumentListener(this);
cmdBrowse.addActionListener(this);
}
/**
* @see net.java.sip.communicator.service.gui.ConfigurationForm#getTitle
*/
public String getTitle()
{
return R.getI18NString("plugin.thunderbird.CONFIG_FORM_TITLE");
}
/**
* @see net.java.sip.communicator.service.gui.ConfigurationForm#getIcon
*/
public byte[] getIcon()
{
return null;
}
/**
* @see net.java.sip.communicator.service.gui.ConfigurationForm#getForm
*/
public Object getForm()
{
return this;
}
/**
* Required by ConfigurationForm interface
*
* Returns the index of this configuration form in the configuration window.
* This index is used to put configuration forms in the desired order.
* <p>
* 0 is the first position
* -1 means that the form will be put at the end
* </p>
* @return the index of this configuration form in the configuration window.
*
* @see net.java.sip.communicator.service.gui.ConfigurationForm#getIndex
*/
public int getIndex()
{
return 3;
}
/**
* Processes buttons events (new, modify, remove)
*
* @see java.awt.event.ActionListener#actionPerformed
*/
public void actionPerformed(ActionEvent e)
{
if (e.getSource() == cmdBrowse)
{
browseForMab();
ThunderbirdActivator.getActiveServices().get(0)
.setFilename(txtFilename.getText());
}
else if (e.getSource() == chkEnabled)
{
if (chkEnabled.isSelected())
{
browseForMab();
if (txtFilename.getText() != null)
{
String bprop = PNAME_BASE_THUNDERBIRD_CONFIG + ".1";
ConfigurationService config
= ThunderbirdActivator.getConfigService();
config.setProperty(bprop, "1");
config.setProperty(bprop + "." + PNAME_INDEX, 1);
config.setProperty(bprop + "." + PNAME_FILENAME,
txtFilename.getText());
config.setProperty(bprop + "." + PNAME_DISPLAYNAME,
"Thunderbird");
config.setProperty(bprop + "." + PNAME_PREFIX,
txtPrefix.getText());
ThunderbirdActivator.add(bprop);
}
}
else
{
for (ThunderbirdContactSourceService svc
: ThunderbirdActivator.getActiveServices())
{
ThunderbirdActivator.remove(svc);
}
txtFilename.setText(null);
txtPrefix.setText(null);
}
updateStates();
}
}
/**
* Opens a file browser dialog to select a Thunderbird .mab file. If the
* user has chosen an existing file, the name is set to the filename
* textbox.
*/
private void browseForMab()
{
FilenameFilter ff = new FilenameFilter()
{
public boolean accept(File dir, String name)
{
String extension = "";
int i = name.lastIndexOf('.');
if (i > 0)
{
extension = name.substring(i + 1);
}
return "mab".equals(extension);
}
};
FileDialog fd = new FileDialog((Frame)null);
fd.setFilenameFilter(ff);
if (OSUtils.IS_WINDOWS)
{
File f = new File(
new File(
System.getenv("APPDATA"), "Thunderbird"),
"Profiles");
if (f.exists())
{
fd.setDirectory(f.getAbsolutePath());
}
}
else if (OSUtils.IS_LINUX)
{
File f = new File(
System.getProperty("user.home"),
".thunderbird");
if (!f.exists())
{
f = new File(
System.getProperty("user.home"),
".mozilla-thunderbird");
}
if (f.exists())
{
fd.setDirectory(f.getAbsolutePath());
}
}
else if (OSUtils.IS_MAC)
{
File f = new File(
System.getProperty("user.home"),
"/Library/Profiles");
if (!f.exists())
{
f = new File(
System.getProperty("user.home"),
"Application Support/Thunderbird/Profiles");
}
if (f.exists())
{
fd.setDirectory(f.getAbsolutePath());
}
}
fd.setVisible(true);
if (fd.getFile() != null)
{
File f = new File(fd.getDirectory(), fd.getFile());
if (f.exists())
{
txtFilename.setText(f.getAbsolutePath());
}
}
}
/**
* Enables or disables the controls enabled state based on the enabled
* checkbox.
*/
private void updateStates()
{
txtFilename.setEnabled(chkEnabled.isSelected());
txtPrefix.setEnabled(chkEnabled.isSelected());
cmdBrowse.setEnabled(chkEnabled.isSelected());
}
/**
* Indicates if this is an advanced configuration form.
* @return <tt>true</tt> if this is an advanced configuration form,
* otherwise it returns <tt>false</tt>
*/
public boolean isAdvanced()
{
return true;
}
/*
* (non-Javadoc)
*
* @see javax.swing.event.DocumentListener#insertUpdate(javax.swing.event.
* DocumentEvent)
*/
public void insertUpdate(DocumentEvent e)
{
changedUpdate(e);
}
/*
* (non-Javadoc)
*
* @see javax.swing.event.DocumentListener#removeUpdate(javax.swing.event.
* DocumentEvent)
*/
public void removeUpdate(DocumentEvent e)
{
changedUpdate(e);
}
/*
* (non-Javadoc)
*
* @see javax.swing.event.DocumentListener#changedUpdate(javax.swing.event.
* DocumentEvent)
*/
public void changedUpdate(DocumentEvent e)
{
if (e.getDocument() == txtPrefix.getDocument())
{
ThunderbirdActivator.getActiveServices().get(0)
.setPhoneNumberPrefix(txtPrefix.getText());
}
}
}
| ["\"APPDATA\""] | [] | ["APPDATA"] | [] | ["APPDATA"] | java | 1 | 0 | |
sector/util/default_root.py | import os
from pathlib import Path
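# Resolve the application's root directory from the SECTOR_ROOT environment variable, defaulting to ~/.sector/mainnet.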
DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv("SECTOR_ROOT", "~/.sector/mainnet"))).resolve()
| [] | [] | ["SECTOR_ROOT"] | [] | ["SECTOR_ROOT"] | python | 1 | 0 | |
orderer/common/server/main_test.go | // Copyright IBM Corp. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package server
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/hyperledger/fabric/bccsp/factory"
"github.com/hyperledger/fabric/common/channelconfig"
"github.com/hyperledger/fabric/common/crypto/tlsgen"
deliver_mocks "github.com/hyperledger/fabric/common/deliver/mock"
"github.com/hyperledger/fabric/common/flogging"
"github.com/hyperledger/fabric/common/flogging/floggingtest"
ledger_mocks "github.com/hyperledger/fabric/common/ledger/blockledger/mocks"
ramledger "github.com/hyperledger/fabric/common/ledger/blockledger/ram"
"github.com/hyperledger/fabric/common/metrics/disabled"
"github.com/hyperledger/fabric/common/metrics/prometheus"
"github.com/hyperledger/fabric/core/comm"
"github.com/hyperledger/fabric/core/config/configtest"
"github.com/hyperledger/fabric/internal/configtxgen/configtxgentest"
"github.com/hyperledger/fabric/internal/configtxgen/encoder"
genesisconfig "github.com/hyperledger/fabric/internal/configtxgen/localconfig"
"github.com/hyperledger/fabric/internal/pkg/identity"
"github.com/hyperledger/fabric/orderer/common/cluster"
"github.com/hyperledger/fabric/orderer/common/localconfig"
"github.com/hyperledger/fabric/orderer/common/multichannel"
server_mocks "github.com/hyperledger/fabric/orderer/common/server/mocks"
"github.com/hyperledger/fabric/orderer/consensus"
"github.com/hyperledger/fabric/protos/common"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
//go:generate counterfeiter -o mocks/signer_serializer.go --fake-name SignerSerializer . signerSerializer
type signerSerializer interface {
identity.SignerSerializer
}
func TestInitializeLogging(t *testing.T) {
origEnvValue := os.Getenv("FABRIC_LOGGING_SPEC")
os.Setenv("FABRIC_LOGGING_SPEC", "foo=debug")
initializeLogging()
assert.Equal(t, "debug", flogging.Global.Level("foo").String())
os.Setenv("FABRIC_LOGGING_SPEC", origEnvValue)
}
func TestInitializeProfilingService(t *testing.T) {
origEnvValue := os.Getenv("FABRIC_LOGGING_SPEC")
defer os.Setenv("FABRIC_LOGGING_SPEC", origEnvValue)
os.Setenv("FABRIC_LOGGING_SPEC", "debug")
// get a free random port
listenAddr := func() string {
l, _ := net.Listen("tcp", "localhost:0")
l.Close()
return l.Addr().String()
}()
initializeProfilingService(
&localconfig.TopLevel{
General: localconfig.General{
Profile: localconfig.Profile{
Enabled: true,
Address: listenAddr,
}},
Kafka: localconfig.Kafka{Verbose: true},
},
)
time.Sleep(500 * time.Millisecond)
if _, err := http.Get("http://" + listenAddr + "/" + "/debug/"); err != nil {
t.Logf("Expected pprof to be up (will retry again in 3 seconds): %s", err)
time.Sleep(3 * time.Second)
if _, err := http.Get("http://" + listenAddr + "/" + "/debug/"); err != nil {
t.Fatalf("Expected pprof to be up: %s", err)
}
}
}
func TestInitializeServerConfig(t *testing.T) {
conf := &localconfig.TopLevel{
General: localconfig.General{
TLS: localconfig.TLS{
Enabled: true,
ClientAuthRequired: true,
Certificate: "main.go",
PrivateKey: "main.go",
RootCAs: []string{"main.go"},
ClientRootCAs: []string{"main.go"},
},
},
}
sc := initializeServerConfig(conf, nil)
defaultOpts := comm.DefaultKeepaliveOptions
assert.Equal(t, defaultOpts.ServerMinInterval, sc.KaOpts.ServerMinInterval)
assert.Equal(t, time.Duration(0), sc.KaOpts.ServerInterval)
assert.Equal(t, time.Duration(0), sc.KaOpts.ServerTimeout)
testDuration := 10 * time.Second
conf.General.Keepalive = localconfig.Keepalive{
ServerMinInterval: testDuration,
ServerInterval: testDuration,
ServerTimeout: testDuration,
}
sc = initializeServerConfig(conf, nil)
assert.Equal(t, testDuration, sc.KaOpts.ServerMinInterval)
assert.Equal(t, testDuration, sc.KaOpts.ServerInterval)
assert.Equal(t, testDuration, sc.KaOpts.ServerTimeout)
sc = initializeServerConfig(conf, nil)
assert.NotNil(t, sc.Logger)
assert.Equal(t, &disabled.Provider{}, sc.MetricsProvider)
assert.Len(t, sc.UnaryInterceptors, 2)
assert.Len(t, sc.StreamInterceptors, 2)
sc = initializeServerConfig(conf, &prometheus.Provider{})
assert.Equal(t, &prometheus.Provider{}, sc.MetricsProvider)
goodFile := "main.go"
badFile := "does_not_exist"
oldLogger := logger
defer func() { logger = oldLogger }()
logger, _ = floggingtest.NewTestLogger(t)
testCases := []struct {
name string
certificate string
privateKey string
rootCA string
clientRootCert string
clusterCert string
clusterKey string
clusterCA string
}{
{"BadCertificate", badFile, goodFile, goodFile, goodFile, "", "", ""},
{"BadPrivateKey", goodFile, badFile, goodFile, goodFile, "", "", ""},
{"BadRootCA", goodFile, goodFile, badFile, goodFile, "", "", ""},
{"BadClientRootCertificate", goodFile, goodFile, goodFile, badFile, "", "", ""},
{"ClusterBadCertificate", goodFile, goodFile, goodFile, goodFile, badFile, goodFile, goodFile},
{"ClusterBadPrivateKey", goodFile, goodFile, goodFile, goodFile, goodFile, badFile, goodFile},
{"ClusterBadRootCA", goodFile, goodFile, goodFile, goodFile, goodFile, goodFile, badFile},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
conf := &localconfig.TopLevel{
General: localconfig.General{
TLS: localconfig.TLS{
Enabled: true,
ClientAuthRequired: true,
Certificate: tc.certificate,
PrivateKey: tc.privateKey,
RootCAs: []string{tc.rootCA},
ClientRootCAs: []string{tc.clientRootCert},
},
Cluster: localconfig.Cluster{
ClientCertificate: tc.clusterCert,
ClientPrivateKey: tc.clusterKey,
RootCAs: []string{tc.clusterCA},
},
},
}
assert.Panics(t, func() {
if tc.clusterCert == "" {
initializeServerConfig(conf, nil)
} else {
initializeClusterClientConfig(conf)
}
},
)
})
}
}
func TestInitializeBootstrapChannel(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
testCases := []struct {
genesisMethod string
ledgerType string
panics bool
}{
{"provisional", "ram", false},
{"provisional", "file", false},
{"provisional", "json", false},
{"invalid", "ram", true},
{"file", "ram", true},
}
for _, tc := range testCases {
t.Run(tc.genesisMethod+"/"+tc.ledgerType, func(t *testing.T) {
fileLedgerLocation, _ := ioutil.TempDir("", "test-ledger")
ledgerFactory, _ := createLedgerFactory(
&localconfig.TopLevel{
General: localconfig.General{LedgerType: tc.ledgerType},
FileLedger: localconfig.FileLedger{
Location: fileLedgerLocation,
},
},
)
bootstrapConfig := &localconfig.TopLevel{
General: localconfig.General{
GenesisMethod: tc.genesisMethod,
GenesisProfile: "SampleSingleMSPSolo",
GenesisFile: "genesisblock",
SystemChannel: genesisconfig.TestChainID,
},
}
if tc.panics {
assert.Panics(t, func() {
genesisBlock := extractBootstrapBlock(bootstrapConfig)
initializeBootstrapChannel(genesisBlock, ledgerFactory)
})
} else {
assert.NotPanics(t, func() {
genesisBlock := extractBootstrapBlock(bootstrapConfig)
initializeBootstrapChannel(genesisBlock, ledgerFactory)
})
}
})
}
}
func TestInitializeLocalMsp(t *testing.T) {
t.Run("Happy", func(t *testing.T) {
assert.NotPanics(t, func() {
localMSPDir, _ := configtest.GetDevMspDir()
initializeLocalMsp(
&localconfig.TopLevel{
General: localconfig.General{
LocalMSPDir: localMSPDir,
LocalMSPID: "SampleOrg",
BCCSP: &factory.FactoryOpts{
ProviderName: "SW",
SwOpts: &factory.SwOpts{
HashFamily: "SHA2",
SecLevel: 256,
Ephemeral: true,
},
},
},
})
})
})
t.Run("Error", func(t *testing.T) {
oldLogger := logger
defer func() { logger = oldLogger }()
logger, _ = floggingtest.NewTestLogger(t)
assert.Panics(t, func() {
initializeLocalMsp(
&localconfig.TopLevel{
General: localconfig.General{
LocalMSPDir: "",
LocalMSPID: "",
},
})
})
})
}
func TestInitializeMultiChainManager(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
conf := genesisConfig(t)
assert.NotPanics(t, func() {
initializeLocalMsp(conf)
signer := &server_mocks.SignerSerializer{}
lf, _ := createLedgerFactory(conf)
bootBlock := encoder.New(genesisconfig.Load(genesisconfig.SampleDevModeSoloProfile)).GenesisBlockForChannel("system")
initializeMultichannelRegistrar(bootBlock, &replicationInitiator{}, &cluster.PredicateDialer{}, comm.ServerConfig{}, nil, conf, signer, &disabled.Provider{}, &server_mocks.HealthChecker{}, lf)
})
}
func TestInitializeGrpcServer(t *testing.T) {
// get a free random port
listenAddr := func() string {
l, _ := net.Listen("tcp", "localhost:0")
l.Close()
return l.Addr().String()
}()
host := strings.Split(listenAddr, ":")[0]
port, _ := strconv.ParseUint(strings.Split(listenAddr, ":")[1], 10, 16)
conf := &localconfig.TopLevel{
General: localconfig.General{
ListenAddress: host,
ListenPort: uint16(port),
TLS: localconfig.TLS{
Enabled: false,
ClientAuthRequired: false,
},
},
}
assert.NotPanics(t, func() {
grpcServer := initializeGrpcServer(conf, initializeServerConfig(conf, nil))
grpcServer.Listener().Close()
})
}
func TestUpdateTrustedRoots(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
initializeLocalMsp(genesisConfig(t))
// get a free random port
listenAddr := func() string {
l, _ := net.Listen("tcp", "localhost:0")
l.Close()
return l.Addr().String()
}()
port, _ := strconv.ParseUint(strings.Split(listenAddr, ":")[1], 10, 16)
conf := &localconfig.TopLevel{
General: localconfig.General{
ListenAddress: "localhost",
ListenPort: uint16(port),
TLS: localconfig.TLS{
Enabled: false,
ClientAuthRequired: false,
},
},
}
grpcServer := initializeGrpcServer(conf, initializeServerConfig(conf, nil))
caMgr := &caManager{
appRootCAsByChain: make(map[string][][]byte),
ordererRootCAsByChain: make(map[string][][]byte),
}
callback := func(bundle *channelconfig.Bundle) {
if grpcServer.MutualTLSRequired() {
t.Log("callback called")
caMgr.updateTrustedRoots(bundle, grpcServer)
}
}
lf, _ := createLedgerFactory(conf)
bootBlock := encoder.New(genesisconfig.Load(genesisconfig.SampleDevModeSoloProfile)).GenesisBlockForChannel("system")
signer := &server_mocks.SignerSerializer{}
initializeMultichannelRegistrar(
bootBlock,
&replicationInitiator{},
&cluster.PredicateDialer{},
comm.ServerConfig{},
nil,
genesisConfig(t),
signer,
&disabled.Provider{},
&server_mocks.HealthChecker{},
lf,
callback,
)
t.Logf("# app CAs: %d", len(caMgr.appRootCAsByChain[genesisconfig.TestChainID]))
t.Logf("# orderer CAs: %d", len(caMgr.ordererRootCAsByChain[genesisconfig.TestChainID]))
// mutual TLS not required so no updates should have occurred
assert.Equal(t, 0, len(caMgr.appRootCAsByChain[genesisconfig.TestChainID]))
assert.Equal(t, 0, len(caMgr.ordererRootCAsByChain[genesisconfig.TestChainID]))
grpcServer.Listener().Close()
conf = &localconfig.TopLevel{
General: localconfig.General{
ListenAddress: "localhost",
ListenPort: uint16(port),
TLS: localconfig.TLS{
Enabled: true,
ClientAuthRequired: true,
PrivateKey: filepath.Join(".", "testdata", "tls", "server.key"),
Certificate: filepath.Join(".", "testdata", "tls", "server.crt"),
},
},
}
grpcServer = initializeGrpcServer(conf, initializeServerConfig(conf, nil))
caMgr = &caManager{
appRootCAsByChain: make(map[string][][]byte),
ordererRootCAsByChain: make(map[string][][]byte),
}
predDialer := &cluster.PredicateDialer{}
clusterConf := initializeClusterClientConfig(conf)
predDialer.SetConfig(clusterConf)
callback = func(bundle *channelconfig.Bundle) {
if grpcServer.MutualTLSRequired() {
t.Log("callback called")
caMgr.updateTrustedRoots(bundle, grpcServer)
caMgr.updateClusterDialer(predDialer, clusterConf.SecOpts.ServerRootCAs)
}
}
initializeMultichannelRegistrar(
bootBlock,
&replicationInitiator{},
&cluster.PredicateDialer{},
comm.ServerConfig{},
nil,
genesisConfig(t),
signer,
&disabled.Provider{},
&server_mocks.HealthChecker{},
lf,
callback,
)
t.Logf("# app CAs: %d", len(caMgr.appRootCAsByChain[genesisconfig.TestChainID]))
t.Logf("# orderer CAs: %d", len(caMgr.ordererRootCAsByChain[genesisconfig.TestChainID]))
// mutual TLS is required so updates should have occurred
// we expect an intermediate and root CA for apps and orderers
assert.Equal(t, 2, len(caMgr.appRootCAsByChain[genesisconfig.TestChainID]))
assert.Equal(t, 2, len(caMgr.ordererRootCAsByChain[genesisconfig.TestChainID]))
assert.Len(t, predDialer.Config.Load().(comm.ClientConfig).SecOpts.ServerRootCAs, 2)
grpcServer.Listener().Close()
}
func TestConfigureClusterListener(t *testing.T) {
logEntries := make(chan string, 100)
allocatePort := func() uint16 {
l, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
_, portStr, err := net.SplitHostPort(l.Addr().String())
assert.NoError(t, err)
port, err := strconv.ParseInt(portStr, 10, 64)
assert.NoError(t, err)
assert.NoError(t, l.Close())
t.Log("picked unused port", port)
return uint16(port)
}
unUsedPort := allocatePort()
backupLogger := logger
logger = logger.With(zap.Hooks(func(entry zapcore.Entry) error {
logEntries <- entry.Message
return nil
}))
defer func() {
logger = backupLogger
}()
ca, err := tlsgen.NewCA()
assert.NoError(t, err)
serverKeyPair, err := ca.NewServerCertKeyPair("127.0.0.1")
assert.NoError(t, err)
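// loadPEM stands in for the file loader passed to configureClusterListener, serving the in-memory test certificates instead of reading from disk.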
loadPEM := func(fileName string) ([]byte, error) {
switch fileName {
case "cert":
return serverKeyPair.Cert, nil
case "key":
return serverKeyPair.Key, nil
case "ca":
return ca.CertBytes(), nil
default:
return nil, errors.New("I/O error")
}
}
for _, testCase := range []struct {
name string
conf *localconfig.TopLevel
generalConf comm.ServerConfig
generalSrv *comm.GRPCServer
shouldBeEqual bool
expectedPanic string
expectedLogEntries []string
}{
{
name: "no separate listener",
shouldBeEqual: true,
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{},
generalSrv: &comm.GRPCServer{},
},
{
name: "partial configuration",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenPort: 5000,
},
},
},
expectedPanic: "Options: General.Cluster.ListenPort, General.Cluster.ListenAddress, " +
"General.Cluster.ServerCertificate, General.Cluster.ServerPrivateKey, should be defined altogether.",
generalSrv: &comm.GRPCServer{},
expectedLogEntries: []string{"Options: General.Cluster.ListenPort, General.Cluster.ListenAddress, " +
"General.Cluster.ServerCertificate," +
" General.Cluster.ServerPrivateKey, should be defined altogether."},
},
{
name: "invalid certificate",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerPrivateKey: "key",
ServerCertificate: "bad",
RootCAs: []string{"ca"},
},
},
},
expectedPanic: "Failed to load cluster server certificate from 'bad' (I/O error)",
generalSrv: &comm.GRPCServer{},
expectedLogEntries: []string{"Failed to load cluster server certificate from 'bad' (I/O error)"},
},
{
name: "invalid key",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerPrivateKey: "bad",
ServerCertificate: "cert",
RootCAs: []string{"ca"},
},
},
},
expectedPanic: "Failed to load cluster server key from 'bad' (I/O error)",
generalSrv: &comm.GRPCServer{},
expectedLogEntries: []string{"Failed to load cluster server certificate from 'bad' (I/O error)"},
},
{
name: "invalid ca cert",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerPrivateKey: "key",
ServerCertificate: "cert",
RootCAs: []string{"bad"},
},
},
},
expectedPanic: "Failed to load CA cert file 'I/O error' (bad)",
generalSrv: &comm.GRPCServer{},
expectedLogEntries: []string{"Failed to load CA cert file 'I/O error' (bad)"},
},
{
name: "bad listen address",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "99.99.99.99",
ListenPort: unUsedPort,
ServerPrivateKey: "key",
ServerCertificate: "cert",
RootCAs: []string{"ca"},
},
},
},
expectedPanic: fmt.Sprintf("Failed creating gRPC server on 99.99.99.99:%d due "+
"to listen tcp 99.99.99.99:%d:", unUsedPort, unUsedPort),
generalSrv: &comm.GRPCServer{},
},
{
name: "green path",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerPrivateKey: "key",
ServerCertificate: "cert",
RootCAs: []string{"ca"},
},
},
},
generalSrv: &comm.GRPCServer{},
},
} {
t.Run(testCase.name, func(t *testing.T) {
if testCase.shouldBeEqual {
conf, srv := configureClusterListener(testCase.conf, testCase.generalConf, testCase.generalSrv, loadPEM)
assert.Equal(t, conf, testCase.generalConf)
assert.Equal(t, srv, testCase.generalSrv)
}
if testCase.expectedPanic != "" {
f := func() {
configureClusterListener(testCase.conf, testCase.generalConf, testCase.generalSrv, loadPEM)
}
assert.Contains(t, panicMsg(f), testCase.expectedPanic)
} else {
configureClusterListener(testCase.conf, testCase.generalConf, testCase.generalSrv, loadPEM)
}
// Ensure every message that was logged is among the expected log entries
var loggedMessages []string
for len(logEntries) > 0 {
logEntry := <-logEntries
loggedMessages = append(loggedMessages, logEntry)
}
assert.Subset(t, testCase.expectedLogEntries, loggedMessages)
})
}
}
func TestInitializeEtcdraftConsenter(t *testing.T) {
consenters := make(map[string]consensus.Consenter)
rlf := ramledger.New(10)
conf := configtxgentest.Load(genesisconfig.SampleInsecureSoloProfile)
genesisBlock := encoder.New(conf).GenesisBlock()
ca, _ := tlsgen.NewCA()
crt, _ := ca.NewServerCertKeyPair("127.0.0.1")
srv, err := comm.NewGRPCServer("127.0.0.1:0", comm.ServerConfig{})
assert.NoError(t, err)
initializeEtcdraftConsenter(consenters,
&localconfig.TopLevel{},
rlf,
&cluster.PredicateDialer{},
genesisBlock, &replicationInitiator{},
comm.ServerConfig{
SecOpts: &comm.SecureOptions{
Certificate: crt.Cert,
Key: crt.Key,
UseTLS: true,
},
}, srv, &multichannel.Registrar{}, &disabled.Provider{})
assert.NotNil(t, consenters["etcdraft"])
}
func genesisConfig(t *testing.T) *localconfig.TopLevel {
t.Helper()
localMSPDir, _ := configtest.GetDevMspDir()
return &localconfig.TopLevel{
General: localconfig.General{
LedgerType: "ram",
GenesisMethod: "provisional",
GenesisProfile: "SampleDevModeSolo",
SystemChannel: genesisconfig.TestChainID,
LocalMSPDir: localMSPDir,
LocalMSPID: "SampleOrg",
BCCSP: &factory.FactoryOpts{
ProviderName: "SW",
SwOpts: &factory.SwOpts{
HashFamily: "SHA2",
SecLevel: 256,
Ephemeral: true,
},
},
},
}
}
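// panicMsg runs f, recovers the panic it is expected to raise, and returns the
// recovered value as a string (f is assumed to panic with a string value).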
func panicMsg(f func()) string {
var message interface{}
func() {
defer func() {
message = recover()
}()
f()
}()
return message.(string)
}
func TestCreateReplicator(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
bootBlock := encoder.New(genesisconfig.Load(genesisconfig.SampleDevModeSoloProfile)).GenesisBlockForChannel("system")
iterator := &deliver_mocks.BlockIterator{}
iterator.NextReturnsOnCall(0, bootBlock, common.Status_SUCCESS)
iterator.NextReturnsOnCall(1, bootBlock, common.Status_SUCCESS)
ledger := &ledger_mocks.ReadWriter{}
ledger.On("Height").Return(uint64(1))
ledger.On("Iterator", mock.Anything).Return(iterator, uint64(1))
ledgerFactory := &server_mocks.Factory{}
ledgerFactory.On("GetOrCreate", "mychannel").Return(ledger, nil)
ledgerFactory.On("ChainIDs").Return([]string{"mychannel"})
signer := &server_mocks.SignerSerializer{}
r := createReplicator(ledgerFactory, bootBlock, &localconfig.TopLevel{}, &comm.SecureOptions{}, signer)
err := r.verifierRetriever.RetrieveVerifier("mychannel").VerifyBlockSignature(nil, nil)
assert.EqualError(t, err, "implicit policy evaluation failed - 0 sub-policies were satisfied, but this policy requires 1 of the 'Writers' sub-policies to be satisfied")
err = r.verifierRetriever.RetrieveVerifier("system").VerifyBlockSignature(nil, nil)
assert.NoError(t, err)
}
| [
"\"FABRIC_LOGGING_SPEC\"",
"\"FABRIC_LOGGING_SPEC\""
]
| []
| [
"FABRIC_LOGGING_SPEC"
]
| [] | ["FABRIC_LOGGING_SPEC"] | go | 1 | 0 | |
go/src/github.com/hashicorp/terraform/builtin/providers/vcd/resource_vcd_vapp_test.go | package vcd
import (
"fmt"
"os"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/hmrc/vmware-govcd"
)
func TestAccVcdVApp_PowerOff(t *testing.T) {
var vapp govcd.VApp
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckVcdVAppDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: fmt.Sprintf(testAccCheckVcdVApp_basic, os.Getenv("VCD_EDGE_GATWEWAY")),
Check: resource.ComposeTestCheckFunc(
testAccCheckVcdVAppExists("vcd_vapp.foobar", &vapp),
testAccCheckVcdVAppAttributes(&vapp),
resource.TestCheckResourceAttr(
"vcd_vapp.foobar", "name", "foobar"),
resource.TestCheckResourceAttr(
"vcd_vapp.foobar", "ip", "10.10.102.160"),
resource.TestCheckResourceAttr(
"vcd_vapp.foobar", "power_on", "true"),
),
},
resource.TestStep{
Config: fmt.Sprintf(testAccCheckVcdVApp_powerOff, os.Getenv("VCD_EDGE_GATWEWAY")),
Check: resource.ComposeTestCheckFunc(
testAccCheckVcdVAppExists("vcd_vapp.foobar", &vapp),
testAccCheckVcdVAppAttributes_off(&vapp),
resource.TestCheckResourceAttr(
"vcd_vapp.foobar", "name", "foobar"),
resource.TestCheckResourceAttr(
"vcd_vapp.foobar", "ip", "10.10.102.160"),
resource.TestCheckResourceAttr(
"vcd_vapp.foobar", "power_on", "false"),
),
},
},
})
}
func testAccCheckVcdVAppExists(n string, vapp *govcd.VApp) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No VAPP ID is set")
}
conn := testAccProvider.Meta().(*VCDClient)
resp, err := conn.OrgVdc.FindVAppByName(rs.Primary.ID)
if err != nil {
return err
}
*vapp = resp
return nil
}
}
func testAccCheckVcdVAppDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*VCDClient)
for _, rs := range s.RootModule().Resources {
if rs.Type != "vcd_vapp" {
continue
}
_, err := conn.OrgVdc.FindVAppByName(rs.Primary.ID)
if err == nil {
return fmt.Errorf("VPCs still exist")
}
return nil
}
return nil
}
func testAccCheckVcdVAppAttributes(vapp *govcd.VApp) resource.TestCheckFunc {
return func(s *terraform.State) error {
if vapp.VApp.Name != "foobar" {
return fmt.Errorf("Bad name: %s", vapp.VApp.Name)
}
if vapp.VApp.Name != vapp.VApp.Children.VM[0].Name {
return fmt.Errorf("VApp and VM names do not match. %s != %s",
vapp.VApp.Name, vapp.VApp.Children.VM[0].Name)
}
status, _ := vapp.GetStatus()
if status != "POWERED_ON" {
return fmt.Errorf("VApp is not powered on")
}
return nil
}
}
func testAccCheckVcdVAppAttributes_off(vapp *govcd.VApp) resource.TestCheckFunc {
return func(s *terraform.State) error {
if vapp.VApp.Name != "foobar" {
return fmt.Errorf("Bad name: %s", vapp.VApp.Name)
}
if vapp.VApp.Name != vapp.VApp.Children.VM[0].Name {
return fmt.Errorf("VApp and VM names do not match. %s != %s",
vapp.VApp.Name, vapp.VApp.Children.VM[0].Name)
}
status, _ := vapp.GetStatus()
if status != "POWERED_OFF" {
return fmt.Errorf("VApp is still powered on")
}
return nil
}
}
const testAccCheckVcdVApp_basic = `
resource "vcd_network" "foonet" {
name = "foonet"
edge_gateway = "%s"
gateway = "10.10.102.1"
static_ip_pool {
start_address = "10.10.102.2"
end_address = "10.10.102.254"
}
}
resource "vcd_vapp" "foobar" {
name = "foobar"
template_name = "base-centos-7.0-x86_64_v-0.1_b-74"
catalog_name = "NubesLab"
network_name = "${vcd_network.foonet.name}"
memory = 1024
cpus = 1
ip = "10.10.102.160"
}
`
const testAccCheckVcdVApp_powerOff = `
resource "vcd_network" "foonet" {
name = "foonet"
edge_gateway = "%s"
gateway = "10.10.102.1"
static_ip_pool {
start_address = "10.10.102.2"
end_address = "10.10.102.254"
}
}
resource "vcd_vapp" "foobar" {
name = "foobar"
template_name = "base-centos-7.0-x86_64_v-0.1_b-74"
catalog_name = "NubesLab"
network_name = "${vcd_network.foonet.name}"
memory = 1024
cpus = 1
ip = "10.10.102.160"
power_on = false
}
`
| [
"\"VCD_EDGE_GATWEWAY\"",
"\"VCD_EDGE_GATWEWAY\""
]
| []
| [
"VCD_EDGE_GATWEWAY"
]
| [] | ["VCD_EDGE_GATWEWAY"] | go | 1 | 0 | |
src/L12/3/assets.go | package main
import (
"time"
"github.com/jessevdk/go-assets"
)
var _Assetsbfa8d115ce0617d89507412d5393a462f8e9b003 = "<!doctype html>\r\n<body>\r\n <p>Can you see this? → {{.Bar}}</p>\r\n</body>\r\n"
var _Assets3737a75b5254ed1f6d588b40a3449721f9ea86c2 = "<!doctype html>\r\n<body>\r\n <p>你好, {{.Foo}}</p>\r\n</body>\r\n"
// Assets returns go-assets FileSystem
var Assets = assets.NewFileSystem(map[string][]string{"/": []string{"html"}, "/html": []string{"bar.tmpl", "index.tmpl"}}, map[string]*assets.File{
"/html/index.tmpl": &assets.File{
Path: "/html/index.tmpl",
FileMode: 0x1b6,
Mtime: time.Unix(1563171988, 1563171988942257600),
Data: []byte(_Assets3737a75b5254ed1f6d588b40a3449721f9ea86c2),
}, "/": &assets.File{
Path: "/",
FileMode: 0x800001ff,
Mtime: time.Unix(1563171963, 1563171963905208600),
Data: nil,
}, "/html": &assets.File{
Path: "/html",
FileMode: 0x800001ff,
Mtime: time.Unix(1563169994, 1563169994049574100),
Data: nil,
}, "/html/bar.tmpl": &assets.File{
Path: "/html/bar.tmpl",
FileMode: 0x1b6,
Mtime: time.Unix(1561636086, 1561636086915538600),
Data: []byte(_Assetsbfa8d115ce0617d89507412d5393a462f8e9b003),
}}, "")
| []
| []
| []
| [] | [] | go | null | null | null |
tools/generator/cmd/automation/automationCmd.go | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package automation
import (
"bufio"
"fmt"
"log"
"os"
"path/filepath"
"sort"
"strings"
"github.com/Azure/azure-sdk-for-go/tools/generator/autorest"
"github.com/Azure/azure-sdk-for-go/tools/generator/autorest/model"
"github.com/Azure/azure-sdk-for-go/tools/generator/cmd/automation/pipeline"
"github.com/Azure/azure-sdk-for-go/tools/generator/common"
"github.com/Azure/azure-sdk-for-go/tools/internal/exports"
"github.com/Azure/azure-sdk-for-go/tools/internal/packages/track1"
"github.com/Azure/azure-sdk-for-go/tools/internal/utils"
"github.com/spf13/cobra"
)
// Command returns the automation command. Note that this command is designed to run in the root directory of
// azure-sdk-for-go. It does not work if you run this tool anywhere else.
func Command() *cobra.Command {
automationCmd := &cobra.Command{
Use: "automation <generate input filepath> <generate output filepath>",
Args: cobra.ExactArgs(2),
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
log.SetFlags(0) // remove the time stamp prefix
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
optionPath, err := cmd.Flags().GetString("options")
if err != nil {
logError(err)
return err
}
if err := execute(args[0], args[1], Flags{
OptionPath: optionPath,
}); err != nil {
logError(err)
return err
}
return nil
},
SilenceUsage: true, // this command is used for a pipeline, the usage should never show
}
flags := automationCmd.Flags()
flags.String("options", common.DefaultOptionPath, "Specify a file with the autorest options")
return automationCmd
}
// Flags ...
type Flags struct {
OptionPath string
}
func execute(inputPath, outputPath string, flags Flags) error {
log.Printf("Reading generate input file from '%s'...", inputPath)
input, err := pipeline.ReadInput(inputPath)
if err != nil {
return fmt.Errorf("cannot read generate input: %+v", err)
}
log.Printf("Generating using the following GenerateInput...\n%s", input.String())
cwd, err := os.Getwd()
if err != nil {
return err
}
log.Printf("Using current directory as SDK root: %s", cwd)
ctx := automationContext{
sdkRoot: utils.NormalizePath(cwd),
specRoot: input.SpecFolder,
commitHash: input.HeadSha,
optionPath: flags.OptionPath,
}
output, err := ctx.generate(input)
if err != nil {
return err
}
log.Printf("Output generated: \n%s", output.String())
log.Printf("Writing output to file '%s'...", outputPath)
if err := pipeline.WriteOutput(outputPath, output); err != nil {
return fmt.Errorf("cannot write generate output: %+v", err)
}
return nil
}
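// tempDir returns the directory used for temporary files, honoring the TMP_DIR
// environment variable when it is set and falling back to the OS default otherwise.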
func tempDir() string {
if dir := os.Getenv("TMP_DIR"); dir != "" {
return dir
}
return os.TempDir()
}
type automationContext struct {
sdkRoot string
specRoot string
commitHash string
optionPath string
repoContent map[string]exports.Content
sdkVersion string
existingPackages existingPackageMap
defaultOptions model.Options
additionalOptions []model.Option
}
func (ctx *automationContext) categorizePackages() error {
ctx.existingPackages = existingPackageMap{}
serviceRoot := filepath.Join(ctx.sdkRoot, "services")
m, err := autorest.CollectGenerationMetadata(serviceRoot)
if err != nil {
return err
}
for path, metadata := range m {
// the path in the metadata map is the absolute path
relPath, err := filepath.Rel(ctx.sdkRoot, path)
if err != nil {
return err
}
ctx.existingPackages.add(utils.NormalizePath(relPath), metadata)
}
return nil
}
func (ctx *automationContext) readDefaultOptions() error {
log.Printf("Reading defaultOptions from file '%s'...", ctx.optionPath)
optionFile, err := os.Open(ctx.optionPath)
if err != nil {
return err
}
defer optionFile.Close() // close the options file once it has been parsed
generateOptions, err := model.NewGenerateOptionsFrom(optionFile)
if err != nil {
return err
}
// parsing the default options
defaultOptions, err := model.ParseOptions(generateOptions.AutorestArguments)
if err != nil {
return fmt.Errorf("cannot parse default options from %v: %+v", generateOptions.AutorestArguments, err)
}
// remove the `--multiapi` in default options
var options []model.Option
for _, o := range defaultOptions.Arguments() {
if v, ok := o.(model.FlagOption); ok && v.Flag() == "multiapi" {
continue
}
options = append(options, o)
}
ctx.defaultOptions = model.NewOptions(options...)
log.Printf("Autorest defaultOptions: \n%+v", ctx.defaultOptions.Arguments())
// parsing the additional options
additionalOptions, err := model.ParseOptions(generateOptions.AdditionalOptions)
if err != nil {
return fmt.Errorf("cannot parse additional options from %v: %+v", generateOptions.AdditionalOptions, err)
}
ctx.additionalOptions = additionalOptions.Arguments()
return nil
}
// TODO -- support dry run
func (ctx *automationContext) generate(input *pipeline.GenerateInput) (*pipeline.GenerateOutput, error) {
if input.DryRun {
return nil, fmt.Errorf("dry run not supported yet")
}
log.Printf("Reading packages in azure-sdk-for-go...")
if err := ctx.readRepoContent(); err != nil {
return nil, err
}
log.Printf("Reading metadata information in azure-sdk-for-go...")
if err := ctx.categorizePackages(); err != nil {
return nil, err
}
log.Printf("Reading default options...")
if err := ctx.readDefaultOptions(); err != nil {
return nil, err
}
log.Printf("Reading version number...")
if err := ctx.readVersion(); err != nil {
return nil, err
}
// iterate over all the readme
results := make([]pipeline.PackageResult, 0)
errorBuilder := generateErrorBuilder{}
for _, readme := range input.RelatedReadmeMdFiles {
generateCtx := generateContext{
sdkRoot: ctx.sdkRoot,
specRoot: ctx.specRoot,
commitHash: ctx.commitHash,
repoContent: ctx.repoContent,
existingPackages: ctx.existingPackages[readme],
defaultOptions: ctx.defaultOptions,
}
packageResults, errors := generateCtx.generate(readme)
if len(errors) != 0 {
errorBuilder.add(errors...)
continue
}
// iterate over the changed packages
set := packageResultSet{}
for _, p := range packageResults {
log.Printf("Getting package result for package '%s'", p.Package.PackageName)
content := p.Package.Changelog.ToCompactMarkdown()
breaking := p.Package.Changelog.HasBreakingChanges()
breakingChangeItems := p.Package.Changelog.GetBreakingChangeItems()
set.add(pipeline.PackageResult{
Version: ctx.sdkVersion,
PackageName: getPackageIdentifier(p.Package.PackageName),
Path: []string{p.Package.PackageName},
ReadmeMd: []string{readme},
Changelog: &pipeline.Changelog{
Content: &content,
HasBreakingChange: &breaking,
BreakingChangeItems: &breakingChangeItems,
},
})
}
results = append(results, set.toSlice()...)
}
// validate the sdk structure
log.Printf("Validating services directory structure...")
exceptions, err := loadExceptions(filepath.Join(ctx.sdkRoot, "tools/pkgchk/exceptions.txt"))
if err != nil {
return nil, err
}
if err := track1.VerifyWithDefaultVerifiers(filepath.Join(ctx.sdkRoot, "services"), exceptions); err != nil {
return nil, err
}
return &pipeline.GenerateOutput{
Packages: squashResults(results),
}, errorBuilder.build()
}
// squashResults squashes the package results by appending all of the `path`s in the following items to the first item
// By doing this, the SDK automation pipeline will only create one PR that contains all of the generation results
// instead of creating one PR for each generation result.
// This is to reduce the resource cost on GitHub
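// For example (illustrative), two results with Path ["services/a"] and ["services/b"]
// are squashed into one result with Path ["services/a", "services/b"] followed by a
// result whose Path is empty.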
func squashResults(packages []pipeline.PackageResult) []pipeline.PackageResult {
if len(packages) == 0 {
return packages
}
for i := 1; i < len(packages); i++ {
// append the path of the i-th item to the first
packages[0].Path = append(packages[0].Path, packages[i].Path...)
// erase the path on the i-th item
packages[i].Path = make([]string, 0)
}
return packages
}
func (ctx *automationContext) readRepoContent() error {
ctx.repoContent = make(map[string]exports.Content)
pkgs, err := track1.List(filepath.Join(ctx.sdkRoot, "services"))
if err != nil {
return fmt.Errorf("failed to list track 1 packages: %+v", err)
}
for _, pkg := range pkgs {
relativePath, err := filepath.Rel(ctx.sdkRoot, pkg.FullPath())
if err != nil {
return err
}
relativePath = utils.NormalizePath(relativePath)
if _, ok := ctx.repoContent[relativePath]; ok {
return fmt.Errorf("duplicate package: %s", pkg.Path())
}
exp, err := exports.Get(pkg.FullPath())
if err != nil {
return err
}
ctx.repoContent[relativePath] = exp
}
return nil
}
func (ctx *automationContext) readVersion() error {
v, err := ReadVersion(filepath.Join(ctx.sdkRoot, "version"))
if err != nil {
return err
}
ctx.sdkVersion = v
return nil
}
func contains(array []autorest.GenerateResult, item string) bool {
for _, r := range array {
if utils.NormalizePath(r.Package.PackageName) == utils.NormalizePath(item) {
return true
}
}
return false
}
type generateErrorBuilder struct {
errors []error
}
func (b *generateErrorBuilder) add(err ...error) {
b.errors = append(b.errors, err...)
}
func (b *generateErrorBuilder) build() error {
if len(b.errors) == 0 {
return nil
}
var messages []string
for _, err := range b.errors {
messages = append(messages, err.Error())
}
return fmt.Errorf("total %d error(s): \n%s", len(b.errors), strings.Join(messages, "\n"))
}
type packageResultSet map[string]pipeline.PackageResult
func (s packageResultSet) contains(r pipeline.PackageResult) bool {
_, ok := s[r.PackageName]
return ok
}
func (s packageResultSet) add(r pipeline.PackageResult) {
if s.contains(r) {
log.Printf("[WARNING] The result set already contains key %s with value %+v, but we are still trying to insert a new value %+v on the same key", r.PackageName, s[r.PackageName], r)
}
s[r.PackageName] = r
}
func (s packageResultSet) toSlice() []pipeline.PackageResult {
results := make([]pipeline.PackageResult, 0)
for _, r := range s {
results = append(results, r)
}
// sort the results
sort.SliceStable(results, func(i, j int) bool {
// we first clip the preview segment and then sort by string literal
pI := strings.Replace(results[i].PackageName, "preview/", "/", 1)
pJ := strings.Replace(results[j].PackageName, "preview/", "/", 1)
return pI > pJ
})
return results
}
func getPackageIdentifier(pkg string) string {
return strings.TrimPrefix(utils.NormalizePath(pkg), "services/")
}
func loadExceptions(exceptFile string) (map[string]bool, error) {
if exceptFile == "" {
return nil, nil
}
f, err := os.Open(exceptFile)
if err != nil {
return nil, err
}
defer f.Close()
exceptions := make(map[string]bool)
scanner := bufio.NewScanner(f)
for scanner.Scan() {
exceptions[scanner.Text()] = true
}
if err = scanner.Err(); err != nil {
return nil, err
}
return exceptions, nil
}
func logError(err error) {
for _, line := range strings.Split(err.Error(), "\n") {
if l := strings.TrimSpace(line); l != "" {
log.Printf("[ERROR] %s", l)
}
}
}
| [
"\"TMP_DIR\""
]
| []
| [
"TMP_DIR"
]
| [] | ["TMP_DIR"] | go | 1 | 0 | |
internal/storage/google_cloud_storage_test.go | // Copyright 2020 the Exposure Notifications Server authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build google all
package storage
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
"testing"
"cloud.google.com/go/storage"
"github.com/google/exposure-notifications-server/internal/project"
)
func maybeSkipCloudStorage(tb testing.TB) {
tb.Helper()
if testing.Short() {
tb.Skipf("🚧 Skipping Google Cloud Storage tests (short)!")
}
if skip, _ := strconv.ParseBool(os.Getenv("SKIP_GOOGLE_CLOUD_STORAGE_TESTS")); skip {
tb.Skipf("🚧 Skipping Google Cloud Storage tests (SKIP_GOOGLE_CLOUD_STORAGE_TESTS is set)!")
}
}
func testGoogleCloudStorageClient(tb testing.TB) *storage.Client {
tb.Helper()
maybeSkipCloudStorage(tb)
ctx := project.TestContext(tb)
client, err := storage.NewClient(ctx)
if err != nil {
tb.Fatal(err)
}
return client
}
func testName(tb testing.TB) string {
tb.Helper()
var b [512]byte
if _, err := rand.Read(b[:]); err != nil {
tb.Fatalf("failed to generate random: %v", err)
}
digest := fmt.Sprintf("%x", sha256.Sum256(b[:]))
return digest[:32]
}
func testGoogleCloudStorageBucket(tb testing.TB) string {
tb.Helper()
maybeSkipCloudStorage(tb)
bucketID := os.Getenv("GOOGLE_CLOUD_BUCKET")
if bucketID == "" {
tb.Fatal("missing GOOGLE_CLOUD_BUCKET!")
}
return bucketID
}
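// testGoogleCloudStorageObject uploads the contents of r to a randomly named object
// in the test bucket and registers a cleanup function that deletes it afterwards.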
func testGoogleCloudStorageObject(tb testing.TB, r io.Reader) string {
tb.Helper()
maybeSkipCloudStorage(tb)
ctx := project.TestContext(tb)
client := testGoogleCloudStorageClient(tb)
bucket := testGoogleCloudStorageBucket(tb)
name := testName(tb)
// Create the object.
w := client.Bucket(bucket).Object(name).NewWriter(ctx)
if _, err := io.Copy(w, r); err != nil {
tb.Fatalf("failed to create object: %v", err)
}
if err := w.Close(); err != nil {
tb.Fatalf("failed to close writer: %v", err)
}
// Schedule cleanup.
tb.Cleanup(func() {
if err := client.Bucket(bucket).Object(name).Delete(ctx); err != nil && !errors.Is(err, storage.ErrObjectNotExist) {
tb.Fatalf("failed cleaning up %s: %v", name, err)
}
})
return name
}
func TestGoogleCloudStorage_CreateObject(t *testing.T) {
t.Parallel()
ctx := project.TestContext(t)
client := testGoogleCloudStorageClient(t)
bucket := testGoogleCloudStorageBucket(t)
object := testGoogleCloudStorageObject(t, strings.NewReader("contents"))
cases := []struct {
name string
bucket string
object string
contents []byte
err bool
}{
{
name: "default",
bucket: bucket,
object: testName(t),
contents: []byte("contents"),
},
{
name: "already_exists",
bucket: bucket,
object: object,
contents: []byte("contents"),
err: false,
},
{
name: "bad_bucket",
bucket: "totally-like-not-a-real-bucket",
object: testName(t),
err: true,
},
}
for _, tc := range cases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
gcsStorage, err := NewGoogleCloudStorage(ctx, &Config{})
if err != nil {
t.Fatal(err)
}
err = gcsStorage.CreateObject(ctx, tc.bucket, tc.object, tc.contents, false, ContentTypeZip)
if (err != nil) != tc.err {
t.Fatal(err)
}
if !tc.err {
r, err := client.Bucket(tc.bucket).Object(tc.object).NewReader(ctx)
if err != nil {
t.Fatal(err)
}
defer r.Close()
contents, err := io.ReadAll(r)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(contents, tc.contents) {
t.Errorf("expected %q to be %q ", contents, tc.contents)
}
}
})
}
}
func TestGoogleCloudStorage_DeleteObject(t *testing.T) {
t.Parallel()
ctx := project.TestContext(t)
client := testGoogleCloudStorageClient(t)
bucket := testGoogleCloudStorageBucket(t)
object := testGoogleCloudStorageObject(t, strings.NewReader("contents"))
cases := []struct {
name string
bucket string
object string
}{
{
name: "default",
bucket: bucket,
object: object,
},
{
name: "bucket_not_exist",
bucket: "totally-like-not-a-real-bucket",
object: object,
},
{
name: "file_not_exist",
bucket: bucket,
object: "not-exist",
},
}
for _, tc := range cases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
gcsStorage, err := NewGoogleCloudStorage(ctx, &Config{})
if err != nil {
t.Fatal(err)
}
if err := gcsStorage.DeleteObject(ctx, tc.bucket, tc.object); err != nil {
t.Fatal(err)
}
if _, err := client.Bucket(tc.bucket).Object(tc.object).Attrs(ctx); !errors.Is(err, storage.ErrObjectNotExist) {
t.Errorf("expected object %v to be deleted", tc.object)
}
})
}
}
| [
"\"SKIP_GOOGLE_CLOUD_STORAGE_TESTS\"",
"\"GOOGLE_CLOUD_BUCKET\""
]
| []
| [
"SKIP_GOOGLE_CLOUD_STORAGE_TESTS",
"GOOGLE_CLOUD_BUCKET"
]
| [] | ["SKIP_GOOGLE_CLOUD_STORAGE_TESTS", "GOOGLE_CLOUD_BUCKET"] | go | 2 | 0 | |
template/golang-middleware/main.go | package main
import (
"context"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"strconv"
"sync/atomic"
"syscall"
"time"
"handler/function"
)
var (
acceptingConnections int32
)
const defaultTimeout = 10 * time.Second
func main() {
readTimeout := parseIntOrDurationValue(os.Getenv("read_timeout"), defaultTimeout)
writeTimeout := parseIntOrDurationValue(os.Getenv("write_timeout"), defaultTimeout)
s := &http.Server{
Addr: fmt.Sprintf(":%d", 8082),
ReadTimeout: readTimeout,
WriteTimeout: writeTimeout,
MaxHeaderBytes: 1 << 20, // Max header of 1MB
}
http.HandleFunc("/", function.Handle)
listenUntilShutdown(s, writeTimeout)
}
func listenUntilShutdown(s *http.Server, shutdownTimeout time.Duration) {
idleConnsClosed := make(chan struct{})
go func() {
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGTERM)
<-sig
log.Printf("[entrypoint] SIGTERM received.. shutting down server in %s\n", shutdownTimeout.String())
<-time.Tick(shutdownTimeout)
if err := s.Shutdown(context.Background()); err != nil {
log.Printf("[entrypoint] Error in Shutdown: %v", err)
}
log.Printf("[entrypoint] No new connections allowed. Exiting in: %s\n", shutdownTimeout.String())
<-time.Tick(shutdownTimeout)
close(idleConnsClosed)
}()
// Run the HTTP server in a separate goroutine.
go func() {
if err := s.ListenAndServe(); err != http.ErrServerClosed {
log.Printf("[entrypoint] Error ListenAndServe: %v", err)
close(idleConnsClosed)
}
}()
atomic.StoreInt32(&acceptingConnections, 1)
<-idleConnsClosed
}
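// parseIntOrDurationValue interprets val either as a number of seconds or as a Go
// duration string, returning fallback when val is empty or cannot be parsed.
// Illustrative examples: "10" -> 10s, "1m30s" -> 1m30s, "" -> fallback.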
func parseIntOrDurationValue(val string, fallback time.Duration) time.Duration {
if len(val) > 0 {
parsedVal, parseErr := strconv.Atoi(val)
if parseErr == nil && parsedVal >= 0 {
return time.Duration(parsedVal) * time.Second
}
}
duration, durationErr := time.ParseDuration(val)
if durationErr != nil {
return fallback
}
return duration
}
| [
"\"read_timeout\"",
"\"write_timeout\""
]
| []
| [
"write_timeout",
"read_timeout"
]
| [] | ["write_timeout", "read_timeout"] | go | 2 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# counsel_me directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, "counsel_me"))
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
google_test.go | package serp
import (
"os"
"reflect"
"strings"
"testing"
)
func setup() {
v := os.Getenv("API_KEY")
if len(v) == 0 {
setAPIKey("demo")
} else {
setAPIKey(v)
}
}
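// shouldSkip reports whether tests that call the live API should be skipped
// because no API_KEY is set in the environment.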
func shouldSkip() bool {
return len(os.Getenv("API_KEY")) == 0
}
func TestQuickStart(t *testing.T) {
if shouldSkip() {
t.Skip("API_KEY required")
return
}
setup()
parameter := map[string]string{
"q": "Coffee",
"location": "Portland, Oregon, United States",
"hl": "en",
"gl": "us",
"google_domain": "google.com",
"api_key": os.Getenv("API_KEY"),
"safe": "active",
"start": "10",
"num": "10",
"device": "desktop",
}
client := NewGoogleSearch(parameter)
rsp, err := client.GetJSON()
if err != nil {
t.Error(err)
return
}
if len(rsp.OrganicResults[0].Title) == 0 {
t.Error("empty title in local results")
return
}
}
// basic use case
func TestJSON(t *testing.T) {
if shouldSkip() {
t.Skip("API_KEY required")
return
}
setup()
parameter := map[string]string{
"api_key": os.Getenv("API_KEY"),
"q": "Coffee",
"location": "Portland"}
client := NewGoogleSearch(parameter)
rsp, err := client.GetJSON()
if err != nil {
t.Error("unexpected error", err)
return
}
result := rsp.OrganicResults[0]
if len(result.Title) == 0 {
t.Error("empty title in local results")
return
}
}
func TestJSONwithGlobalKey(t *testing.T) {
if shouldSkip() {
t.Skip("API_KEY required")
return
}
setup()
parameter := map[string]string{
"q": "Coffee",
"location": "Portland"}
client := NewGoogleSearch(parameter)
rsp, err := client.GetJSON()
if err != nil {
t.Error("unexpected error", err)
return
}
result := rsp.OrganicResults[0]
if len(result.Title) == 0 {
t.Error("empty title in local results")
return
}
}
func TestGetHTML(t *testing.T) {
if shouldSkip() {
t.Skip("API_KEY required")
return
}
parameter := map[string]string{
"q": "Coffee",
"location": "Portland"}
setup()
client := NewGoogleSearch(parameter)
data, err := client.GetHTML()
if err != nil {
t.Error("err must be nil")
return
}
if !strings.Contains(*data, "</html>") {
t.Error("data does not contains <html> tag")
}
}
func TestDecodeJson(t *testing.T) {
reader, err := os.Open("./data/search_coffee_sample.json")
if err != nil {
panic(err)
}
var sq Query
rsp, err := sq.decodeJSON(reader)
if err != nil {
t.Error("error should be nil", err)
return
}
if rsp.OrganicResults[0].Title != "Portland Roasting Coffee" {
t.Error("empty title in local results")
return
}
}
func TestDecodeJsonPage20(t *testing.T) {
t.Log("run test")
reader, err := os.Open("./data/search_coffee_sample_page20.json")
if err != nil {
panic(err)
}
var sq Query
rsp, err := sq.decodeJSON(reader)
if err != nil {
t.Error("error should be nil")
t.Error(err)
}
t.Log(reflect.ValueOf(rsp).MapKeys())
if rsp.OrganicResults[0].Title != "Coffee | HuffPost" {
t.Error("fail decoding the title ")
}
}
func TestDecodeJsonError(t *testing.T) {
reader, err := os.Open("./data/error_sample.json")
if err != nil {
panic(err)
}
var sq Query
_, err = sq.decodeJSON(reader)
if err == nil {
t.Error("unexpected nil error when decoding the error sample")
return
}
if strings.Compare(err.Error(), "Your account credit is too low, plesae add more credits.") != 0 {
t.Errorf("unexpected error message: %s", err.Error())
}
}
func TestGetLocation(t *testing.T) {
setup()
var rsp ResponseArray
var err error
rsp, err = GetLocation("Austin", 3)
if err != nil {
t.Error(err)
}
//log.Println(rsp[0])
first := rsp[0].(map[string]interface{})
googleID := first["google_id"].(float64)
if googleID != float64(200635) {
t.Error(googleID)
return
}
}
// func TestGetAccount(t *testing.T) {
// // Skip this test
// if len(os.Getenv("API_KEY")) == 0 {
// t.Skip("API_KEY required")
// return
// }
// setup()
// var rsp SerpResponse
// var err error
// rsp, err = GetAccount()
// if err != nil {
// t.Error(err)
// }
// if rsp["account_id"] == nil {
// t.Error("no account_id found")
// return
// }
// }
// Search archive API
func TestSearchArchive(t *testing.T) {
if len(os.Getenv("API_KEY")) == 0 {
t.Skip("API_KEY required")
return
}
setup()
parameter := map[string]string{
"api_key": os.Getenv("API_KEY"),
"q": "Coffee",
"location": "Portland"}
client := NewGoogleSearch(parameter)
rsp, err := client.GetJSON()
if err != nil {
t.Error("unexpected error", err)
return
}
searchID := rsp.SearchMetadata.ID
if len(searchID) == 0 {
t.Error("search_metadata.id must be defined")
}
searchArchive, err := client.GetSearchArchive(searchID)
if err != nil {
t.Error(err)
return
}
if searchArchive.SearchMetadata.ID != searchID {
t.Error("search_metadata.id do not match", searchArchive.SearchMetadata.ID, searchID)
}
}
| [
"\"API_KEY\"",
"\"API_KEY\"",
"\"API_KEY\"",
"\"API_KEY\"",
"\"API_KEY\"",
"\"API_KEY\"",
"\"API_KEY\""
]
| []
| [
"API_KEY"
]
| [] | ["API_KEY"] | go | 1 | 0 | |
pkg/router/httpTriggers.go | /*
Copyright 2016 The Fission Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package router
import (
"context"
"net/http"
"os"
"strconv"
"time"
"github.com/gorilla/mux"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
k8sCache "k8s.io/client-go/tools/cache"
fv1 "github.com/fission/fission/pkg/apis/core/v1"
"github.com/fission/fission/pkg/crd"
executorClient "github.com/fission/fission/pkg/executor/client"
genInformer "github.com/fission/fission/pkg/generated/informers/externalversions"
"github.com/fission/fission/pkg/throttler"
"github.com/fission/fission/pkg/utils"
"github.com/fission/fission/pkg/utils/otel"
)
// HTTPTriggerSet represents an HTTP trigger set
type HTTPTriggerSet struct {
*functionServiceMap
*mutableRouter
logger *zap.Logger
fissionClient *crd.FissionClient
kubeClient *kubernetes.Clientset
executor *executorClient.Client
resolver *functionReferenceResolver
triggers []fv1.HTTPTrigger
triggerInformer k8sCache.SharedIndexInformer
functions []fv1.Function
funcInformer k8sCache.SharedIndexInformer
updateRouterRequestChannel chan struct{}
tsRoundTripperParams *tsRoundTripperParams
isDebugEnv bool
svcAddrUpdateThrottler *throttler.Throttler
unTapServiceTimeout time.Duration
}
func makeHTTPTriggerSet(logger *zap.Logger, fmap *functionServiceMap, fissionClient *crd.FissionClient,
kubeClient *kubernetes.Clientset, executor *executorClient.Client, params *tsRoundTripperParams, isDebugEnv bool, unTapServiceTimeout time.Duration, actionThrottler *throttler.Throttler) *HTTPTriggerSet {
httpTriggerSet := &HTTPTriggerSet{
logger: logger.Named("http_trigger_set"),
functionServiceMap: fmap,
triggers: []fv1.HTTPTrigger{},
fissionClient: fissionClient,
kubeClient: kubeClient,
executor: executor,
updateRouterRequestChannel: make(chan struct{}, 10), // use buffer channel
tsRoundTripperParams: params,
isDebugEnv: isDebugEnv,
svcAddrUpdateThrottler: actionThrottler,
unTapServiceTimeout: unTapServiceTimeout,
}
informerFactory := genInformer.NewSharedInformerFactory(fissionClient, time.Minute*30)
httpTriggerSet.triggerInformer = informerFactory.Core().V1().HTTPTriggers().Informer()
httpTriggerSet.funcInformer = informerFactory.Core().V1().Functions().Informer()
httpTriggerSet.addTriggerHandlers()
httpTriggerSet.addFunctionHandlers()
return httpTriggerSet
}
func (ts *HTTPTriggerSet) subscribeRouter(ctx context.Context, mr *mutableRouter) {
resolver := makeFunctionReferenceResolver(&ts.funcInformer)
ts.resolver = resolver
ts.mutableRouter = mr
if ts.fissionClient == nil {
// Used in tests only.
mr.updateRouter(ts.getRouter(nil))
ts.logger.Info("skipping continuous trigger updates")
return
}
go ts.updateRouter()
go ts.syncTriggers()
go ts.runInformer(ctx, ts.funcInformer)
go ts.runInformer(ctx, ts.triggerInformer)
}
func defaultHomeHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
func routerHealthHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
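// getRouter builds a new mux.Router from the currently known HTTP triggers and
// functions; fnTimeoutMap maps function UIDs to their per-function timeouts.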
func (ts *HTTPTriggerSet) getRouter(fnTimeoutMap map[types.UID]int) *mux.Router {
muxRouter := mux.NewRouter()
openTracingEnabled, err := strconv.ParseBool(os.Getenv("OPENTRACING_ENABLED"))
if err != nil {
ts.logger.Fatal("error parsing OPENTRACING_ENABLED", zap.Error(err))
}
// HTTP triggers setup by the user
homeHandled := false
for i := range ts.triggers {
trigger := ts.triggers[i]
// resolve function reference
rr, err := ts.resolver.resolve(trigger)
if err != nil {
// Unresolvable function reference. Report the error via
// the trigger's status.
go ts.updateTriggerStatusFailed(&trigger, err)
// Ignore this route and let it 404.
continue
}
if rr.resolveResultType != resolveResultSingleFunction && rr.resolveResultType != resolveResultMultipleFunctions {
// not implemented yet
ts.logger.Panic("resolve result type not implemented", zap.Any("type", rr.resolveResultType))
}
fh := &functionHandler{
logger: ts.logger.Named(trigger.ObjectMeta.Name),
fmap: ts.functionServiceMap,
executor: ts.executor,
httpTrigger: &trigger,
functionMap: rr.functionMap,
fnWeightDistributionList: rr.functionWtDistributionList,
tsRoundTripperParams: ts.tsRoundTripperParams,
isDebugEnv: ts.isDebugEnv,
svcAddrUpdateThrottler: ts.svcAddrUpdateThrottler,
functionTimeoutMap: fnTimeoutMap,
unTapServiceTimeout: ts.unTapServiceTimeout,
openTracingEnabled: openTracingEnabled,
}
// For an HTTP trigger with fn reference type "FunctionReferenceTypeFunctionName",
// the functionHandler's function metadata is set here.
// For an HTTP trigger with fn reference type "FunctionReferenceTypeFunctionWeights",
// the function metadata is decided dynamically before proxying the request in order
// to support canary deployment. For more details, see the "handler" function of functionHandler.
if rr.resolveResultType == resolveResultSingleFunction {
for _, fn := range fh.functionMap {
fh.function = fn
}
}
var ht *mux.Route
if trigger.Spec.Prefix != nil && *trigger.Spec.Prefix != "" {
if openTracingEnabled {
ht = muxRouter.PathPrefix(*trigger.Spec.Prefix).HandlerFunc(fh.handler)
} else {
handler := otel.GetHandlerWithOTEL(http.HandlerFunc(fh.handler), *trigger.Spec.Prefix)
ht = muxRouter.PathPrefix(*trigger.Spec.Prefix).Handler(handler)
}
} else {
if openTracingEnabled {
ht = muxRouter.HandleFunc(trigger.Spec.RelativeURL, fh.handler)
} else {
handler := otel.GetHandlerWithOTEL(http.HandlerFunc(fh.handler), trigger.Spec.RelativeURL)
ht = muxRouter.Handle(trigger.Spec.RelativeURL, handler)
}
}
methods := trigger.Spec.Methods
if len(trigger.Spec.Method) > 0 {
present := false
for _, m := range trigger.Spec.Methods {
if m == trigger.Spec.Method {
present = true
break
}
}
if !present {
methods = append(methods, trigger.Spec.Method)
}
}
ht.Methods(methods...)
if trigger.Spec.Host != "" {
ht.Host(trigger.Spec.Host)
}
if trigger.Spec.Prefix == nil && trigger.Spec.RelativeURL == "/" && len(methods) == 1 && methods[0] == http.MethodGet {
homeHandled = true
}
}
if !homeHandled {
//
// This adds a no-op handler that returns 200-OK to make sure that the
// "GET /" request succeeds. This route is used by GKE Ingress (and
// perhaps other ingress implementations) as a health check, so we don't
// want it to be a 404 even if the user doesn't have a function mapped to
// this route.
//
muxRouter.HandleFunc("/", defaultHomeHandler).Methods("GET")
}
// Internal triggers for each function by name. Non-http
// triggers route into these.
for i := range ts.functions {
fn := ts.functions[i]
fh := &functionHandler{
logger: ts.logger.Named(fn.ObjectMeta.Name),
fmap: ts.functionServiceMap,
function: &fn,
executor: ts.executor,
tsRoundTripperParams: ts.tsRoundTripperParams,
isDebugEnv: ts.isDebugEnv,
svcAddrUpdateThrottler: ts.svcAddrUpdateThrottler,
functionTimeoutMap: fnTimeoutMap,
unTapServiceTimeout: ts.unTapServiceTimeout,
}
route := utils.UrlForFunction(fn.ObjectMeta.Name, fn.ObjectMeta.Namespace)
if openTracingEnabled {
muxRouter.PathPrefix(route).HandlerFunc(fh.handler)
} else {
otelHandler := otel.GetHandlerWithOTEL(http.HandlerFunc(fh.handler), route)
muxRouter.PathPrefix(route).Handler(otelHandler)
}
}
// Healthz endpoint for the router.
muxRouter.HandleFunc("/router-healthz", routerHealthHandler).Methods("GET")
return muxRouter
}
func (ts *HTTPTriggerSet) updateTriggerStatusFailed(ht *fv1.HTTPTrigger, err error) {
// TODO
}
func (ts *HTTPTriggerSet) addTriggerHandlers() {
ts.triggerInformer.AddEventHandler(k8sCache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
trigger := obj.(*fv1.HTTPTrigger)
go createIngress(ts.logger, trigger, ts.kubeClient)
ts.syncTriggers()
},
DeleteFunc: func(obj interface{}) {
ts.syncTriggers()
trigger := obj.(*fv1.HTTPTrigger)
go deleteIngress(ts.logger, trigger, ts.kubeClient)
},
UpdateFunc: func(oldObj interface{}, newObj interface{}) {
oldTrigger := oldObj.(*fv1.HTTPTrigger)
newTrigger := newObj.(*fv1.HTTPTrigger)
if oldTrigger.ObjectMeta.ResourceVersion == newTrigger.ObjectMeta.ResourceVersion {
return
}
go updateIngress(ts.logger, oldTrigger, newTrigger, ts.kubeClient)
ts.syncTriggers()
},
})
}
func (ts *HTTPTriggerSet) addFunctionHandlers() {
ts.funcInformer.AddEventHandler(k8sCache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ts.syncTriggers()
},
DeleteFunc: func(obj interface{}) {
ts.syncTriggers()
},
UpdateFunc: func(oldObj interface{}, newObj interface{}) {
oldFn := oldObj.(*fv1.Function)
fn := newObj.(*fv1.Function)
if oldFn.ObjectMeta.ResourceVersion == fn.ObjectMeta.ResourceVersion {
return
}
// update resolver function reference cache
for key, rr := range ts.resolver.copy() {
if key.namespace == fn.ObjectMeta.Namespace &&
rr.functionMap[fn.ObjectMeta.Name] != nil &&
rr.functionMap[fn.ObjectMeta.Name].ObjectMeta.ResourceVersion != fn.ObjectMeta.ResourceVersion {
// invalidate resolver cache
ts.logger.Debug("invalidating resolver cache")
err := ts.resolver.delete(key.namespace, key.triggerName, key.triggerResourceVersion)
if err != nil {
ts.logger.Error("error deleting functionReferenceResolver cache", zap.Error(err))
}
break
}
}
ts.syncTriggers()
},
})
}
func (ts *HTTPTriggerSet) runInformer(ctx context.Context, informer k8sCache.SharedIndexInformer) {
go func() {
informer.Run(ctx.Done())
}()
}
func (ts *HTTPTriggerSet) syncTriggers() {
ts.updateRouterRequestChannel <- struct{}{}
}
func (ts *HTTPTriggerSet) updateRouter() {
for range ts.updateRouterRequestChannel {
// get triggers
latestTriggers := ts.triggerInformer.GetStore().List()
triggers := make([]fv1.HTTPTrigger, len(latestTriggers))
for _, t := range latestTriggers {
triggers = append(triggers, *t.(*fv1.HTTPTrigger))
}
ts.triggers = triggers
// get functions
latestFunctions := ts.funcInformer.GetStore().List()
functionTimeout := make(map[types.UID]int, len(latestFunctions))
functions := make([]fv1.Function, len(latestFunctions))
for _, f := range latestFunctions {
fn := *f.(*fv1.Function)
functionTimeout[fn.ObjectMeta.UID] = fn.Spec.FunctionTimeout
functions = append(functions, *f.(*fv1.Function))
}
ts.functions = functions
// make a new router and use it
ts.mutableRouter.updateRouter(ts.getRouter(functionTimeout))
}
}
| [
"\"OPENTRACING_ENABLED\""
]
| []
| [
"OPENTRACING_ENABLED"
]
| [] | ["OPENTRACING_ENABLED"] | go | 1 | 0 | |
examples/db/listTables/listTables/main.go | package main
import (
"fmt"
"os"
"go.m3o.com"
"go.m3o.com/db"
)
func main() {
client := m3o.New(os.Getenv("M3O_API_TOKEN"))
rsp, err := client.Db.ListTables(&db.ListTablesRequest{})
fmt.Println(rsp, err)
}
| [
"\"M3O_API_TOKEN\""
]
| []
| [
"M3O_API_TOKEN"
]
| [] | ["M3O_API_TOKEN"] | go | 1 | 0 | |
internal/versioncheck/version.go | package versioncheck
import (
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/go-flutter-desktop/hover/internal/fileutils"
"github.com/go-flutter-desktop/hover/internal/log"
"github.com/pkg/errors"
"github.com/tcnksm/go-latest"
)
// CheckForGoFlutterUpdate checks the cached timestamp of the last 'go-flutter'
// update check for the current project. If that check is older than the check
// interval, it fetches the latest GitHub release semver. If the GitHub semver is
// more recent than the current one, it displays the update notice.
func CheckForGoFlutterUpdate(goDirectoryPath string, currentTag string) {
cachedGoFlutterCheckPath := filepath.Join(goDirectoryPath, ".last_goflutter_check")
cachedGoFlutterCheckBytes, err := ioutil.ReadFile(cachedGoFlutterCheckPath)
if err != nil && !os.IsNotExist(err) {
log.Warnf("Failed to read the go-flutter last update check: %v", err)
return
}
cachedGoFlutterCheck := string(cachedGoFlutterCheckBytes)
cachedGoFlutterCheck = strings.TrimSuffix(cachedGoFlutterCheck, "\n")
now := time.Now()
nowString := strconv.FormatInt(now.Unix(), 10)
if cachedGoFlutterCheck == "" {
err = ioutil.WriteFile(cachedGoFlutterCheckPath, []byte(nowString), 0664)
if err != nil {
log.Warnf("Failed to write the update timestamp: %v", err)
}
// If needed, update hover's .gitignore file with a new entry.
hoverGitignore := filepath.Join(goDirectoryPath, ".gitignore")
fileutils.AddLineToFile(hoverGitignore, ".last_goflutter_check")
return
}
i, err := strconv.ParseInt(cachedGoFlutterCheck, 10, 64)
if err != nil {
log.Warnf("Failed to parse the last update of go-flutter: %v", err)
return
}
lastUpdateTimeStamp := time.Unix(i, 0)
checkRate := 1.0
newCheck := now.Sub(lastUpdateTimeStamp).Hours() > checkRate ||
(now.Sub(lastUpdateTimeStamp).Minutes() < 1.0 && // keep the notice for X Minutes
now.Sub(lastUpdateTimeStamp).Minutes() > 0.0)
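// The release check can be disabled entirely by setting HOVER_IGNORE_CHECK_NEW_RELEASE=true.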
checkUpdateOptOut := os.Getenv("HOVER_IGNORE_CHECK_NEW_RELEASE")
if newCheck && checkUpdateOptOut != "true" {
log.Printf("Checking available release on Github")
// fetch the latest GitHub tag
githubTag := &latest.GithubTag{
Owner: "go-flutter-desktop",
Repository: "go-flutter",
FixVersionStrFunc: latest.DeleteFrontV(),
}
res, err := latest.Check(githubTag, currentTag)
if err != nil {
log.Warnf("Failed to check the latest release of 'go-flutter': %v", err)
// update the timestamp
// don't spam people who don't have internet access
now := time.Now().Add(time.Duration(checkRate) * time.Hour)
nowString := strconv.FormatInt(now.Unix(), 10)
err = ioutil.WriteFile(cachedGoFlutterCheckPath, []byte(nowString), 0664)
if err != nil {
log.Warnf("Failed to write the update timestamp to file: %v", err)
}
return
}
if res.Outdated {
log.Infof("The core library 'go-flutter' has an update available. (%s -> %s)", currentTag, res.Current)
log.Infof(" To update 'go-flutter' in this project run: `%s`", log.Au().Magenta("hover bumpversion"))
}
if now.Sub(lastUpdateTimeStamp).Hours() > checkRate {
// update the timestamp
err = ioutil.WriteFile(cachedGoFlutterCheckPath, []byte(nowString), 0664)
if err != nil {
log.Warnf("Failed to write the update timestamp to file: %v", err)
}
}
}
}
// CurrentGoFlutterTag retrieve the semver of go-flutter in 'go.mod'
func CurrentGoFlutterTag(goDirectoryPath string) (currentTag string, err error) {
goModPath := filepath.Join(goDirectoryPath, "go.mod")
goModBytes, err := ioutil.ReadFile(goModPath)
if err != nil && !os.IsNotExist(err) {
err = errors.Wrap(err, "Failed to read the 'go.mod' file: %v")
return
}
re := regexp.MustCompile(`\sgithub.com/go-flutter-desktop/go-flutter\s(\S*)`)
match := re.FindStringSubmatch(string(goModBytes))
if len(match) < 2 {
err = errors.New("Failed to parse the 'go-flutter' version in go.mod")
return
}
currentTag = match[1]
return
}
| [
"\"HOVER_IGNORE_CHECK_NEW_RELEASE\""
]
| []
| [
"HOVER_IGNORE_CHECK_NEW_RELEASE"
]
| [] | ["HOVER_IGNORE_CHECK_NEW_RELEASE"] | go | 1 | 0 | |
pkg/network/render.go | package network
import (
"log"
"net"
"os"
"path/filepath"
"reflect"
"strings"
"github.com/pkg/errors"
operv1 "github.com/openshift/api/operator/v1"
"github.com/openshift/cluster-network-operator/pkg/bootstrap"
"github.com/openshift/cluster-network-operator/pkg/render"
iputil "github.com/openshift/cluster-network-operator/pkg/util/ip"
uns "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
utilnet "k8s.io/utils/net"
)
func Render(conf *operv1.NetworkSpec, bootstrapResult *bootstrap.BootstrapResult, manifestDir string) ([]*uns.Unstructured, error) {
log.Printf("Starting render phase")
objs := []*uns.Unstructured{}
// render Multus
o, err := renderMultus(conf, manifestDir)
if err != nil {
return nil, err
}
objs = append(objs, o...)
// render MultusAdmissionController
o, err = renderMultusAdmissionController(conf, manifestDir)
if err != nil {
return nil, err
}
objs = append(objs, o...)
// render MultiNetworkPolicy
o, err = renderMultiNetworkpolicy(conf, manifestDir)
if err != nil {
return nil, err
}
objs = append(objs, o...)
// render default network
o, err = renderDefaultNetwork(conf, bootstrapResult, manifestDir)
if err != nil {
return nil, err
}
objs = append(objs, o...)
// render kube-proxy
o, err = renderStandaloneKubeProxy(conf, manifestDir)
if err != nil {
return nil, err
}
objs = append(objs, o...)
// render additional networks
o, err = renderAdditionalNetworks(conf, manifestDir)
if err != nil {
return nil, err
}
objs = append(objs, o...)
// render network diagnostics
o, err = renderNetworkDiagnostics(conf, manifestDir)
if err != nil {
return nil, err
}
objs = append(objs, o...)
log.Printf("Render phase done, rendered %d objects", len(objs))
return objs, nil
}
// deprecatedCanonicalizeIPAMConfig converts configuration to a canonical form
// for backward compatibility.
func deprecatedCanonicalizeIPAMConfig(conf *operv1.IPAMConfig) {
switch strings.ToLower(string(conf.Type)) {
case strings.ToLower(string(operv1.IPAMTypeDHCP)):
conf.Type = operv1.IPAMTypeDHCP
case strings.ToLower(string(operv1.IPAMTypeStatic)):
conf.Type = operv1.IPAMTypeStatic
}
}
// deprecatedCanonicalizeSimpleMacvlanConfig converts configuration to a canonical form
// for backward compatibility.
func deprecatedCanonicalizeSimpleMacvlanConfig(conf *operv1.SimpleMacvlanConfig) {
switch strings.ToLower(string(conf.Mode)) {
case strings.ToLower(string(operv1.MacvlanModeBridge)):
conf.Mode = operv1.MacvlanModeBridge
case strings.ToLower(string(operv1.MacvlanModePrivate)):
conf.Mode = operv1.MacvlanModePrivate
case strings.ToLower(string(operv1.MacvlanModeVEPA)):
conf.Mode = operv1.MacvlanModeVEPA
case strings.ToLower(string(operv1.MacvlanModePassthru)):
conf.Mode = operv1.MacvlanModePassthru
}
if conf.IPAMConfig != nil {
deprecatedCanonicalizeIPAMConfig(conf.IPAMConfig)
}
}
// DeprecatedCanonicalize converts configuration to a canonical form for backward
// compatibility.
//
// *** DO NOT ADD ANY NEW CANONICALIZATION TO THIS FUNCTION! ***
//
// Altering the user-provided configuration from CNO causes problems when other components
// need to look at the configuration before CNO starts. Users should just write the
// configuration in the correct form to begin with.
//
// However, we cannot remove any of the existing canonicalizations because this might
// break existing clusters.
func DeprecatedCanonicalize(conf *operv1.NetworkSpec) {
orig := conf.DeepCopy()
switch strings.ToLower(string(conf.DefaultNetwork.Type)) {
case strings.ToLower(string(operv1.NetworkTypeOpenShiftSDN)):
conf.DefaultNetwork.Type = operv1.NetworkTypeOpenShiftSDN
case strings.ToLower(string(operv1.NetworkTypeOVNKubernetes)):
conf.DefaultNetwork.Type = operv1.NetworkTypeOVNKubernetes
}
if conf.DefaultNetwork.Type == operv1.NetworkTypeOpenShiftSDN &&
conf.DefaultNetwork.OpenShiftSDNConfig != nil {
sdnc := conf.DefaultNetwork.OpenShiftSDNConfig
switch strings.ToLower(string(sdnc.Mode)) {
case strings.ToLower(string(operv1.SDNModeMultitenant)):
sdnc.Mode = operv1.SDNModeMultitenant
case strings.ToLower(string(operv1.SDNModeNetworkPolicy)):
sdnc.Mode = operv1.SDNModeNetworkPolicy
case strings.ToLower(string(operv1.SDNModeSubnet)):
sdnc.Mode = operv1.SDNModeSubnet
}
}
for idx, an := range conf.AdditionalNetworks {
switch strings.ToLower(string(an.Type)) {
case strings.ToLower(string(operv1.NetworkTypeRaw)):
conf.AdditionalNetworks[idx].Type = operv1.NetworkTypeRaw
case strings.ToLower(string(operv1.NetworkTypeSimpleMacvlan)):
conf.AdditionalNetworks[idx].Type = operv1.NetworkTypeSimpleMacvlan
}
if an.Type == operv1.NetworkTypeSimpleMacvlan && an.SimpleMacvlanConfig != nil {
deprecatedCanonicalizeSimpleMacvlanConfig(conf.AdditionalNetworks[idx].SimpleMacvlanConfig)
}
}
if !reflect.DeepEqual(orig, conf) {
log.Printf("WARNING: One or more fields of Network.operator.openshift.io was incorrectly capitalized. Although this has been fixed now, it is possible that other components previously saw the incorrect value and interpreted it incorrectly.")
log.Printf("Original spec: %#v\nModified spec: %#v\n", orig, conf)
}
}
// Validate checks that the supplied configuration is reasonable.
// This should be called after Canonicalize
func Validate(conf *operv1.NetworkSpec) error {
errs := []error{}
errs = append(errs, validateIPPools(conf)...)
errs = append(errs, validateDefaultNetwork(conf)...)
errs = append(errs, validateMultus(conf)...)
errs = append(errs, validateKubeProxy(conf)...)
if len(errs) > 0 {
return errors.Errorf("invalid configuration: %v", errs)
}
return nil
}
// FillDefaults computes any default values and applies them to the configuration
// This is a mutating operation. It should be called after Validate.
//
// Defaults are carried forward from previous if it is provided. This is so we
// can change defaults as we move forward, but won't disrupt existing clusters.
func FillDefaults(conf, previous *operv1.NetworkSpec) {
hostMTU, err := getDefaultMTU()
if hostMTU == 0 {
hostMTU = 1500
}
if previous == nil { // host mtu isn't used in subsequent runs, elide these logs
if err != nil {
log.Printf("Failed MTU probe, falling back to 1500: %v", err)
} else {
log.Printf("Detected uplink MTU %d", hostMTU)
}
}
// DisableMultiNetwork defaults to false
if conf.DisableMultiNetwork == nil {
disable := false
conf.DisableMultiNetwork = &disable
}
// UseMultiNetworkPolicy defaults to false
if conf.UseMultiNetworkPolicy == nil {
disable := false
conf.UseMultiNetworkPolicy = &disable
}
if len(conf.LogLevel) == 0 {
conf.LogLevel = "Normal"
}
fillDefaultNetworkDefaults(conf, previous, hostMTU)
fillKubeProxyDefaults(conf, previous)
}
// IsChangeSafe checks to see if the change between prev and next are allowed
// FillDefaults and Validate should have been called, but beware that prev may
// be from an older version.
func IsChangeSafe(prev, next *operv1.NetworkSpec) error {
if prev == nil {
return nil
}
// Easy way out: nothing changed.
if reflect.DeepEqual(prev, next) {
return nil
}
errs := []error{}
// Most ClusterNetworks/ServiceNetwork changes are not allowed
if err := isNetworkChangeSafe(prev, next); err != nil {
errs = append(errs, err)
}
// Check the network migration
errs = append(errs, isMigrationChangeSafe(prev, next)...)
// Check the default network
errs = append(errs, isDefaultNetworkChangeSafe(prev, next)...)
// Changing AdditionalNetworks is supported
if !reflect.DeepEqual(prev.DisableMultiNetwork, next.DisableMultiNetwork) {
errs = append(errs, errors.Errorf("cannot change DisableMultiNetwork"))
}
// Check MultiNetworkPolicy
errs = append(errs, isMultiNetworkpolicyChangeSafe(prev, next)...)
// Check kube-proxy
errs = append(errs, isKubeProxyChangeSafe(prev, next)...)
if len(errs) > 0 {
return errors.Errorf("invalid configuration: %v", errs)
}
return nil
}
func isNetworkChangeSafe(prev, next *operv1.NetworkSpec) error {
// Forbid changing service network during a migration
if prev.Migration != nil {
if !reflect.DeepEqual(prev.ServiceNetwork, next.ServiceNetwork) {
return errors.Errorf("cannot change ServiceNetwork during migration")
}
return nil
}
if reflect.DeepEqual(prev.ClusterNetwork, next.ClusterNetwork) && reflect.DeepEqual(prev.ServiceNetwork, next.ServiceNetwork) {
return nil
}
// Currently the only change we allow is switching to/from dual-stack.
//
// FIXME: the errors here currently do not actually mention dual-stack since it's
// not supported yet.
// validateIPPools() will have ensured that each config is independently either
// a valid single-stack config or a valid dual-stack config. Make sure we have
// one of each.
var singleStack, dualStack *operv1.NetworkSpec
switch {
case len(prev.ServiceNetwork) < len(next.ServiceNetwork):
// Going from single to dual
singleStack, dualStack = prev, next
case len(prev.ServiceNetwork) > len(next.ServiceNetwork):
// Going from dual to single
dualStack, singleStack = prev, next
default:
// They didn't change single-vs-dual
if reflect.DeepEqual(prev.ServiceNetwork, next.ServiceNetwork) {
return errors.Errorf("cannot change ClusterNetwork")
} else {
return errors.Errorf("cannot change ServiceNetwork")
}
}
// Validate that the shared ServiceNetwork entry is unchanged. (validateIPPools
// already checked that dualStack.ServiceNetwork[0] and [1] are of opposite IP
// families so we don't need to check that here.)
if singleStack.ServiceNetwork[0] != dualStack.ServiceNetwork[0] {
// User changed the primary service network, or tried to swap the order of
// the primary and secondary networks.
return errors.Errorf("cannot change ServiceNetwork")
}
// Validate that the shared ClusterNetwork entries are unchanged, and that ALL of
// the new ones in dualStack are of the opposite IP family from the shared ones.
// (ie, you cannot go from [ipv4] to [ipv4, ipv6, ipv4], even though the latter
// would have been valid as a new install.)
EntryZeroIsIPv6 := utilnet.IsIPv6CIDRString(singleStack.ClusterNetwork[0].CIDR)
for i := range dualStack.ClusterNetwork {
if i < len(singleStack.ClusterNetwork) {
if !reflect.DeepEqual(singleStack.ClusterNetwork[i], dualStack.ClusterNetwork[i]) {
// Changed or re-ordered an existing ClusterNetwork element
return errors.Errorf("cannot change ClusterNetwork")
}
} else if utilnet.IsIPv6CIDRString(dualStack.ClusterNetwork[i].CIDR) == EntryZeroIsIPv6 {
// Added a new element of the existing IP family
return errors.Errorf("cannot change ClusterNetwork")
}
}
return nil
}
// validateIPPools checks that all ClusterNetwork and ServiceNetwork entries are
// valid CIDRs and that they do not overlap with one another.
func validateIPPools(conf *operv1.NetworkSpec) []error {
errs := []error{}
// Check all networks for overlaps
pool := iputil.IPPool{}
var ipv4Service, ipv6Service, ipv4Cluster, ipv6Cluster bool
// Validate ServiceNetwork values
for _, snet := range conf.ServiceNetwork {
_, cidr, err := net.ParseCIDR(snet)
if err != nil {
errs = append(errs, errors.Wrapf(err, "could not parse spec.serviceNetwork %s", snet))
continue
}
if utilnet.IsIPv6CIDR(cidr) {
ipv6Service = true
} else {
ipv4Service = true
}
if err := pool.Add(*cidr); err != nil {
errs = append(errs, err)
}
}
// Validate count / dual-stack-ness
if len(conf.ServiceNetwork) == 0 {
errs = append(errs, errors.Errorf("spec.serviceNetwork must have at least 1 entry"))
} else if len(conf.ServiceNetwork) == 2 && !(ipv4Service && ipv6Service) {
errs = append(errs, errors.Errorf("spec.serviceNetwork must contain at most one IPv4 and one IPv6 network"))
} else if len(conf.ServiceNetwork) > 2 {
errs = append(errs, errors.Errorf("spec.serviceNetwork must contain at most one IPv4 and one IPv6 network"))
}
// validate clusternetwork
// - has an entry
// - it is a valid ip
// - has a reasonable cidr
// - they do not overlap and do not overlap with the service cidr
for _, cnet := range conf.ClusterNetwork {
_, cidr, err := net.ParseCIDR(cnet.CIDR)
if err != nil {
errs = append(errs, errors.Errorf("could not parse spec.clusterNetwork %s", cnet.CIDR))
continue
}
if utilnet.IsIPv6CIDR(cidr) {
ipv6Cluster = true
} else {
ipv4Cluster = true
}
// ignore hostPrefix if the plugin does not use it and has it unset
if pluginsUsingHostPrefix.Has(string(conf.DefaultNetwork.Type)) || (cnet.HostPrefix != 0) {
ones, bits := cidr.Mask.Size()
// The comparison is inverted; smaller number is larger block
if cnet.HostPrefix < uint32(ones) {
errs = append(errs, errors.Errorf("hostPrefix %d is larger than its cidr %s",
cnet.HostPrefix, cnet.CIDR))
}
if int(cnet.HostPrefix) > bits-2 {
errs = append(errs, errors.Errorf("hostPrefix %d is too small, must be a /%d or larger",
cnet.HostPrefix, bits-2))
}
}
if err := pool.Add(*cidr); err != nil {
errs = append(errs, err)
}
}
if len(conf.ClusterNetwork) < 1 {
errs = append(errs, errors.Errorf("spec.clusterNetwork must have at least 1 entry"))
}
if len(errs) == 0 && (ipv4Cluster != ipv4Service || ipv6Cluster != ipv6Service) {
errs = append(errs, errors.Errorf("spec.clusterNetwork and spec.serviceNetwork must either both be IPv4-only, both be IPv6-only, or both be dual-stack"))
}
return errs
}
// validateMultus validates the combination of DisableMultiNetwork and AdditionalNetworks
func validateMultus(conf *operv1.NetworkSpec) []error {
// DisableMultiNetwork defaults to false
deployMultus := true
if conf.DisableMultiNetwork != nil && *conf.DisableMultiNetwork {
deployMultus = false
}
// Additional Networks are useless without Multus, so don't let them
// exist without Multus and confuse things (for now)
if !deployMultus && len(conf.AdditionalNetworks) > 0 {
return []error{errors.Errorf("additional networks cannot be specified without deploying Multus")}
}
return []error{}
}
// validateDefaultNetwork validates whichever network is specified
// as the default network.
func validateDefaultNetwork(conf *operv1.NetworkSpec) []error {
switch conf.DefaultNetwork.Type {
case operv1.NetworkTypeOpenShiftSDN:
return validateOpenShiftSDN(conf)
case operv1.NetworkTypeOVNKubernetes:
return validateOVNKubernetes(conf)
case operv1.NetworkTypeKuryr:
return validateKuryr(conf)
default:
return nil
}
}
// renderDefaultNetwork generates the manifests corresponding to the requested
// default network
func renderDefaultNetwork(conf *operv1.NetworkSpec, bootstrapResult *bootstrap.BootstrapResult, manifestDir string) ([]*uns.Unstructured, error) {
dn := conf.DefaultNetwork
if errs := validateDefaultNetwork(conf); len(errs) > 0 {
return nil, errors.Errorf("invalid Default Network configuration: %v", errs)
}
switch dn.Type {
case operv1.NetworkTypeOpenShiftSDN:
return renderOpenShiftSDN(conf, bootstrapResult, manifestDir)
case operv1.NetworkTypeOVNKubernetes:
return renderOVNKubernetes(conf, bootstrapResult, manifestDir)
case operv1.NetworkTypeKuryr:
return renderKuryr(conf, bootstrapResult, manifestDir)
default:
log.Printf("NOTICE: Unknown network type %s, ignoring", dn.Type)
return nil, nil
}
}
func fillDefaultNetworkDefaults(conf, previous *operv1.NetworkSpec, hostMTU int) {
switch conf.DefaultNetwork.Type {
case operv1.NetworkTypeOpenShiftSDN:
fillOpenShiftSDNDefaults(conf, previous, hostMTU)
case operv1.NetworkTypeOVNKubernetes:
fillOVNKubernetesDefaults(conf, previous, hostMTU)
case operv1.NetworkTypeKuryr:
fillKuryrDefaults(conf, previous)
default:
}
}
func isDefaultNetworkChangeSafe(prev, next *operv1.NetworkSpec) []error {
if prev.DefaultNetwork.Type != next.DefaultNetwork.Type {
if prev.Migration == nil {
return []error{errors.Errorf("cannot change default network type when not doing migration")}
} else {
if prev.Migration.NetworkType != next.DefaultNetwork.Type {
return []error{errors.Errorf("can only change default network type to the target migration network type")}
}
}
}
if prev.Migration == nil {
switch prev.DefaultNetwork.Type {
case operv1.NetworkTypeOpenShiftSDN:
return isOpenShiftSDNChangeSafe(prev, next)
case operv1.NetworkTypeOVNKubernetes:
return isOVNKubernetesChangeSafe(prev, next)
case operv1.NetworkTypeKuryr:
return isKuryrChangeSafe(prev, next)
default:
return nil
}
}
return nil
}
func isMigrationChangeSafe(prev, next *operv1.NetworkSpec) []error {
if prev.Migration != nil && next.Migration != nil && prev.Migration.NetworkType != next.Migration.NetworkType {
return []error{errors.Errorf("cannot change migration network type after migration is start")}
}
return nil
}
// validateAdditionalNetworks validates the additional network configs
func validateAdditionalNetworks(conf *operv1.NetworkSpec) []error {
out := []error{}
ans := conf.AdditionalNetworks
for _, an := range ans {
switch an.Type {
case operv1.NetworkTypeRaw:
if errs := validateRaw(&an); len(errs) > 0 {
out = append(out, errs...)
}
case operv1.NetworkTypeSimpleMacvlan:
if errs := validateSimpleMacvlanConfig(&an); len(errs) > 0 {
out = append(out, errs...)
}
default:
out = append(out, errors.Errorf("unknown or unsupported NetworkType: %s", an.Type))
}
}
return out
}
// renderAdditionalNetworks generates the manifests of the requested additional networks
func renderAdditionalNetworks(conf *operv1.NetworkSpec, manifestDir string) ([]*uns.Unstructured, error) {
ans := conf.AdditionalNetworks
out := []*uns.Unstructured{}
// validate additional network configuration
if errs := validateAdditionalNetworks(conf); len(errs) > 0 {
return nil, errors.Errorf("invalid Additional Network Configuration: %v", errs)
}
if len(ans) == 0 {
return nil, nil
}
// render additional network configuration
for _, an := range ans {
switch an.Type {
case operv1.NetworkTypeRaw:
objs, err := renderRawCNIConfig(&an, manifestDir)
if err != nil {
return nil, err
}
out = append(out, objs...)
case operv1.NetworkTypeSimpleMacvlan:
objs, err := renderSimpleMacvlanConfig(&an, manifestDir)
if err != nil {
return nil, err
}
out = append(out, objs...)
default:
return nil, errors.Errorf("unknown or unsupported NetworkType: %s", an.Type)
}
}
return out, nil
}
// renderMultusAdmissionController generates the manifests of Multus Admission Controller
func renderMultusAdmissionController(conf *operv1.NetworkSpec, manifestDir string) ([]*uns.Unstructured, error) {
if *conf.DisableMultiNetwork {
return nil, nil
}
var err error
out := []*uns.Unstructured{}
objs, err := renderMultusAdmissonControllerConfig(manifestDir)
if err != nil {
return nil, err
}
out = append(out, objs...)
return out, nil
}
// renderMultiNetworkpolicy generates the manifests of MultiNetworkPolicy
func renderMultiNetworkpolicy(conf *operv1.NetworkSpec, manifestDir string) ([]*uns.Unstructured, error) {
// disable it if DisableMultiNetwork = true
if *conf.DisableMultiNetwork {
return nil, nil
}
if conf.UseMultiNetworkPolicy == nil || !*conf.UseMultiNetworkPolicy {
return nil, nil
}
var err error
out := []*uns.Unstructured{}
objs, err := renderMultiNetworkpolicyConfig(manifestDir)
if err != nil {
return nil, err
}
out = append(out, objs...)
return out, nil
}
// renderNetworkDiagnostics renders the connectivity checks
func renderNetworkDiagnostics(conf *operv1.NetworkSpec, manifestDir string) ([]*uns.Unstructured, error) {
if conf.DisableNetworkDiagnostics {
return nil, nil
}
data := render.MakeRenderData()
data.Data["ReleaseVersion"] = os.Getenv("RELEASE_VERSION")
data.Data["NetworkCheckSourceImage"] = os.Getenv("NETWORK_CHECK_SOURCE_IMAGE")
data.Data["NetworkCheckTargetImage"] = os.Getenv("NETWORK_CHECK_TARGET_IMAGE")
manifests, err := render.RenderDir(filepath.Join(manifestDir, "network-diagnostics"), &data)
if err != nil {
return nil, errors.Wrap(err, "failed to render network-diagnostics manifests")
}
return manifests, nil
}
| [
"\"RELEASE_VERSION\"",
"\"NETWORK_CHECK_SOURCE_IMAGE\"",
"\"NETWORK_CHECK_TARGET_IMAGE\""
]
| []
| [
"NETWORK_CHECK_SOURCE_IMAGE",
"NETWORK_CHECK_TARGET_IMAGE",
"RELEASE_VERSION"
]
| [] | ["NETWORK_CHECK_SOURCE_IMAGE", "NETWORK_CHECK_TARGET_IMAGE", "RELEASE_VERSION"] | go | 3 | 0 | |
jobbergate_api/asgi.py | import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jobbergate_api.settings")
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
deploy/settings.py | """
Django settings for deploy project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "abui+gd90l4@09maintgz8b9+y#p@&_+9&w(wu=9bpii1@iihr"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
# Disable Django's own staticfiles handling in favour of WhiteNoise, for
# greater consistency between gunicorn and `./manage.py runserver`. See:
# http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'deploy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'deploy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(
default=os.getenv('DATABASE_URL')
)
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| []
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | python | 1 | 0 | |
feapder/utils/tools.py | # -*- coding: utf-8 -*-
"""
Created on 2018-09-06 14:21
---------
@summary: 工具
---------
@author: Boris
@email: [email protected]
"""
import calendar
import codecs
import configparser # for reading config files
import datetime
import functools
import hashlib
import html
import json
import os
import pickle
import random
import re
import socket
import ssl
import string
import sys
import time
import traceback
import urllib
import urllib.parse
import uuid
import weakref
from hashlib import md5
from pprint import pformat
from pprint import pprint
from urllib import request
from urllib.parse import urljoin
import execjs # pip install PyExecJS
import redis
import requests
import six
from requests.cookies import RequestsCookieJar
from w3lib.url import canonicalize_url as _canonicalize_url
import feapder.setting as setting
from feapder.utils.email_sender import EmailSender
from feapder.utils.log import log
os.environ["EXECJS_RUNTIME"] = "Node" # 设置使用node执行js
# 全局取消ssl证书验证
ssl._create_default_https_context = ssl._create_unverified_context
TIME_OUT = 30
TIMER_TIME = 5
redisdb = None
def get_redisdb():
global redisdb
if not redisdb:
ip, port = setting.REDISDB_IP_PORTS.split(":")
redisdb = redis.Redis(
host=ip,
port=port,
db=setting.REDISDB_DB,
password=setting.REDISDB_USER_PASS,
decode_responses=True,
) # redis default port is 6379
return redisdb
# decorators
class Singleton(object):
def __init__(self, cls):
self._cls = cls
self._instance = {}
def __call__(self, *args, **kwargs):
if self._cls not in self._instance:
self._instance[self._cls] = self._cls(*args, **kwargs)
return self._instance[self._cls]
def log_function_time(func):
try:
@functools.wraps(func) # preserve the wrapped function's original attributes
def calculate_time(*args, **kw):
began_time = time.time()
callfunc = func(*args, **kw)
end_time = time.time()
log.debug(func.__name__ + " run time = " + str(end_time - began_time))
return callfunc
return calculate_time
except:
log.debug("求取时间无效 因为函数参数不符")
return func
def run_safe_model(module_name):
def inner_run_safe_model(func):
try:
@functools.wraps(func) # preserve the wrapped function's original attributes
def run_func(*args, **kw):
callfunc = None
try:
callfunc = func(*args, **kw)
except Exception as e:
log.error(module_name + ": " + func.__name__ + " - " + str(e))
traceback.print_exc()
return callfunc
return run_func
except Exception as e:
log.error(module_name + ": " + func.__name__ + " - " + str(e))
traceback.print_exc()
return func
return inner_run_safe_model
def memoizemethod_noargs(method):
"""Decorator to cache the result of a method (without arguments) using a
weak reference to its object
"""
cache = weakref.WeakKeyDictionary()
@functools.wraps(method)
def new_method(self, *args, **kwargs):
if self not in cache:
cache[self] = method(self, *args, **kwargs)
return cache[self]
return new_method
######################## html parsing helpers ###############################
# @log_function_time
def get_html_by_requests(
url, headers=None, code="utf-8", data=None, proxies={}, with_response=False
):
html = ""
r = None
try:
if data:
r = requests.post(
url, headers=headers, timeout=TIME_OUT, data=data, proxies=proxies
)
else:
r = requests.get(url, headers=headers, timeout=TIME_OUT, proxies=proxies)
if code:
r.encoding = code
html = r.text
except Exception as e:
log.error(e)
finally:
r and r.close()
if with_response:
return html, r
else:
return html
def get_json_by_requests(
url,
params=None,
headers=None,
data=None,
proxies={},
with_response=False,
cookies=None,
):
json = {}
response = None
try:
# response = requests.get(url, params = params)
if data:
response = requests.post(
url,
headers=headers,
data=data,
params=params,
timeout=TIME_OUT,
proxies=proxies,
cookies=cookies,
)
else:
response = requests.get(
url,
headers=headers,
params=params,
timeout=TIME_OUT,
proxies=proxies,
cookies=cookies,
)
response.encoding = "utf-8"
json = response.json()
except Exception as e:
log.error(e)
finally:
response and response.close()
if with_response:
return json, response
else:
return json
def get_cookies(response):
cookies = requests.utils.dict_from_cookiejar(response.cookies)
return cookies
def get_cookies_from_str(cookie_str):
"""
>>> get_cookies_from_str("key=value; key2=value2; key3=; key4=")
"{'key': 'value', 'key2': 'value2', 'key3': '', 'key4': ''}"
Args:
cookie_str: key=value; key2=value2; key3=; key4=
Returns:
"""
cookies = {}
for cookie in cookie_str.split(";"):
key, value = cookie.split("=", 1)
key = key.strip()
value = value.strip()
cookies[key] = value
return cookies
def get_cookies_jar(cookies):
"""
@summary: convert cookies produced by selenium into a requests cookie jar
requests.get(xxx, cookies=jar)
reference: https://www.cnblogs.com/small-bud/p/9064674.html
---------
@param cookies: [{},{}]
---------
@result: cookie jar
"""
cookie_jar = RequestsCookieJar()
for cookie in cookies:
cookie_jar.set(cookie["name"], cookie["value"])
return cookie_jar
def get_cookies_from_selenium_cookie(cookies):
"""
@summary: convert cookies produced by selenium into a plain cookie dict
requests.get(xxx, cookies=cookie_dict)
reference: https://www.cnblogs.com/small-bud/p/9064674.html
---------
@param cookies: [{},{}]
---------
@result: cookie dict
"""
cookie_dict = {}
for cookie in cookies:
if cookie.get("name"):
cookie_dict[cookie["name"]] = cookie["value"]
return cookie_dict
def cookiesjar2str(cookies):
str_cookie = ""
for k, v in requests.utils.dict_from_cookiejar(cookies).items():
str_cookie += k
str_cookie += "="
str_cookie += v
str_cookie += "; "
return str_cookie
def cookies2str(cookies):
str_cookie = ""
for k, v in cookies.items():
str_cookie += k
str_cookie += "="
str_cookie += v
str_cookie += "; "
return str_cookie
def get_urls(
html,
stop_urls=(
"javascript",
"+",
".css",
".js",
".rar",
".xls",
".exe",
".apk",
".doc",
".jpg",
".png",
".flv",
".mp4",
),
):
# do not match urls such as javascript, +, #
regex = r'<a.*?href.*?=.*?["|\'](.*?)["|\']'
urls = get_info(html, regex)
urls = sorted(set(urls), key=urls.index)
if stop_urls:
stop_urls = isinstance(stop_urls, str) and [stop_urls] or stop_urls
use_urls = []
for url in urls:
for stop_url in stop_urls:
if stop_url in url:
break
else:
use_urls.append(url)
urls = use_urls
return urls
def get_full_url(root_url, sub_url):
"""
@summary: build the full url
---------
@param root_url: the base url (the page's url)
@param sub_url: the relative url (joined with the base to form a full url)
---------
@result: the full url
"""
return urljoin(root_url, sub_url)
def joint_url(url, params):
# param_str = "?"
# for key, value in params.items():
# value = isinstance(value, str) and value or str(value)
# param_str += key + "=" + value + "&"
#
# return url + param_str[:-1]
if not params:
return url
params = urlencode(params)
separator = "?" if "?" not in url else "&"
return url + separator + params
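# Illustrative usage sketch for joint_url (added for clarity, not part of the
# original feapder API). The sample url and params are assumptions for
# demonstration only; joint_url urlencodes the params and picks "?" or "&"
# depending on whether the url already carries a query string.
def _example_joint_url_usage():
    url = joint_url("https://www.example.com/s", {"wd": "feapder", "pn": 2})
    # url -> "https://www.example.com/s?wd=feapder&pn=2"
    url2 = joint_url("https://www.example.com/s?wd=feapder", {"pn": 2})
    # url2 -> "https://www.example.com/s?wd=feapder&pn=2"
    return url, url2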
def canonicalize_url(url):
"""
url 归一化 会参数排序 及去掉锚点
"""
return _canonicalize_url(url)
def get_url_md5(url):
url = canonicalize_url(url)
url = re.sub("^http://", "https://", url)
return get_md5(url)
def fit_url(urls, identis):
identis = isinstance(identis, str) and [identis] or identis
fit_urls = []
for link in urls:
for identi in identis:
if identi in link:
fit_urls.append(link)
return list(set(fit_urls))
def get_param(url, key):
params = url.split("?")[-1].split("&")
for param in params:
key_value = param.split("=", 1)
if key == key_value[0]:
return key_value[1]
return None
def urlencode(params):
"""
convert dict params into a query string
@param params:
{
'a': 1,
'b': 2
}
@return: a=1&b=2
"""
return urllib.parse.urlencode(params)
def urldecode(url):
"""
parse the query string of a url into a dict
@param url: xxx?a=1&b=2
@return:
{
'a': 1,
'b': 2
}
"""
params_json = {}
params = url.split("?")[-1].split("&")
for param in params:
key, value = param.split("=")
params_json[key] = unquote_url(value)
return params_json
def unquote_url(url, encoding="utf-8"):
"""
@summary: url-decode the given string
---------
@param url:
---------
@result:
"""
return urllib.parse.unquote(url, encoding=encoding)
def quote_url(url, encoding="utf-8"):
"""
@summary: url-encode the given string, see http://www.w3school.com.cn/tags/html_ref_urlencode.html
---------
@param url:
---------
@result:
"""
return urllib.parse.quote(url, safe="%;/?:@&=+$,", encoding=encoding)
def quote_chinese_word(text, encoding="utf-8"):
def quote_chinese_word_func(text):
chinese_word = text.group(0)
return urllib.parse.quote(chinese_word, encoding=encoding)
return re.sub("([\u4e00-\u9fa5]+)", quote_chinese_word_func, text, flags=re.S)
def unescape(str):
"""
unescape html entities
"""
return html.unescape(str)
def excape(str):
"""
escape html special characters
"""
return html.escape(str)
_regexs = {}
# @log_function_time
def get_info(html, regexs, allow_repeat=True, fetch_one=False, split=None):
regexs = isinstance(regexs, str) and [regexs] or regexs
infos = []
for regex in regexs:
if regex == "":
continue
if regex not in _regexs.keys():
_regexs[regex] = re.compile(regex, re.S)
if fetch_one:
infos = _regexs[regex].search(html)
if infos:
infos = infos.groups()
else:
continue
else:
infos = _regexs[regex].findall(str(html))
if len(infos) > 0:
# print(regex)
break
if fetch_one:
infos = infos if infos else ("",)
return infos if len(infos) > 1 else infos[0]
else:
infos = allow_repeat and infos or sorted(set(infos), key=infos.index)
infos = split.join(infos) if split else infos
return infos
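# Illustrative usage sketch for get_info (added for clarity, not part of the
# original feapder API). The sample html and regex are assumptions for
# demonstration only; with fetch_one=False all matches are returned, with
# fetch_one=True only the first match's groups, and split joins the results.
def _example_get_info_usage():
    sample_html = '<a href="https://example.com/a">A</a><a href="https://example.com/b">B</a>'
    links = get_info(sample_html, 'href="(.*?)"')
    # links -> ['https://example.com/a', 'https://example.com/b']
    first = get_info(sample_html, 'href="(.*?)"', fetch_one=True)
    # first -> 'https://example.com/a'
    joined = get_info(sample_html, 'href="(.*?)"', split=", ")
    # joined -> 'https://example.com/a, https://example.com/b'
    return links, first, joined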
def table_json(table, save_one_blank=True):
"""
convert a table to json; fits tables where key:value pairs sit in one row
@param table: a selector (supporting xpath) that wraps the table
@param save_one_blank: keep a single blank character
@return:
"""
data = {}
trs = table.xpath(".//tr")
for tr in trs:
tds = tr.xpath("./td|./th")
for i in range(0, len(tds), 2):
if i + 1 > len(tds) - 1:
break
key = tds[i].xpath("string(.)").extract_first(default="").strip()
value = tds[i + 1].xpath("string(.)").extract_first(default="").strip()
value = replace_str(value, "[\f\n\r\t\v]", "")
value = replace_str(value, " +", " " if save_one_blank else "")
if key:
data[key] = value
return data
def get_table_row_data(table):
"""
get the data of every row in a table
@param table: a selector (supporting xpath) that wraps the table
@return: [[],[]..]
"""
datas = []
rows = table.xpath(".//tr")
for row in rows:
cols = row.xpath("./td|./th")
row_datas = []
for col in cols:
data = col.xpath("string(.)").extract_first(default="").strip()
row_datas.append(data)
datas.append(row_datas)
return datas
def rows2json(rows, keys=None):
"""
convert row data to json
@param rows: the data of each row
@param keys: the json keys; when empty, the first row of rows is used as the keys
@return:
"""
data_start_pos = 0 if keys else 1
datas = []
keys = keys or rows[0]
for values in rows[data_start_pos:]:
datas.append(dict(zip(keys, values)))
return datas
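# Illustrative usage sketch for rows2json (added for clarity, not part of the
# original feapder API). The sample rows are assumptions for demonstration
# only; when keys is omitted the first row is treated as the header.
def _example_rows2json_usage():
    rows = [["name", "age"], ["Tom", "18"], ["Jerry", "20"]]
    datas = rows2json(rows)
    # datas -> [{'name': 'Tom', 'age': '18'}, {'name': 'Jerry', 'age': '20'}]
    datas2 = rows2json([["Tom", "18"]], keys=["name", "age"])
    # datas2 -> [{'name': 'Tom', 'age': '18'}]
    return datas, datas2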
def get_form_data(form):
"""
extract the data submitted by a form
:param form: a selector (supporting xpath) that wraps the form
:return:
"""
data = {}
inputs = form.xpath(".//input")
for input in inputs:
name = input.xpath("./@name").extract_first()
value = input.xpath("./@value").extract_first()
if name:
data[name] = value
return data
# does not work well on mac
# def get_domain(url):
# domain = ''
# try:
# domain = get_tld(url)
# except Exception as e:
# log.debug(e)
# return domain
def get_domain(url):
proto, rest = urllib.parse.splittype(url)
domain, rest = urllib.parse.splithost(rest)
return domain
def get_index_url(url):
return "/".join(url.split("/")[:3])
def get_ip(domain):
ip = socket.getaddrinfo(domain, "http")[0][4][0]
return ip
def get_localhost_ip():
"""
implemented via UDP: build a UDP packet, put the local IP into the UDP header, then read the local IP back from that packet.
this never actually sends a packet out, so packet-capture tools will not see anything
:return:
"""
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
finally:
if s:
s.close()
return ip
def ip_to_num(ip):
import struct
ip_num = socket.ntohl(struct.unpack("I", socket.inet_aton(str(ip)))[0])
return ip_num
def is_valid_proxy(proxy, check_url=None):
"""
check whether a proxy is usable
@param proxy: xxx.xxx.xxx:xxx
@param check_url: url of the target site used for the check. Defaults to None, in which case only the proxy server's socket is probed, which cannot rule out "Connection closed by foreign host"
@return: True / False
"""
is_valid = False
if check_url:
proxies = {"http": f"http://{proxy}", "https": f"https://{proxy}"}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
}
response = None
try:
response = requests.get(
check_url, headers=headers, proxies=proxies, stream=True, timeout=20
)
is_valid = True
except Exception as e:
log.error("check proxy failed: {} {}".format(e, proxy))
finally:
if response:
response.close()
else:
ip, port = proxy.split(":")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sk:
sk.settimeout(7)
try:
sk.connect((ip, int(port))) # check whether the proxy server is up
is_valid = True
except Exception as e:
log.error("check proxy failed: {} {}:{}".format(e, ip, port))
return is_valid
def is_valid_url(url):
"""
check whether a url is well-formed
:param url:
:return:
"""
if re.match(r"(^https?:/{2}\w.+$)|(ftp://)", url):
return True
else:
return False
def get_text(soup, *args):
try:
return soup.get_text()
except Exception as e:
log.error(e)
return ""
def del_html_tag(content, except_line_break=False, save_img=False, white_replaced=""):
"""
strip html tags
@param content: the html content
@param except_line_break: keep p tags
@param save_img: keep img tags
@param white_replaced: replacement for whitespace characters
@return:
"""
content = replace_str(content, "(?i)<script(.|\n)*?</script>") # (?)忽略大小写
content = replace_str(content, "(?i)<style(.|\n)*?</style>")
content = replace_str(content, "<!--(.|\n)*?-->")
content = replace_str(
content, "(?!&[a-z]+=)&[a-z]+;?"
) # drop useless html entities (e.g. &nbsp;) but keep &xxx= style parameters
if except_line_break:
content = content.replace("</p>", "/p")
content = replace_str(content, "<[^p].*?>")
content = content.replace("/p", "</p>")
content = replace_str(content, "[ \f\r\t\v]")
elif save_img:
content = replace_str(content, "(?!<img.+?>)<.+?>") # 替换掉除图片外的其他标签
content = replace_str(content, "(?! +)\s+", "\n") # 保留空格
content = content.strip()
else:
content = replace_str(content, "<(.|\n)*?>")
content = replace_str(content, "\s", white_replaced)
content = content.strip()
return content
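# Illustrative usage sketch for del_html_tag (added for clarity, not part of
# the original feapder API). The sample html is an assumption for
# demonstration only; note that the default white_replaced="" removes all
# whitespace, so pass white_replaced=" " to keep word spacing.
def _example_del_html_tag_usage():
    sample_html = "<p>Hello <b>world</b>!</p>"
    text = del_html_tag(sample_html)
    # text -> 'Helloworld!'
    spaced = del_html_tag(sample_html, white_replaced=" ")
    # spaced -> 'Hello world!'
    return text, spaced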
def del_html_js_css(content):
content = replace_str(content, "(?i)<script(.|\n)*?</script>") # (?)忽略大小写
content = replace_str(content, "(?i)<style(.|\n)*?</style>")
content = replace_str(content, "<!--(.|\n)*?-->")
return content
def is_have_chinese(content):
regex = "[\u4e00-\u9fa5]+"
chinese_word = get_info(content, regex)
return chinese_word and True or False
def is_have_english(content):
regex = "[a-zA-Z]+"
english_words = get_info(content, regex)
return english_words and True or False
def get_chinese_word(content):
regex = "[\u4e00-\u9fa5]+"
chinese_word = get_info(content, regex)
return chinese_word
def get_english_words(content):
regex = "[a-zA-Z]+"
english_words = get_info(content, regex)
return english_words or ""
##################################################
def get_json(json_str):
"""
@summary: parse a json object
---------
@param json_str: a json-formatted string
---------
@result: the parsed json object
"""
try:
return json.loads(json_str) if json_str else {}
except Exception as e1:
try:
json_str = json_str.strip()
json_str = json_str.replace("'", '"')
keys = get_info(json_str, "(\w+):")
for key in keys:
json_str = json_str.replace(key, '"%s"' % key)
return json.loads(json_str) if json_str else {}
except Exception as e2:
log.error(
"""
e1: %s
format json_str: %s
e2: %s
"""
% (e1, json_str, e2)
)
return {}
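# Illustrative usage sketch for get_json (added for clarity, not part of the
# original feapder API). The sample strings are assumptions for demonstration
# only; get_json first tries json.loads and then falls back to repairing
# single quotes and unquoted keys.
def _example_get_json_usage():
    standard = get_json('{"a": 1}')
    # standard -> {'a': 1}
    single_quoted = get_json("{'a': 1}")
    # single_quoted -> {'a': 1}
    unquoted_keys = get_json("{status: 1}")
    # unquoted_keys -> {'status': 1}
    return standard, single_quoted, unquoted_keys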
def jsonp2json(jsonp):
"""
convert jsonp to json
@param jsonp: jQuery172013600082560040794_1553230569815({})
@return:
"""
try:
return json.loads(re.match(".*?({.*}).*", jsonp, re.S).group(1))
except:
raise ValueError("Invalid Input")
def dumps_json(json_, indent=4, sort_keys=False):
"""
@summary: pretty-format json for printing
---------
@param json_: a json-formatted string or a json object
---------
@result: the formatted string
"""
try:
if isinstance(json_, str):
json_ = get_json(json_)
json_ = json.dumps(
json_, ensure_ascii=False, indent=indent, skipkeys=True, sort_keys=sort_keys
)
except Exception as e:
log.error(e)
json_ = pformat(json_)
return json_
def get_json_value(json_object, key):
"""
@summary:
---------
@param json_object: a json object or a json-formatted string
@param key: the key; for nested values use a dotted path such as key1.key2, e.g. {'key1':{'key2':3}}
---------
@result: the matching value, or '' when not found
"""
current_key = ""
value = ""
try:
json_object = (
isinstance(json_object, str) and get_json(json_object) or json_object
)
current_key = key.split(".")[0]
value = json_object[current_key]
key = key[key.find(".") + 1 :]
except Exception as e:
return value
if key == current_key:
return value
else:
return get_json_value(value, key)
def get_all_keys(datas, depth=None, current_depth=0):
"""
@summary: get all keys of a json structure
---------
@param datas: dict / list
@param depth: key depth limit; unlimited by default, depth starts at 1
@param current_depth: current key depth, no need to pass it
---------
@result: all keys of the json structure
"""
keys = []
if depth and current_depth >= depth:
return keys
if isinstance(datas, list):
for data in datas:
keys.extend(get_all_keys(data, depth, current_depth=current_depth + 1))
elif isinstance(datas, dict):
for key, value in datas.items():
keys.append(key)
if isinstance(value, dict):
keys.extend(get_all_keys(value, depth, current_depth=current_depth + 1))
return keys
def to_chinese(unicode_str):
format_str = json.loads('{"chinese":"%s"}' % unicode_str)
return format_str["chinese"]
##################################################
def replace_str(source_str, regex, replace_str=""):
"""
@summary: replace parts of a string
---------
@param source_str: the original string
@param regex: the regular expression
@param replace_str: the replacement, '' by default
---------
@result: the string after replacement
"""
str_info = re.compile(regex)
return str_info.sub(replace_str, source_str)
def del_redundant_blank_character(text):
"""
collapse redundant whitespace, keeping a single space
:param text:
:return:
"""
return re.sub("\s+", " ", text)
##################################################
def get_conf_value(config_file, section, key):
cp = configparser.ConfigParser(allow_no_value=True)
with codecs.open(config_file, "r", encoding="utf-8") as f:
cp.read_file(f)
return cp.get(section, key)
def mkdir(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
pass
def write_file(filename, content, mode="w", encoding="utf-8"):
"""
@summary: write a file
---------
@param filename: file name (with path)
@param content: the content
@param mode: mode w/w+ (overwrite/append)
---------
@result:
"""
directory = os.path.dirname(filename)
mkdir(directory)
with open(filename, mode, encoding=encoding) as file:
file.writelines(content)
def read_file(filename, readlines=False, encoding="utf-8"):
"""
@summary: read a file
---------
@param filename: file name (with path)
@param readlines: read line by line (default False)
---------
@result: a list when readlines is True, otherwise a string
"""
content = None
try:
with open(filename, "r", encoding=encoding) as file:
content = file.readlines() if readlines else file.read()
except Exception as e:
log.error(e)
return content
def get_oss_file_list(oss_handler, prefix, date_range_min, date_range_max=None):
"""
list files on oss
@param prefix: path prefix, e.g. data/car_service_line/yiche/yiche_serial_zongshu_info
@param date_range_min: lower bound of the date range, '/'-separated, e.g. 2019/03/01 or 2019/03/01/00/00/00
@param date_range_max: upper bound of the date range, '/'-separated, e.g. 2019/03/01 or 2019/03/01/00/00/00
@return: each file path, e.g. html/e_commerce_service_line/alibaba/alibaba_shop_info/2019/03/22/15/53/15/8ca8b9e4-4c77-11e9-9dee-acde48001122.json.snappy
"""
# compute the date range
date_range_max = date_range_max or date_range_min
date_format = "/".join(
["%Y", "%m", "%d", "%H", "%M", "%S"][: date_range_min.count("/") + 1]
)
time_interval = [
{"days": 365},
{"days": 31},
{"days": 1},
{"hours": 1},
{"minutes": 1},
{"seconds": 1},
][date_range_min.count("/")]
date_range = get_between_date(
date_range_min, date_range_max, date_format=date_format, **time_interval
)
for date in date_range:
file_folder_path = os.path.join(prefix, date)
objs = oss_handler.list(prefix=file_folder_path)
for obj in objs:
filename = obj.key
yield filename
def is_html(url):
if not url:
return False
try:
content_type = request.urlopen(url).info().get("Content-Type", "")
if "text/html" in content_type:
return True
else:
return False
except Exception as e:
log.error(e)
return False
def is_exist(file_path):
"""
@summary: whether the file exists
---------
@param file_path:
---------
@result:
"""
return os.path.exists(file_path)
def download_file(url, base_path, filename="", call_func="", proxies=None, data=None):
file_path = base_path + filename
directory = os.path.dirname(file_path)
mkdir(directory)
# progress bar
def progress_callfunc(blocknum, blocksize, totalsize):
"""回调函数
@blocknum : 已经下载的数据块
@blocksize : 数据块的大小
@totalsize: 远程文件的大小
"""
percent = 100.0 * blocknum * blocksize / totalsize
if percent > 100:
percent = 100
# print ('进度条 %.2f%%' % percent, end = '\r')
sys.stdout.write("进度条 %.2f%%" % percent + "\r")
sys.stdout.flush()
if url:
try:
log.debug(
"""
正在下载 %s
存储路径 %s
"""
% (url, file_path)
)
if proxies:
# create the object, assign it to a variable
proxy = request.ProxyHandler(proxies)
# construct a new opener using your proxy settings
opener = request.build_opener(proxy)
# install the openen on the module-level
request.install_opener(opener)
request.urlretrieve(url, file_path, progress_callfunc, data)
log.debug(
"""
下载完毕 %s
文件路径 %s
"""
% (url, file_path)
)
call_func and call_func()
return 1
except Exception as e:
log.error(e)
return 0
else:
return 0
def get_file_list(path, ignore=[]):
templist = path.split("*")
path = templist[0]
file_type = templist[1] if len(templist) >= 2 else ""
# recursively walk the files
def get_file_list_(path, file_type, ignore, all_file=[]):
file_list = os.listdir(path)
for file_name in file_list:
if file_name in ignore:
continue
file_path = os.path.join(path, file_name)
if os.path.isdir(file_path):
get_file_list_(file_path, file_type, ignore, all_file)
else:
if not file_type or file_name.endswith(file_type):
all_file.append(file_path)
return all_file
return get_file_list_(path, file_type, ignore) if os.path.isdir(path) else [path]
def rename_file(old_name, new_name):
os.rename(old_name, new_name)
def del_file(path, ignore=()):
files = get_file_list(path, ignore)
for file in files:
try:
os.remove(file)
except Exception as e:
log.error(
"""
删除出错: %s
Exception : %s
"""
% (file, str(e))
)
finally:
pass
def get_file_type(file_name):
"""
@summary: get the file extension
---------
@param file_name:
---------
@result:
"""
try:
return os.path.splitext(file_name)[1]
except Exception as e:
log.exception(e)
def get_file_path(file_path):
"""
@summary: get the directory part of a file path
---------
@param file_path: /root/a.py
---------
@result: /root
"""
try:
return os.path.split(file_path)[0]
except Exception as e:
log.exception(e)
#############################################
def exec_js(js_code):
"""
@summary: execute js code
---------
@param js_code: the js code
---------
@result: the execution result
"""
return execjs.eval(js_code)
def compile_js(js_func):
"""
@summary: compile a js function
---------
@param js_func: the js function source
---------
@result: a callable; invoke it as fun('js_funName', param1, param2)
"""
ctx = execjs.compile(js_func)
return ctx.call
###############################################
#############################################
def date_to_timestamp(date, time_format="%Y-%m-%d %H:%M:%S"):
"""
@summary:
---------
@param date: a date string such as "2011-09-28 10:00:00" to convert into a timestamp
@param format: the time format
---------
@result: the timestamp
"""
timestamp = time.mktime(time.strptime(date, time_format))
return int(timestamp)
def timestamp_to_date(timestamp, time_format="%Y-%m-%d %H:%M:%S"):
"""
@summary:
---------
@param timestamp: the timestamp to convert into a date string
@param format: the date format
---------
@result: the date string
"""
if timestamp is None:
raise ValueError("timestamp is null")
date = time.localtime(timestamp)
return time.strftime(time_format, date)
def get_current_timestamp():
return int(time.time())
def get_current_date(date_format="%Y-%m-%d %H:%M:%S"):
return datetime.datetime.now().strftime(date_format)
# return time.strftime(date_format, time.localtime(time.time()))
def get_date_number(year=None, month=None, day=None):
"""
@summary: get the iso calendar numbers for the given date
defaults to the current week
---------
@param year: 2010
@param month: 6
@param day: 16
---------
@result: (year, week number, weekday), e.g. (2010, 24, 3)
"""
if year and month and day:
return datetime.date(year, month, day).isocalendar()
elif not any([year, month, day]):
return datetime.datetime.now().isocalendar()
else:
assert year, "year 不能为空"
assert month, "month 不能为空"
assert day, "day 不能为空"
def get_between_date(
begin_date, end_date=None, date_format="%Y-%m-%d", **time_interval
):
"""
@summary: get the dates within a time range, one per day by default
---------
@param begin_date: the start date, str, e.g. 2018-10-01
@param end_date: defaults to today
@param date_format: date format, must match begin_date
@param time_interval: step size, one day by default; supports days, seconds, microseconds, milliseconds, minutes, hours, weeks
---------
@result: a list of strings
"""
date_list = []
begin_date = datetime.datetime.strptime(begin_date, date_format)
end_date = (
datetime.datetime.strptime(end_date, date_format)
if end_date
else datetime.datetime.strptime(
time.strftime(date_format, time.localtime(time.time())), date_format
)
)
time_interval = time_interval or dict(days=1)
while begin_date <= end_date:
date_str = begin_date.strftime(date_format)
date_list.append(date_str)
begin_date += datetime.timedelta(**time_interval)
if end_date.strftime(date_format) not in date_list:
date_list.append(end_date.strftime(date_format))
return date_list
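# Illustrative usage sketch for get_between_date (added for clarity, not part
# of the original feapder API). The dates below are assumptions for
# demonstration only; the step defaults to one day and can be overridden with
# keyword arguments such as hours=1.
def _example_get_between_date_usage():
    days = get_between_date("2021-01-01", "2021-01-03")
    # days -> ['2021-01-01', '2021-01-02', '2021-01-03']
    hours = get_between_date(
        "2021/01/01/00", "2021/01/01/02", date_format="%Y/%m/%d/%H", hours=1
    )
    # hours -> ['2021/01/01/00', '2021/01/01/01', '2021/01/01/02']
    return days, hours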
def get_between_months(begin_date, end_date=None):
"""
@summary: get the months within a time range
only whole months are included
---------
@param begin_date: the start date, e.g. 2018-01-01
@param end_date: defaults to now
---------
@result: a list such as ['2018-01', '2018-02']
"""
def add_months(dt, months):
month = dt.month - 1 + months
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day, calendar.monthrange(year, month)[1])
return dt.replace(year=year, month=month, day=day)
date_list = []
begin_date = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
end_date = (
datetime.datetime.strptime(end_date, "%Y-%m-%d")
if end_date
else datetime.datetime.strptime(
time.strftime("%Y-%m-%d", time.localtime(time.time())), "%Y-%m-%d"
)
)
while begin_date <= end_date:
date_str = begin_date.strftime("%Y-%m")
date_list.append(date_str)
begin_date = add_months(begin_date, 1)
return date_list
def get_today_of_day(day_offset=0):
return str(datetime.date.today() + datetime.timedelta(days=day_offset))
def get_days_of_month(year, month):
"""
return the number of days in the month
"""
return calendar.monthrange(year, month)[1]
def get_firstday_of_month(date):
"""''
date format = "YYYY-MM-DD"
"""
year, month, day = date.split("-")
year, month, day = int(year), int(month), int(day)
days = "01"
if int(month) < 10:
month = "0" + str(int(month))
arr = (year, month, days)
return "-".join("%s" % i for i in arr)
def get_lastday_of_month(date):
"""''
get the last day of month
date format = "YYYY-MM-DD"
"""
year, month, day = date.split("-")
year, month, day = int(year), int(month), int(day)
days = calendar.monthrange(year, month)[1]
month = add_zero(month)
arr = (year, month, days)
return "-".join("%s" % i for i in arr)
def get_firstday_month(month_offset=0):
"""''
get the first day of month from today
month_offset is how many months
"""
(y, m, d) = get_year_month_and_days(month_offset)
d = "01"
arr = (y, m, d)
return "-".join("%s" % i for i in arr)
def get_lastday_month(month_offset=0):
"""''
get the last day of month from today
month_offset is how many months
"""
return "-".join("%s" % i for i in get_year_month_and_days(month_offset))
def get_last_month(month_offset=0):
"""''
get the last day of month from today
month_offset is how many months
"""
return "-".join("%s" % i for i in get_year_month_and_days(month_offset)[:2])
def get_year_month_and_days(month_offset=0):
"""
@summary:
---------
@param month_offset: the month offset
---------
@result: ('2019', '04', '30')
"""
today = datetime.datetime.now()
year, month = today.year, today.month
this_year = int(year)
this_month = int(month)
total_month = this_month + month_offset
if month_offset >= 0:
if total_month <= 12:
days = str(get_days_of_month(this_year, total_month))
total_month = add_zero(total_month)
return (year, total_month, days)
else:
i = total_month // 12
j = total_month % 12
if j == 0:
i -= 1
j = 12
this_year += i
days = str(get_days_of_month(this_year, j))
j = add_zero(j)
return (str(this_year), str(j), days)
else:
if (total_month > 0) and (total_month < 12):
days = str(get_days_of_month(this_year, total_month))
total_month = add_zero(total_month)
return (year, total_month, days)
else:
i = total_month // 12
j = total_month % 12
if j == 0:
i -= 1
j = 12
this_year += i
days = str(get_days_of_month(this_year, j))
j = add_zero(j)
return (str(this_year), str(j), days)
def add_zero(n):
return "%02d" % n
def get_month(month_offset=0):
"""''
get the date N months before or after today
if month_offset>0, the date N months after today
if month_offset<0, the date N months before today
date format = "YYYY-MM-DD"
"""
today = datetime.datetime.now()
day = add_zero(today.day)
(y, m, d) = get_year_month_and_days(month_offset)
arr = (y, m, d)
if int(day) < int(d):
arr = (y, m, day)
return "-".join("%s" % i for i in arr)
@run_safe_model("format_date")
def format_date(date, old_format="", new_format="%Y-%m-%d %H:%M:%S"):
"""
@summary: normalize a date string
---------
@param date: the date, e.g. 2017年4月17日 3时27分12秒
@param old_format: the original date format, e.g. '%Y年%m月%d日 %H时%M分%S秒'
%y two-digit year (00-99)
%Y four-digit year (000-9999)
%m month (01-12)
%d day of month (0-31)
%H hour, 24-hour clock (0-23)
%I hour, 12-hour clock (01-12)
%M minute (00-59)
%S second (00-59)
@param new_format: the output date format
---------
@result: the formatted date as a string, e.g. 2017-4-17 3:27:12
"""
if not date:
return ""
if not old_format:
regex = "(\d+)"
numbers = get_info(date, regex, allow_repeat=True)
formats = ["%Y", "%m", "%d", "%H", "%M", "%S"]
old_format = date
for i, number in enumerate(numbers[:6]):
if i == 0 and len(number) == 2: # the year may be two digits, so use lowercase %y
old_format = old_format.replace(
number, formats[i].lower(), 1
) # replace only once, e.g. '2017年11月30日 11:49': replacing the month 11 must not also replace the hour 11
else:
old_format = old_format.replace(number, formats[i], 1) # replace only once
try:
date_obj = datetime.datetime.strptime(date, old_format)
if "T" in date and "Z" in date:
date_obj += datetime.timedelta(hours=8)
date_str = date_obj.strftime("%Y-%m-%d %H:%M:%S")
else:
date_str = datetime.datetime.strftime(date_obj, new_format)
except Exception as e:
log.error("日期格式化出错,old_format = %s 不符合 %s 格式" % (old_format, date))
date_str = date
return date_str
@run_safe_model("format_time")
def format_time(release_time, date_format="%Y-%m-%d %H:%M:%S"):
if "年前" in release_time:
years = re.compile("(\d+)年前").findall(release_time)
years_ago = datetime.datetime.now() - datetime.timedelta(
days=int(years[0]) * 365
)
release_time = years_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "月前" in release_time:
months = re.compile("(\d+)月前").findall(release_time)
months_ago = datetime.datetime.now() - datetime.timedelta(
days=int(months[0]) * 30
)
release_time = months_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "周前" in release_time:
weeks = re.compile("(\d+)周前").findall(release_time)
weeks_ago = datetime.datetime.now() - datetime.timedelta(days=int(weeks[0]) * 7)
release_time = weeks_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "天前" in release_time:
ndays = re.compile("(\d+)天前").findall(release_time)
days_ago = datetime.datetime.now() - datetime.timedelta(days=int(ndays[0]))
release_time = days_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "小时前" in release_time:
nhours = re.compile("(\d+)小时前").findall(release_time)
hours_ago = datetime.datetime.now() - datetime.timedelta(hours=int(nhours[0]))
release_time = hours_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "分钟前" in release_time:
nminutes = re.compile("(\d+)分钟前").findall(release_time)
minutes_ago = datetime.datetime.now() - datetime.timedelta(
minutes=int(nminutes[0])
)
release_time = minutes_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "昨天" in release_time or "昨日" in release_time:
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
release_time = release_time.replace("昨天", str(yesterday))
elif "今天" in release_time:
release_time = release_time.replace("今天", get_current_date("%Y-%m-%d"))
elif "刚刚" in release_time:
release_time = get_current_date()
elif re.search("^\d\d:\d\d", release_time):
release_time = get_current_date("%Y-%m-%d") + " " + release_time
elif not re.compile("\d{4}").findall(release_time):
month = re.compile("\d{1,2}").findall(release_time)
if month and int(month[0]) <= int(get_current_date("%m")):
release_time = get_current_date("%Y") + "-" + release_time
else:
release_time = str(int(get_current_date("%Y")) - 1) + "-" + release_time
release_time = format_date(release_time, new_format=date_format)
return release_time
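# Illustrative usage sketch for format_time (added for clarity, not part of
# the original feapder API). The inputs are assumptions for demonstration
# only; relative expressions such as "3天前" (3 days ago) or "昨天 12:00"
# (yesterday 12:00) are resolved against the current clock, so the absolute
# values returned depend on when this runs.
def _example_format_time_usage():
    three_days_ago = format_time("3天前")
    # three_days_ago -> an absolute "%Y-%m-%d %H:%M:%S" string three days back
    yesterday_noon = format_time("昨天 12:00")
    # yesterday_noon -> e.g. "2021-01-01 12:00:00" when run on 2021-01-02
    return three_days_ago, yesterday_noon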
def to_date(date_str, date_format="%Y-%m-%d %H:%M:%S"):
return datetime.datetime.strptime(date_str, date_format)
def get_before_date(
current_date,
days,
current_date_format="%Y-%m-%d %H:%M:%S",
return_date_format="%Y-%m-%d %H:%M:%S",
):
"""
@summary: get a date offset from the given date
---------
@param current_date: the current date, str
@param days: offset in days, -1 means one day earlier, 1 one day later
@param return_date_format: format of the returned date
---------
@result: a string
"""
current_date = to_date(current_date, current_date_format)
date_obj = current_date + datetime.timedelta(days=days)
return datetime.datetime.strftime(date_obj, return_date_format)
def delay_time(sleep_time=60):
"""
@summary: sleep, one minute by default
---------
@param sleep_time: in seconds
---------
@result:
"""
time.sleep(sleep_time)
def format_seconds(seconds):
"""
@summary: convert seconds into days/hours/minutes/seconds
---------
@param seconds:
---------
@result: 2天3小时2分49秒
"""
seconds = int(seconds + 0.5) # round half up to a whole second
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
times = ""
if d:
times += "{}天".format(d)
if h:
times += "{}小时".format(h)
if m:
times += "{}分".format(m)
if s:
times += "{}秒".format(s)
return times
################################################
def get_md5(*args):
"""
@summary: get a unique 32-character md5
---------
@param *args: the values combined to build the hash
---------
@result: 7c8684bcbdfcea6697650aa53d7b1405
"""
m = hashlib.md5()
for arg in args:
m.update(str(arg).encode())
return m.hexdigest()
def get_sha1(*args):
"""
@summary: get a unique 40-character value, useful as a unique id
---------
@param *args: the values combined to build the hash
---------
@result: ba4868b3f277c8e387b55d9e3d0be7c045cdd89e
"""
sha1 = hashlib.sha1()
for arg in args:
sha1.update(str(arg).encode())
return sha1.hexdigest() # 40 characters
def get_base64(secret, message):
"""
@summary: the signing algorithm is "HMAC-SHA256"
reference: https://www.jokecamp.com/blog/examples-of-creating-base64-hashes-using-hmac-sha256-in-different-languages/
---------
@param secret: the secret key
@param message: the message
---------
@result: the signature, encoded as "base64"
"""
import hashlib
import hmac
import base64
message = bytes(message, "utf-8")
secret = bytes(secret, "utf-8")
signature = base64.b64encode(
hmac.new(secret, message, digestmod=hashlib.sha256).digest()
).decode("utf8")
return signature
def get_uuid(key1="", key2=""):
"""
@summary: compute a uuid
useful for combining two strings into one unique value, e.g. a domain plus a news title as a composite index
---------
@param key1:str
@param key2:str
---------
@result:
"""
uuid_object = ""
if not key1 and not key2:
uuid_object = uuid.uuid1()
else:
hash = md5(bytes(key1, "utf-8") + bytes(key2, "utf-8")).digest()
uuid_object = uuid.UUID(bytes=hash[:16], version=3)
return str(uuid_object)
def get_hash(text):
return hash(text)
##################################################
def cut_string(text, length):
"""
@summary: split text into chunks of the given length
---------
@param text: the text
@param length: the chunk length
---------
@result: a list of the chunks
"""
text_list = re.findall(".{%d}" % length, text, re.S)
leave_text = text[len(text_list) * length :]
if leave_text:
text_list.append(leave_text)
return text_list
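# Illustrative usage sketch for cut_string (added for clarity, not part of the
# original feapder API). The text below is an assumption for demonstration
# only; the last chunk keeps whatever is left over when the length does not
# divide evenly.
def _example_cut_string_usage():
    chunks = cut_string("abcdefg", 3)
    # chunks -> ['abc', 'def', 'g']
    return chunks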
def get_random_string(length=1):
random_string = "".join(random.sample(string.ascii_letters + string.digits, length))
return random_string
def get_random_password(length=8, special_characters=""):
"""
@summary: create a random password; default length 8, containing upper-case letters, lower-case letters and digits
---------
@param length: password length, default 8
@param special_characters: special characters to include
---------
@result: a password of the given length
"""
while True:
random_password = "".join(
random.sample(
string.ascii_letters + string.digits + special_characters, length
)
)
if (
re.search("[0-9]", random_password)
and re.search("[A-Z]", random_password)
and re.search("[a-z]", random_password)
):
if not special_characters:
break
elif set(random_password).intersection(special_characters):
break
return random_password
def get_random_email(length=None, email_types: list = None, special_characters=""):
"""
generate a random email address
:param length: length of the local part
:param email_types: email domains to choose from
:param special_characters: special characters to include
:return:
"""
if not length:
length = random.randint(4, 12)
if not email_types:
email_types = [
"qq.com",
"163.com",
"gmail.com",
"yahoo.com",
"hotmail.com",
"yeah.net",
"126.com",
"139.com",
"sohu.com",
]
email_body = get_random_password(length, special_characters)
email_type = random.choice(email_types)
email = email_body + "@" + email_type
return email
#################################
def dumps_obj(obj):
return pickle.dumps(obj)
def loads_obj(obj_str):
return pickle.loads(obj_str)
def get_method(obj, name):
name = str(name)
try:
return getattr(obj, name)
except AttributeError:
log.error("Method %r not found in: %s" % (name, obj))
return None
def witch_workspace(project_path):
"""
@summary:
---------
@param project_path:
---------
@result:
"""
os.chdir(project_path) # switch the working directory
############### database helpers #######################
def format_sql_value(value):
if isinstance(value, str):
value = value.strip()
elif isinstance(value, (list, dict)):
value = dumps_json(value, indent=None)
elif isinstance(value, (datetime.date, datetime.time)):
value = str(value)
elif isinstance(value, bool):
value = int(value)
return value
def list2str(datas):
"""
convert a list into a tuple-style string
:param datas: [1, 2]
:return: (1, 2)
"""
data_str = str(tuple(datas))
data_str = re.sub(",\)$", ")", data_str)
return data_str
def make_insert_sql(
table, data, auto_update=False, update_columns=(), insert_ignore=False
):
"""
@summary: for mysql; oracle datetimes would still need to_date handling (TODO)
---------
@param table:
@param data: row data, json format
@param auto_update: use replace into, completely overwriting existing rows
@param update_columns: columns to update, all by default; when set, auto_update has no effect and the given columns are updated on duplicate key
@param insert_ignore: ignore rows that already exist
---------
@result:
"""
keys = ["`{}`".format(key) for key in data.keys()]
keys = list2str(keys).replace("'", "")
values = [format_sql_value(value) for value in data.values()]
values = list2str(values)
if update_columns:
if not isinstance(update_columns, (tuple, list)):
update_columns = [update_columns]
update_columns_ = ", ".join(
["{key}=values({key})".format(key=key) for key in update_columns]
)
sql = (
"insert%s into {table} {keys} values {values} on duplicate key update %s"
% (" ignore" if insert_ignore else "", update_columns_)
)
elif auto_update:
sql = "replace into {table} {keys} values {values}"
else:
sql = "insert%s into {table} {keys} values {values}" % (
" ignore" if insert_ignore else ""
)
sql = sql.format(table=table, keys=keys, values=values).replace("None", "null")
return sql
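# Illustrative usage sketch for make_insert_sql (added for clarity, not part
# of the original feapder API). The table and row below are assumptions for
# demonstration only; note how update_columns switches the statement to an
# "on duplicate key update" form.
def _example_make_insert_sql_usage():
    row = {"id": 1, "name": "tom"}
    plain = make_insert_sql("user", row)
    # plain -> "insert into user (`id`, `name`) values (1, 'tom')"
    upsert = make_insert_sql("user", row, update_columns=("name",))
    # upsert -> "insert into user (`id`, `name`) values (1, 'tom')
    #            on duplicate key update name=values(name)"
    return plain, upsert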
def make_update_sql(table, data, condition):
"""
@summary: for mysql; oracle datetimes would still need to_date handling (TODO)
---------
@param table:
@param data: row data, json format
@param condition: the where condition
---------
@result:
"""
key_values = []
for key, value in data.items():
value = format_sql_value(value)
if isinstance(value, str):
key_values.append("`{}`='{}'".format(key, value))
elif value is None:
key_values.append("`{}`={}".format(key, "null"))
else:
key_values.append("`{}`={}".format(key, value))
key_values = ", ".join(key_values)
sql = "update {table} set {key_values} where {condition}"
sql = sql.format(table=table, key_values=key_values, condition=condition)
return sql
def make_batch_sql(
table, datas, auto_update=False, update_columns=(), update_columns_value=()
):
"""
@summary: build a batch sql statement
---------
@param table:
@param datas: row data [{...}]
@param auto_update: use replace into, completely overwriting existing rows
@param update_columns: columns to update, all by default; when set, auto_update has no effect and the given columns are updated on duplicate key
@param update_columns_value: values for the updated columns; defaults to the corresponding values in datas. Note: string values must include their own single quotes, e.g. update_columns_value=("'test'",)
---------
@result:
"""
if not datas:
return
keys = list(datas[0].keys())
values_placeholder = ["%s"] * len(keys)
values = []
for data in datas:
value = []
for key in keys:
current_data = data.get(key)
current_data = format_sql_value(current_data)
value.append(current_data)
values.append(value)
keys = ["`{}`".format(key) for key in keys]
keys = list2str(keys).replace("'", "")
values_placeholder = list2str(values_placeholder).replace("'", "")
if update_columns:
if not isinstance(update_columns, (tuple, list)):
update_columns = [update_columns]
if update_columns_value:
update_columns_ = ", ".join(
[
"`{key}`={value}".format(key=key, value=value)
for key, value in zip(update_columns, update_columns_value)
]
)
else:
update_columns_ = ", ".join(
["`{key}`=values(`{key}`)".format(key=key) for key in update_columns]
)
sql = "insert into {table} {keys} values {values_placeholder} on duplicate key update {update_columns}".format(
table=table,
keys=keys,
values_placeholder=values_placeholder,
update_columns=update_columns_,
)
elif auto_update:
sql = "replace into {table} {keys} values {values_placeholder}".format(
table=table, keys=keys, values_placeholder=values_placeholder
)
else:
sql = "insert ignore into {table} {keys} values {values_placeholder}".format(
table=table, keys=keys, values_placeholder=values_placeholder
)
return sql, values
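# Illustrative usage of make_batch_sql (not part of the original module). It returns a
# parameterised statement plus the value rows, which would typically be passed to a DB-API
# cursor.executemany call (the cursor name here is an assumption):
#
#   sql, values = make_batch_sql("user", [{"id": 1, "name": "Tom"}, {"id": 2, "name": "Ann"}])
#   # sql    -> insert ignore into user (`id`, `name`) values (%s, %s)
#   # values -> [[1, 'Tom'], [2, 'Ann']]
#   # cursor.executemany(sql, values)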
############### JSON helpers #######################
def key2underline(key, strict=True):
"""
>>> key2underline("HelloWord")
'hello_word'
>>> key2underline("SHData", strict=True)
's_h_data'
>>> key2underline("SHData", strict=False)
'sh_data'
>>> key2underline("SHDataHi", strict=False)
'sh_data_hi'
>>> key2underline("SHDataHi", strict=True)
's_h_data_hi'
"""
regex = "[A-Z]*" if not strict else "[A-Z]"
capitals = re.findall(regex, key)
if capitals:
for pos, capital in enumerate(capitals):
if not capital:
continue
if pos == 0:
if len(capital) > 1:
key = key.replace(
capital, capital[:-1].lower() + "_" + capital[-1].lower(), 1
)
else:
key = key.replace(capital, capital.lower(), 1)
else:
if len(capital) > 1:
key = key.replace(capital, "_" + capital.lower() + "_", 1)
else:
key = key.replace(capital, "_" + capital.lower(), 1)
return key.strip("_")
def key2hump(key):
"""
    Convert an underscore-separated key to CamelCase
"""
return key.title().replace("_", "")
def format_json_key(json_data):
json_data_correct = {}
for key, value in json_data.items():
key = key2underline(key)
json_data_correct[key] = value
return json_data_correct
def quick_to_json(text):
"""
    @summary: quickly convert request headers copied from a browser into a dict
---------
@param text:
---------
@result:
"""
contents = text.split("\n")
json = {}
for content in contents:
if content == "\n":
continue
content = content.strip()
regex = ["(:?.*?):(.*)", "(.*?):? +(.*)", "([^:]*)"]
result = get_info(content, regex)
result = result[0] if isinstance(result[0], tuple) else result
try:
json[result[0]] = eval(result[1].strip())
        except Exception:
json[result[0]] = result[1].strip()
return json
##############################
def print_pretty(object):
pprint(object)
def print_params2json(url):
params_json = {}
params = url.split("?")[-1].split("&")
for param in params:
key_value = param.split("=", 1)
params_json[key_value[0]] = key_value[1]
print(dumps_json(params_json))
def print_cookie2json(cookie_str_or_list):
if isinstance(cookie_str_or_list, str):
cookie_json = {}
cookies = cookie_str_or_list.split("; ")
for cookie in cookies:
name, value = cookie.split("=")
cookie_json[name] = value
else:
cookie_json = get_cookies_from_selenium_cookie(cookie_str_or_list)
print(dumps_json(cookie_json))
###############################
def flatten(x):
"""flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
    >>> flatten([1, 2, [3,4], (5,6)])
    [1, 2, 3, 4, 5, 6]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
>>> flatten(["foo", "bar"])
['foo', 'bar']
>>> flatten(["foo", ["baz", 42], "bar"])
['foo', 'baz', 42, 'bar']
"""
return list(iflatten(x))
def iflatten(x):
"""iflatten(sequence) -> iterator
Similar to ``.flatten()``, but returns iterator instead"""
for el in x:
if _is_listlike(el):
for el_ in flatten(el):
yield el_
else:
yield el
def _is_listlike(x):
"""
>>> _is_listlike("foo")
False
>>> _is_listlike(5)
False
>>> _is_listlike(b"foo")
False
>>> _is_listlike([b"foo"])
True
>>> _is_listlike((b"foo",))
True
>>> _is_listlike({})
True
>>> _is_listlike(set())
True
>>> _is_listlike((x for x in range(3)))
True
>>> _is_listlike(six.moves.xrange(5))
True
"""
return hasattr(x, "__iter__") and not isinstance(x, (six.text_type, bytes))
###################
def re_def_supper_class(obj, supper_class):
"""
    Rebind the parent class of a class at runtime
    @param obj: a class (e.g. for `class A` pass A) or an instance's a.__class__
    @param supper_class: the new parent class
@return:
"""
obj.__bases__ = (supper_class,)
###################
def is_in_rate_limit(rate_limit, *key):
"""
    Rate limiting
    :param rate_limit: limit window in seconds
    :param key: key(s) identifying the rate-limited event
:return: True / False
"""
if rate_limit == 0:
return False
msg_md5 = get_md5(*key)
key = "rate_limit:{}".format(msg_md5)
if get_redisdb().get(key):
return True
get_redisdb().set(key, time.time(), ex=rate_limit)
return False
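# Illustrative usage of is_in_rate_limit (not part of the original module); it relies on
# the Redis connection returned by get_redisdb(). The key strings are made up:
#
#   if not is_in_rate_limit(60, "disk-alert", "host-1"):
#       ...  # at most one alert per 60 seconds for this key combination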
def dingding_warning(
message,
message_prefix=None,
rate_limit=setting.WARNING_INTERVAL,
url=setting.DINGDING_WARNING_URL,
user_phone=setting.DINGDING_WARNING_PHONE,
):
if not all([url, user_phone, message]):
return
if is_in_rate_limit(rate_limit, url, user_phone, message_prefix or message):
        log.info("Alert interval too short; this alert is skipped. Content: {}".format(message))
return
if isinstance(user_phone, str):
user_phone = [user_phone]
data = {
"msgtype": "text",
"text": {"content": message},
"at": {"atMobiles": user_phone, "isAtAll": False},
}
headers = {"Content-Type": "application/json"}
try:
response = requests.post(
url, headers=headers, data=json.dumps(data).encode("utf8")
)
result = response.json()
response.close()
if result.get("errcode") == 0:
return True
else:
raise Exception(result.get("errmsg"))
except Exception as e:
        log.error("Failed to send alert. Content: {}, error: {}".format(message, e))
return False
def email_warning(
message,
title,
message_prefix=None,
eamil_sender=setting.EAMIL_SENDER,
eamil_password=setting.EAMIL_PASSWORD,
email_receiver=setting.EMAIL_RECEIVER,
rate_limit=setting.WARNING_INTERVAL,
):
if not all([message, eamil_sender, eamil_password, email_receiver]):
return
if is_in_rate_limit(
rate_limit, email_receiver, eamil_sender, message_prefix or message
):
        log.info("Alert interval too short; this alert is skipped. Content: {}".format(message))
return
if isinstance(email_receiver, str):
email_receiver = [email_receiver]
with EmailSender(username=eamil_sender, password=eamil_password) as email:
return email.send(receivers=email_receiver, title=title, content=message)
def linkedsee_warning(message, rate_limit=3600, message_prefix=None, token=None):
"""
    Linkedsee phone-call alert
Args:
message:
rate_limit:
message_prefix:
token:
Returns:
"""
if not token:
log.info("未设置灵犀token,不支持报警")
return
if is_in_rate_limit(rate_limit, token, message_prefix or message):
        log.info("Alert interval too short; this alert is skipped. Content: {}".format(message))
return
headers = {"servicetoken": token, "Content-Type": "application/json"}
url = "http://www.linkedsee.com/alarm/zabbix"
data = {"content": message}
response = requests.post(url, data=json.dumps(data), headers=headers)
return response
def wechat_warning(
message,
message_prefix=None,
rate_limit=setting.WARNING_INTERVAL,
url=setting.WECHAT_WARNING_URL,
user_phone=setting.WECHAT_WARNING_PHONE,
all_users=setting.WECHAT_WARNING_ALL,
):
"""企业微信报警"""
if isinstance(user_phone, str):
user_phone = [user_phone] if user_phone else []
if all_users is True or not user_phone:
user_phone = ["@all"]
if not all([url, message]):
return
if is_in_rate_limit(rate_limit, url, user_phone, message_prefix or message):
        log.info("Alert interval too short; this alert is skipped. Content: {}".format(message))
return
data = {
"msgtype": "text",
"text": {"content": message, "mentioned_mobile_list": user_phone},
}
headers = {"Content-Type": "application/json"}
try:
response = requests.post(
url, headers=headers, data=json.dumps(data).encode("utf8")
)
result = response.json()
response.close()
if result.get("errcode") == 0:
return True
else:
raise Exception(result.get("errmsg"))
except Exception as e:
        log.error("Failed to send alert. Content: {}, error: {}".format(message, e))
return False
def make_item(cls, data: dict):
"""提供Item类与原数据,快速构建Item实例
:param cls: Item类
:param data: 字典格式的数据
"""
item = cls()
for key, val in data.items():
setattr(item, key, val)
    return item
| [] | [] | ["EXECJS_RUNTIME"] | [] | ["EXECJS_RUNTIME"] | python | 1 | 0 | |
hazelcast/src/test/java/com/hazelcast/util/PhoneHomeTest.java | /*
* Copyright (c) 2008-2018, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.util;
import com.hazelcast.config.Config;
import com.hazelcast.config.ManagementCenterConfig;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.instance.BuildInfoProvider;
import com.hazelcast.instance.Node;
import com.hazelcast.spi.properties.GroupProperty;
import com.hazelcast.test.HazelcastParallelClassRunner;
import com.hazelcast.test.HazelcastTestSupport;
import com.hazelcast.test.annotation.ParallelTest;
import com.hazelcast.test.annotation.QuickTest;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.management.RuntimeMXBean;
import java.util.Map;
import static java.lang.System.getenv;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeFalse;
@RunWith(HazelcastParallelClassRunner.class)
@Category({QuickTest.class, ParallelTest.class})
public class PhoneHomeTest extends HazelcastTestSupport {
@Test
public void testPhoneHomeParameters() {
HazelcastInstance hz = createHazelcastInstance();
Node node = getNode(hz);
PhoneHome phoneHome = new PhoneHome(node);
sleepAtLeastMillis(1);
Map<String, String> parameters = phoneHome.phoneHome(node);
RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean();
OperatingSystemMXBean osMxBean = ManagementFactory.getOperatingSystemMXBean();
assertEquals(parameters.get("version"), BuildInfoProvider.getBuildInfo().getVersion());
assertEquals(parameters.get("m"), node.getLocalMember().getUuid());
assertEquals(parameters.get("e"), null);
assertEquals(parameters.get("oem"), null);
assertEquals(parameters.get("l"), null);
assertEquals(parameters.get("hdgb"), null);
assertEquals(parameters.get("p"), "source");
assertEquals(parameters.get("crsz"), "A");
assertEquals(parameters.get("cssz"), "A");
assertEquals(parameters.get("ccpp"), "0");
assertEquals(parameters.get("cdn"), "0");
assertEquals(parameters.get("cjv"), "0");
assertEquals(parameters.get("cnjs"), "0");
assertEquals(parameters.get("cpy"), "0");
assertEquals(parameters.get("cgo"), "0");
assertEquals(parameters.get("jetv"), "");
assertFalse(Integer.parseInt(parameters.get("cuptm")) < 0);
assertNotEquals(parameters.get("nuptm"), "0");
assertNotEquals(parameters.get("nuptm"), parameters.get("cuptm"));
assertEquals(parameters.get("osn"), osMxBean.getName());
assertEquals(parameters.get("osa"), osMxBean.getArch());
assertEquals(parameters.get("osv"), osMxBean.getVersion());
assertEquals(parameters.get("jvmn"), runtimeMxBean.getVmName());
assertEquals(parameters.get("jvmv"), System.getProperty("java.version"));
assertEquals(parameters.get("mcver"), "MC_NOT_CONFIGURED");
assertEquals(parameters.get("mclicense"), "MC_NOT_CONFIGURED");
}
@Test
public void testPhoneHomeParameters_withManagementCenterConfiguredButNotAvailable() {
ManagementCenterConfig managementCenterConfig = new ManagementCenterConfig()
.setEnabled(true)
.setUrl("http://localhost:11111/mancen");
Config config = new Config()
.setManagementCenterConfig(managementCenterConfig);
HazelcastInstance hz = createHazelcastInstance(config);
Node node = getNode(hz);
PhoneHome phoneHome = new PhoneHome(node);
sleepAtLeastMillis(1);
Map<String, String> parameters = phoneHome.phoneHome(node);
assertEquals(parameters.get("mcver"), "MC_NOT_AVAILABLE");
assertEquals(parameters.get("mclicense"), "MC_NOT_AVAILABLE");
}
@Test
@SuppressWarnings("deprecation")
public void testScheduling_whenVersionCheckIsDisabled() {
Config config = new Config()
.setProperty(GroupProperty.VERSION_CHECK_ENABLED.getName(), "false");
HazelcastInstance hz = createHazelcastInstance(config);
Node node = getNode(hz);
PhoneHome phoneHome = new PhoneHome(node);
phoneHome.check(node);
assertNull(phoneHome.phoneHomeFuture);
}
@Test
public void testScheduling_whenPhoneHomeIsDisabled() {
Config config = new Config()
.setProperty(GroupProperty.PHONE_HOME_ENABLED.getName(), "false");
HazelcastInstance hz = createHazelcastInstance(config);
Node node = getNode(hz);
PhoneHome phoneHome = new PhoneHome(node);
phoneHome.check(node);
assertNull(phoneHome.phoneHomeFuture);
}
@Test
public void testShutdown() {
assumeFalse("Skipping. The PhoneHome is disabled by the Environment variable",
"false".equals(getenv("HZ_PHONE_HOME_ENABLED")));
Config config = new Config()
.setProperty(GroupProperty.PHONE_HOME_ENABLED.getName(), "true");
HazelcastInstance hz = createHazelcastInstance(config);
Node node = getNode(hz);
PhoneHome phoneHome = new PhoneHome(node);
phoneHome.check(node);
assertNotNull(phoneHome.phoneHomeFuture);
assertFalse(phoneHome.phoneHomeFuture.isDone());
assertFalse(phoneHome.phoneHomeFuture.isCancelled());
phoneHome.shutdown();
assertTrue(phoneHome.phoneHomeFuture.isCancelled());
}
@Test
public void testConvertToLetter() {
HazelcastInstance hz = createHazelcastInstance();
Node node = getNode(hz);
PhoneHome phoneHome = new PhoneHome(node);
assertEquals("A", phoneHome.convertToLetter(4));
assertEquals("B", phoneHome.convertToLetter(9));
assertEquals("C", phoneHome.convertToLetter(19));
assertEquals("D", phoneHome.convertToLetter(39));
assertEquals("E", phoneHome.convertToLetter(59));
assertEquals("F", phoneHome.convertToLetter(99));
assertEquals("G", phoneHome.convertToLetter(149));
assertEquals("H", phoneHome.convertToLetter(299));
assertEquals("J", phoneHome.convertToLetter(599));
assertEquals("I", phoneHome.convertToLetter(1000));
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
recgve/models/tensorflow/lightgcn.py | #!/usr/bin/env python
__author__ = "XXX"
__email__ = "XXX"
from tensorflow import keras
import logging
import os
import tensorflow as tf
from constants import *
from representations_based_recommender import (
RepresentationsBasedRecommender,
)
from utils.graph_utils import (
nxgraph_from_user_item_interaction_df,
symmetric_normalized_laplacian_matrix,
)
from utils.tensorflow_utils import to_tf_sparse_tensor
import pandas as pd
logger = logging.getLogger(__name__)
logging.basicConfig(level=os.environ.get("LOGLEVEL"))
class LightGCN(keras.Model, RepresentationsBasedRecommender):
"""LightGCN
Note:
paper: https://arxiv.org/abs/2002.02126
Attributes:
train_data (pd.DataFrame): dataframe containing user-item interactions
embeddings_size (int): dimension of user-item embeddings
convolution_depth (int): number of convolution step to perform
"""
def __init__(self, train_data, embeddings_size, convolution_depth):
"""LightGCN
Note:
paper: https://arxiv.org/abs/2002.02126
Args:
train_data (pd.DataFrame): dataframe containing user-item interactions
embeddings_size (int): dimension of user-item embeddings
convolution_depth (int): number of convolution step to perform
"""
keras.Model.__init__(self)
RepresentationsBasedRecommender.__init__(self, train_data)
self.embeddings_size = embeddings_size
# create embeddings
initializer = tf.initializers.GlorotUniform()
self.embeddings = tf.Variable(
initializer(shape=[self.user_count + self.item_count, embeddings_size]),
trainable=True,
)
self.k = convolution_depth
# Compute propagation matrix
graph = nxgraph_from_user_item_interaction_df(
train_data, user_col=DEFAULT_USER_COL, item_col=DEFAULT_ITEM_COL
)
S = symmetric_normalized_laplacian_matrix(graph, self_loop=False)
self.S = to_tf_sparse_tensor(S)
def __call__(self):
"""Return users and items embeddings
Returns:
tf.Variable: embeddings of users and items
"""
x = self.embeddings
depth_embeddings = [x]
# propagation step
for i in range(self.k):
x = tf.sparse.sparse_dense_matmul(self.S, x)
depth_embeddings.append(x)
stackked_emb = tf.stack(depth_embeddings, axis=1)
final_emb = tf.reduce_mean(stackked_emb, axis=1)
return final_emb
def compute_representations(self, user_data):
user_interactions = user_data["interactions"]
user_id = user_interactions[DEFAULT_USER_COL].unique()
logger.info("Computing representations")
embeddings = self()
users_emb = tf.gather(embeddings, tf.constant(user_id)).numpy()
items_emb = tf.gather(
embeddings, tf.constant(self.items_after_users_idxs)
).numpy()
users_repr_df = pd.DataFrame(users_emb, index=user_id)
items_repr_df = pd.DataFrame(items_emb, index=self.item_idxs)
logger.info("Representation computed")
return users_repr_df, items_repr_df
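# Illustrative usage sketch (not part of the original module). It assumes a train_data
# DataFrame containing the DEFAULT_USER_COL / DEFAULT_ITEM_COL columns defined in constants.py:
#
#   model = LightGCN(train_data, embeddings_size=64, convolution_depth=3)
#   final_emb = model()  # propagated user/item embeddings, shape (n_users + n_items, 64)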
| [] | [] | ["LOGLEVEL"] | [] | ["LOGLEVEL"] | python | 1 | 0 | |
tools/test.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
import os
import torch
import warnings
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet3d.apis import single_gpu_test
from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_model
from mmdet.apis import multi_gpu_test, set_random_seed
from mmdet.datasets import replace_ImageToTensor
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('--config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase'
'the inference speed')
parser.add_argument(
'--format-only',
action='store_true',
help='Format the output results without perform evaluation. It is'
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where results will be saved')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--extra_tag', type=str, default='debug')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both specified, '
'--options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
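# Example invocation (paths are illustrative, mirroring the debug defaults used in main() below):
#   python tools/test.py \
#       --config configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py \
#       --checkpoint work_dirs/model_zoo/second/<checkpoint>.pth \
#       --eval kitti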
def main():
args = parse_args()
## TODO DEBUG
if args.extra_tag == 'debug':
args.config = '../configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py'
args.checkpoint = 'work_dirs/model_zoo/second/hv_second_secfpn_6x8_80e_kitti-3d-3class_20200620_230238-9208083a.pth'
# args.show = True
# args.show_dir = 'work_dirs/second-3class'
args.fuse_conv_bn = False
args.eval = 'kitti'
## END
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
# in case the test dataset is concatenated
samples_per_gpu = 1
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(
cfg.data.test.pipeline)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
samples_per_gpu = max(
[ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
if samples_per_gpu > 1:
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# set random seeds
if args.seed is not None:
set_random_seed(args.seed, deterministic=args.deterministic)
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_model(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
# old versions did not save class info in checkpoints, this walkaround is
# for backward compatibility
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
# palette for visualization in segmentation tasks
if 'PALETTE' in checkpoint.get('meta', {}):
model.PALETTE = checkpoint['meta']['PALETTE']
elif hasattr(dataset, 'PALETTE'):
# segmentation dataset has `PALETTE` attribute
model.PALETTE = dataset.PALETTE
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
| [] | [] | ["LOCAL_RANK"] | [] | ["LOCAL_RANK"] | python | 1 | 0 | |
python/ray/tests/test_placement_group_4.py | import pytest
import os
import ray
import ray.cluster_utils
from ray._private.test_utils import (
get_other_nodes,
wait_for_condition,
is_placement_group_removed,
placement_group_assert_no_leak,
)
from ray._raylet import PlacementGroupID
from ray.util.placement_group import PlacementGroup
from ray.util.client.ray_client_helpers import connect_to_client_or_not
from ray._private.runtime_env.context import RuntimeEnvContext
from ray._private.runtime_env.plugin import RuntimeEnvPlugin
MOCK_WORKER_STARTUP_SLOWLY_PLUGIN_CLASS_PATH = (
"ray.tests.test_placement_group_4.MockWorkerStartupSlowlyPlugin" # noqa
)
class MockWorkerStartupSlowlyPlugin(RuntimeEnvPlugin):
    @staticmethod
    def validate(runtime_env_dict: dict) -> str:
return "success"
@staticmethod
def create(uri: str, runtime_env_dict: dict, ctx: RuntimeEnvContext) -> float:
import time
time.sleep(15)
return 0
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_remove_placement_group(ray_start_cluster, connect_to_client):
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
@ray.remote
def warmup():
pass
# warm up the cluster.
ray.get([warmup.remote() for _ in range(4)])
with connect_to_client_or_not(connect_to_client):
# First try to remove a placement group that doesn't
# exist. This should not do anything.
random_group_id = PlacementGroupID.from_random()
random_placement_group = PlacementGroup(random_group_id)
for _ in range(3):
ray.util.remove_placement_group(random_placement_group)
# Creating a placement group as soon as it is
# created should work.
placement_group = ray.util.placement_group([{"CPU": 2}, {"CPU": 2}])
assert placement_group.wait(10)
ray.util.remove_placement_group(placement_group)
wait_for_condition(lambda: is_placement_group_removed(placement_group))
# # Now let's create a placement group.
placement_group = ray.util.placement_group([{"CPU": 2}, {"CPU": 2}])
assert placement_group.wait(10)
# Create an actor that occupies resources.
@ray.remote(num_cpus=2)
class A:
def f(self):
return 3
# Currently, there's no way to prevent
        # tasks from being retried for a removed placement group.
        # Set max_retries=0 for testing.
# TODO(sang): Handle this edge case.
@ray.remote(num_cpus=2, max_retries=0)
def long_running_task():
print(os.getpid())
import time
time.sleep(50)
# Schedule a long running task and actor.
task_ref = long_running_task.options(placement_group=placement_group).remote()
a = A.options(placement_group=placement_group).remote()
assert ray.get(a.f.remote()) == 3
ray.util.remove_placement_group(placement_group)
# Subsequent remove request shouldn't do anything.
for _ in range(3):
ray.util.remove_placement_group(placement_group)
# Make sure placement group resources are
# released and we can schedule this task.
@ray.remote(num_cpus=4)
def f():
return 3
assert ray.get(f.remote()) == 3
# Since the placement group is removed,
# the actor should've been killed.
# That means this request should fail.
with pytest.raises(ray.exceptions.RayActorError, match="actor died"):
ray.get(a.f.remote(), timeout=3.0)
with pytest.raises(ray.exceptions.WorkerCrashedError):
ray.get(task_ref)
def test_remove_placement_group_worker_startup_slowly(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
placement_group = ray.util.placement_group([{"CPU": 2}, {"CPU": 2}])
assert placement_group.wait(10)
@ray.remote(num_cpus=2)
class A:
def f(self):
return 3
@ray.remote(num_cpus=2, max_retries=0)
def long_running_task():
print(os.getpid())
import time
time.sleep(60)
# Schedule a long-running task that uses
# runtime env to mock worker start up slowly.
task_ref = long_running_task.options(
placement_group=placement_group,
runtime_env={"plugins": {MOCK_WORKER_STARTUP_SLOWLY_PLUGIN_CLASS_PATH: {}}},
).remote()
a = A.options(placement_group=placement_group).remote()
assert ray.get(a.f.remote()) == 3
ray.util.remove_placement_group(placement_group)
# Make sure the actor has been killed
# because of the removal of the pg.
# TODO(@clay4444): Make it throw a `ActorPlacementGroupRemoved`.
with pytest.raises(ray.exceptions.RayActorError, match="actor died"):
ray.get(a.f.remote(), timeout=3.0)
# The long-running task should still be in the state
    # of leasing-worker because of the worker startup delay.
with pytest.raises(ray.exceptions.TaskPlacementGroupRemoved):
ray.get(task_ref)
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_remove_pending_placement_group(ray_start_cluster, connect_to_client):
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
# Create a placement group that cannot be scheduled now.
placement_group = ray.util.placement_group([{"GPU": 2}, {"CPU": 2}])
ray.util.remove_placement_group(placement_group)
# TODO(sang): Add state check here.
@ray.remote(num_cpus=4)
def f():
return 3
# Make sure this task is still schedulable.
assert ray.get(f.remote()) == 3
placement_group_assert_no_leak([placement_group])
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_table(ray_start_cluster, connect_to_client):
@ray.remote(num_cpus=2)
class Actor(object):
def __init__(self):
self.n = 0
def value(self):
return self.n
cluster = ray_start_cluster
num_nodes = 2
for _ in range(num_nodes):
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
pgs_created = []
with connect_to_client_or_not(connect_to_client):
# Originally placement group creation should be pending because
# there are no resources.
name = "name"
strategy = "PACK"
bundles = [{"CPU": 2, "GPU": 1}, {"CPU": 2}]
placement_group = ray.util.placement_group(
name=name, strategy=strategy, bundles=bundles
)
pgs_created.append(placement_group)
result = ray.util.placement_group_table(placement_group)
assert result["name"] == name
assert result["strategy"] == strategy
for i in range(len(bundles)):
assert bundles[i] == result["bundles"][i]
assert result["state"] == "PENDING"
# Now the placement group should be scheduled.
cluster.add_node(num_cpus=5, num_gpus=1)
cluster.wait_for_nodes()
actor_1 = Actor.options(
placement_group=placement_group, placement_group_bundle_index=0
).remote()
ray.get(actor_1.value.remote())
result = ray.util.placement_group_table(placement_group)
assert result["state"] == "CREATED"
        # Add two more placement groups for the placement group table test.
second_strategy = "SPREAD"
pgs_created.append(
ray.util.placement_group(
name="second_placement_group", strategy=second_strategy, bundles=bundles
)
)
pgs_created.append(
ray.util.placement_group(
name="third_placement_group", strategy=second_strategy, bundles=bundles
)
)
placement_group_table = ray.util.placement_group_table()
assert len(placement_group_table) == 3
true_name_set = {"name", "second_placement_group", "third_placement_group"}
get_name_set = set()
for _, placement_group_data in placement_group_table.items():
get_name_set.add(placement_group_data["name"])
assert true_name_set == get_name_set
placement_group_assert_no_leak(pgs_created)
def test_placement_group_stats(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 1
for _ in range(num_nodes):
cluster.add_node(num_cpus=4, num_gpus=1)
ray.init(address=cluster.address)
    # Test creatable pgs.
pg = ray.util.placement_group(bundles=[{"CPU": 4, "GPU": 1}])
ray.get(pg.ready())
stats = ray.util.placement_group_table(pg)["stats"]
assert stats["scheduling_attempt"] == 1
assert stats["scheduling_state"] == "FINISHED"
assert stats["end_to_end_creation_latency_ms"] != 0
# Create a pending pg.
pg2 = ray.util.placement_group(bundles=[{"CPU": 4, "GPU": 1}])
def assert_scheduling_state():
stats = ray.util.placement_group_table(pg2)["stats"]
if stats["scheduling_attempt"] != 1:
return False
if stats["scheduling_state"] != "NO_RESOURCES":
return False
if stats["end_to_end_creation_latency_ms"] != 0:
return False
return True
wait_for_condition(assert_scheduling_state)
# Remove the first pg, and the second
# pg should be schedulable now.
ray.util.remove_placement_group(pg)
def assert_scheduling_state():
stats = ray.util.placement_group_table(pg2)["stats"]
if stats["scheduling_state"] != "FINISHED":
return False
if stats["end_to_end_creation_latency_ms"] == 0:
return False
return True
wait_for_condition(assert_scheduling_state)
# Infeasible pg.
pg3 = ray.util.placement_group(bundles=[{"CPU": 4, "a": 1}])
# TODO This is supposed to be infeasible, but it is printed
# as NO_RESOURCES. Fix the issue.
# def assert_scheduling_state():
# stats = ray.util.placement_group_table(pg3)["stats"]
# print(stats)
# if stats["scheduling_state"] != "INFEASIBLE":
# return False
# return True
# wait_for_condition(assert_scheduling_state)
ray.util.remove_placement_group(pg3)
def assert_scheduling_state():
stats = ray.util.placement_group_table(pg3)["stats"]
if stats["scheduling_state"] != "REMOVED":
return False
return True
wait_for_condition(assert_scheduling_state)
placement_group_assert_no_leak([pg2])
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_cuda_visible_devices(ray_start_cluster, connect_to_client):
@ray.remote(num_gpus=1)
def f():
return os.environ["CUDA_VISIBLE_DEVICES"]
cluster = ray_start_cluster
num_nodes = 1
for _ in range(num_nodes):
cluster.add_node(num_gpus=1)
ray.init(address=cluster.address)
with connect_to_client_or_not(connect_to_client):
g1 = ray.util.placement_group([{"CPU": 1, "GPU": 1}])
o1 = f.options(placement_group=g1).remote()
devices = ray.get(o1)
assert devices == "0", devices
placement_group_assert_no_leak([g1])
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_placement_group_reschedule_when_node_dead(
ray_start_cluster, connect_to_client
):
@ray.remote(num_cpus=1)
class Actor(object):
def __init__(self):
self.n = 0
def value(self):
return self.n
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
cluster.add_node(num_cpus=4)
cluster.add_node(num_cpus=4)
cluster.wait_for_nodes()
ray.init(address=cluster.address, namespace="default_test_namespace")
# Make sure both head and worker node are alive.
nodes = ray.nodes()
assert len(nodes) == 3
assert nodes[0]["alive"] and nodes[1]["alive"] and nodes[2]["alive"]
with connect_to_client_or_not(connect_to_client):
placement_group = ray.util.placement_group(
name="name", strategy="SPREAD", bundles=[{"CPU": 2}, {"CPU": 2}, {"CPU": 2}]
)
actor_1 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=0,
lifetime="detached",
).remote()
actor_2 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=1,
lifetime="detached",
).remote()
actor_3 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=2,
lifetime="detached",
).remote()
ray.get(actor_1.value.remote())
ray.get(actor_2.value.remote())
ray.get(actor_3.value.remote())
cluster.remove_node(get_other_nodes(cluster, exclude_head=True)[-1])
cluster.wait_for_nodes()
actor_4 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=0,
lifetime="detached",
).remote()
actor_5 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=1,
lifetime="detached",
).remote()
actor_6 = Actor.options(
placement_group=placement_group,
placement_group_bundle_index=2,
lifetime="detached",
).remote()
ray.get(actor_4.value.remote())
ray.get(actor_5.value.remote())
ray.get(actor_6.value.remote())
placement_group_assert_no_leak([placement_group])
ray.shutdown()
def test_infeasible_pg(ray_start_cluster):
"""Test infeasible pgs are scheduled after new nodes are added."""
cluster = ray_start_cluster
cluster.add_node(num_cpus=2)
ray.init("auto")
bundle = {"CPU": 4, "GPU": 1}
pg = ray.util.placement_group([bundle], name="worker_1", strategy="STRICT_PACK")
# Placement group is infeasible.
with pytest.raises(ray.exceptions.GetTimeoutError):
ray.get(pg.ready(), timeout=3)
state = ray.util.placement_group_table()[pg.id.hex()]["stats"]["scheduling_state"]
assert state == "INFEASIBLE"
# Add a new node. PG can now be scheduled.
cluster.add_node(num_cpus=4, num_gpus=1)
assert ray.get(pg.ready(), timeout=10)
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
fills_test.go | package goftx
import (
"os"
"testing"
"github.com/joho/godotenv"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/grishinsana/goftx/models"
)
func TestFills_GetFills(t *testing.T) {
_ = godotenv.Load()
ftx := New(
WithAuth(os.Getenv("FTX_KEY"), os.Getenv("FTX_SECRET")),
)
err := ftx.SetServerTimeDiff()
require.NoError(t, err)
market := "ETH/BTC"
fills, err := ftx.Fills.GetFills(&models.GetFillsParams{
Market: &market,
})
assert.NoError(t, err)
assert.NotNil(t, fills)
}
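// Illustrative .env contents for running this test locally (values are placeholders;
// godotenv loads the file before the client is constructed):
//
//	FTX_KEY=your-api-key
//	FTX_SECRET=your-api-secret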
| ["\"FTX_KEY\"", "\"FTX_SECRET\""] | [] | ["FTX_SECRET", "FTX_KEY"] | [] | ["FTX_SECRET", "FTX_KEY"] | go | 2 | 0 | |
cloud-sql/mysql/sqlalchemy/main.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import os
from flask import Flask, render_template, request, Response
import sqlalchemy
# Remember - storing secrets in plaintext is potentially unsafe. Consider using
# something like https://cloud.google.com/kms/ to help keep secrets secret.
db_user = os.environ.get("DB_USER")
db_pass = os.environ.get("DB_PASS")
db_name = os.environ.get("DB_NAME")
cloud_sql_connection_name = os.environ.get("CLOUD_SQL_CONNECTION_NAME")
app = Flask(__name__)
logger = logging.getLogger()
# [START cloud_sql_mysql_sqlalchemy_create]
# The SQLAlchemy engine will help manage interactions, including automatically
# managing a pool of connections to your database
db = sqlalchemy.create_engine(
# Equivalent URL:
    # mysql+pymysql://<db_user>:<db_pass>@/<db_name>?unix_socket=/cloudsql/<cloud_sql_instance_name>
sqlalchemy.engine.url.URL(
drivername='mysql+pymysql',
username=db_user,
password=db_pass,
database=db_name,
query={
'unix_socket': '/cloudsql/{}/'.format(cloud_sql_connection_name)
}
),
# ... Specify additional properties here.
# [START_EXCLUDE]
# [START cloud_sql_mysql_sqlalchemy_limit]
# Pool size is the maximum number of permanent connections to keep.
pool_size=5,
# Temporarily exceeds the set pool_size if no connections are available.
max_overflow=2,
# The total number of concurrent connections for your application will be
# a total of pool_size and max_overflow.
# [END cloud_sql_mysql_sqlalchemy_limit]
# [START cloud_sql_mysql_sqlalchemy_backoff]
# SQLAlchemy automatically uses delays between failed connection attempts,
# but provides no arguments for configuration.
# [END cloud_sql_mysql_sqlalchemy_backoff]
# [START cloud_sql_mysql_sqlalchemy_timeout]
# 'pool_timeout' is the maximum number of seconds to wait when retrieving a
# new connection from the pool. After the specified amount of time, an
# exception will be thrown.
pool_timeout=30, # 30 seconds
# [END cloud_sql_mysql_sqlalchemy_timeout]
# [START cloud_sql_mysql_sqlalchemy_lifetime]
# 'pool_recycle' is the maximum number of seconds a connection can persist.
# Connections that live longer than the specified amount of time will be
# reestablished
pool_recycle=1800, # 30 minutes
# [END cloud_sql_mysql_sqlalchemy_lifetime]
# [END_EXCLUDE]
)
# [END cloud_sql_mysql_sqlalchemy_create]
@app.before_first_request
def create_tables():
# Create tables (if they don't already exist)
with db.connect() as conn:
conn.execute(
"CREATE TABLE IF NOT EXISTS votes "
"( vote_id SERIAL NOT NULL, time_cast timestamp NOT NULL, "
"candidate CHAR(6) NOT NULL, PRIMARY KEY (vote_id) );"
)
@app.route('/', methods=['GET'])
def index():
votes = []
with db.connect() as conn:
# Execute the query and fetch all results
recent_votes = conn.execute(
"SELECT candidate, time_cast FROM votes "
"ORDER BY time_cast DESC LIMIT 5"
).fetchall()
# Convert the results into a list of dicts representing votes
for row in recent_votes:
votes.append({
'candidate': row[0],
'time_cast': row[1]
})
stmt = sqlalchemy.text(
"SELECT COUNT(vote_id) FROM votes WHERE candidate=:candidate")
# Count number of votes for tabs
tab_result = conn.execute(stmt, candidate="TABS").fetchone()
tab_count = tab_result[0]
# Count number of votes for spaces
space_result = conn.execute(stmt, candidate="SPACES").fetchone()
space_count = space_result[0]
return render_template(
'index.html',
recent_votes=votes,
tab_count=tab_count,
space_count=space_count
)
@app.route('/', methods=['POST'])
def save_vote():
# Get the team and time the vote was cast.
team = request.form['team']
time_cast = datetime.datetime.utcnow()
# Verify that the team is one of the allowed options
if team != "TABS" and team != "SPACES":
logger.warning(team)
return Response(
response="Invalid team specified.",
status=400
)
# [START cloud_sql_mysql_sqlalchemy_connection]
# Preparing a statement before hand can help protect against injections.
stmt = sqlalchemy.text(
"INSERT INTO votes (time_cast, candidate)"
" VALUES (:time_cast, :candidate)"
)
try:
# Using a with statement ensures that the connection is always released
# back into the pool at the end of statement (even if an error occurs)
with db.connect() as conn:
conn.execute(stmt, time_cast=time_cast, candidate=team)
except Exception as e:
# If something goes wrong, handle the error in this section. This might
# involve retrying or adjusting parameters depending on the situation.
# [START_EXCLUDE]
logger.exception(e)
return Response(
status=500,
response="Unable to successfully cast vote! Please check the "
"application logs for more details."
)
# [END_EXCLUDE]
# [END cloud_sql_mysql_sqlalchemy_connection]
return Response(
status=200,
response="Vote successfully cast for '{}' at time {}!".format(
team, time_cast)
)
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
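# Illustrative environment for running this sample locally (values are placeholders;
# the variable names match the os.environ.get calls above):
#   export DB_USER=vote_user
#   export DB_PASS=secret
#   export DB_NAME=votes
#   export CLOUD_SQL_CONNECTION_NAME=project:region:instance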
| [] | [] | ["DB_PASS", "CLOUD_SQL_CONNECTION_NAME", "DB_USER", "DB_NAME"] | [] | ["DB_PASS", "CLOUD_SQL_CONNECTION_NAME", "DB_USER", "DB_NAME"] | python | 4 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'offline.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
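# Typical usage (standard Django workflow, shown for illustration):
#   python manage.py migrate
#   python manage.py runserver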
| [] | [] | [] | [] | [] | python | 0 | 0 | |
internal/server/v1/api.go | package v1
import (
"net/http"
"os"
"time"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
"github.com/go-chi/jwtauth"
"gitlab.com/menuxd/api-rest/internal/storage"
)
// NewAPI returns the API V1 Handler with configuration.
func NewAPI() (http.Handler, error) {
if err := storage.InitData(); err != nil {
return nil, err
}
tokenAuth := jwtauth.New("HS256", []byte(os.Getenv("XD_SIGNING_STRING")), nil)
r := chi.NewRouter()
um, ur := NewUserRouter(storage.UserStorage{})
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
Post("/login", ur.LoginHandler)
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
Put("/forgot-password/", ur.ForgotPasswordHandler)
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
Mount("/users", um)
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
Mount("/dishes", NewDishRouter(storage.DishStorage{}))
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
Mount("/clients", NewClientRouter(storage.ClientStorage{}))
r.Mount("/orders", NewOrderRouter(
storage.OrderStorage{},
storage.TableStorage{},
storage.DishStorage{},
))
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
Mount("/categories", NewCategoryRouter(storage.CategoryStorage{}))
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
With(jwtauth.Verifier(tokenAuth)).With(jwtauth.Authenticator).
Mount("/waiters", NewWaiterRouter(storage.WaiterStorage{}))
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
With(jwtauth.Verifier(tokenAuth)).With(jwtauth.Authenticator).
Mount("/pictures", NewPictureRouter())
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
With(jwtauth.Verifier(tokenAuth)).With(jwtauth.Authenticator).
Mount("/tables", NewTableRouter(storage.TableStorage{}))
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
With(jwtauth.Verifier(tokenAuth)).With(jwtauth.Authenticator).
Mount("/bills", NewBillRouter(storage.BillStorage{}))
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
With(jwtauth.Verifier(tokenAuth)).With(jwtauth.Authenticator).
Mount("/promotions", NewPromotionRouter(storage.PromotionStorage{}))
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
With(jwtauth.Verifier(tokenAuth)).With(jwtauth.Authenticator).
Mount("/ads", NewAdRouter(storage.AdStorage{}))
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
With(jwtauth.Verifier(tokenAuth)).With(jwtauth.Authenticator).
Mount("/ratings", NewRatingRouter(storage.RatingStorage{}))
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
With(jwtauth.Verifier(tokenAuth)).With(jwtauth.Authenticator).
Mount("/questions", NewQuestionRouter(storage.QuestionStorage{}))
r.With(middleware.DefaultCompress).
With(middleware.Timeout(10*time.Second)).
With(jwtauth.Verifier(tokenAuth)).With(jwtauth.Authenticator).
Mount("/stay", NewStayRouter(storage.StayStorage{}))
return r, nil
}
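// Illustrative wiring (not part of this file): mounting the v1 handler on a plain
// net/http server. The listen address is an assumption.
//
//	handler, err := v1.NewAPI()
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Fatal(http.ListenAndServe(":8080", handler))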
| ["\"XD_SIGNING_STRING\""] | [] | ["XD_SIGNING_STRING"] | [] | ["XD_SIGNING_STRING"] | go | 1 | 0 | |
client/grpc/grpc.go | // Package grpc provides a gRPC client
package grpc
import (
"context"
"crypto/tls"
"fmt"
"os"
"sync"
"time"
"github.com/micro/go-micro/broker"
"github.com/micro/go-micro/client"
"github.com/micro/go-micro/client/selector"
raw "github.com/micro/go-micro/codec/bytes"
"github.com/micro/go-micro/errors"
"github.com/micro/go-micro/metadata"
"github.com/micro/go-micro/registry"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
gmetadata "google.golang.org/grpc/metadata"
)
type grpcClient struct {
once sync.Once
opts client.Options
pool *pool
}
func init() {
encoding.RegisterCodec(wrapCodec{jsonCodec{}})
encoding.RegisterCodec(wrapCodec{protoCodec{}})
encoding.RegisterCodec(wrapCodec{bytesCodec{}})
}
// secure returns the dial option for whether its a secure or insecure connection
func (g *grpcClient) secure() grpc.DialOption {
if g.opts.Context != nil {
if v := g.opts.Context.Value(tlsAuth{}); v != nil {
tls := v.(*tls.Config)
creds := credentials.NewTLS(tls)
return grpc.WithTransportCredentials(creds)
}
}
return grpc.WithInsecure()
}
func (g *grpcClient) next(request client.Request, opts client.CallOptions) (selector.Next, error) {
service := request.Service()
// get proxy
if prx := os.Getenv("MICRO_PROXY"); len(prx) > 0 {
service = prx
}
// get proxy address
if prx := os.Getenv("MICRO_PROXY_ADDRESS"); len(prx) > 0 {
opts.Address = []string{prx}
}
// return remote address
if len(opts.Address) > 0 {
return func() (*registry.Node, error) {
return ®istry.Node{
Address: opts.Address[0],
}, nil
}, nil
}
// get next nodes from the selector
next, err := g.opts.Selector.Select(service, opts.SelectOptions...)
if err != nil {
if err == selector.ErrNotFound {
return nil, errors.InternalServerError("go.micro.client", "service %s: %s", service, err.Error())
}
return nil, errors.InternalServerError("go.micro.client", "error selecting %s node: %s", service, err.Error())
}
return next, nil
}
func (g *grpcClient) call(ctx context.Context, node *registry.Node, req client.Request, rsp interface{}, opts client.CallOptions) error {
address := node.Address
header := make(map[string]string)
if md, ok := metadata.FromContext(ctx); ok {
for k, v := range md {
header[k] = v
}
}
// set timeout in nanoseconds
header["timeout"] = fmt.Sprintf("%d", opts.RequestTimeout)
// set the content type for the request
header["x-content-type"] = req.ContentType()
md := gmetadata.New(header)
ctx = gmetadata.NewOutgoingContext(ctx, md)
cf, err := g.newGRPCCodec(req.ContentType())
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
maxRecvMsgSize := g.maxRecvMsgSizeValue()
maxSendMsgSize := g.maxSendMsgSizeValue()
var grr error
grpcDialOptions := []grpc.DialOption{
grpc.WithDefaultCallOptions(grpc.ForceCodec(cf)),
grpc.WithTimeout(opts.DialTimeout),
g.secure(),
grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(maxRecvMsgSize),
grpc.MaxCallSendMsgSize(maxSendMsgSize),
),
}
if opts := g.getGrpcDialOptions(); opts != nil {
grpcDialOptions = append(grpcDialOptions, opts...)
}
cc, err := g.pool.getConn(address, grpcDialOptions...)
if err != nil {
return errors.InternalServerError("go.micro.client", fmt.Sprintf("Error sending request: %v", err))
}
defer func() {
// defer execution of release
g.pool.release(address, cc, grr)
}()
ch := make(chan error, 1)
go func() {
grpcCallOptions := []grpc.CallOption{grpc.CallContentSubtype(cf.Name())}
if opts := g.getGrpcCallOptions(); opts != nil {
grpcCallOptions = append(grpcCallOptions, opts...)
}
err := cc.Invoke(ctx, methodToGRPC(req.Service(), req.Endpoint()), req.Body(), rsp, grpcCallOptions...)
ch <- microError(err)
}()
select {
case err := <-ch:
grr = err
case <-ctx.Done():
grr = ctx.Err()
}
return grr
}
func (g *grpcClient) stream(ctx context.Context, node *registry.Node, req client.Request, opts client.CallOptions) (client.Stream, error) {
address := node.Address
header := make(map[string]string)
if md, ok := metadata.FromContext(ctx); ok {
for k, v := range md {
header[k] = v
}
}
// set timeout in nanoseconds
header["timeout"] = fmt.Sprintf("%d", opts.RequestTimeout)
// set the content type for the request
header["x-content-type"] = req.ContentType()
md := gmetadata.New(header)
ctx = gmetadata.NewOutgoingContext(ctx, md)
cf, err := g.newGRPCCodec(req.ContentType())
if err != nil {
return nil, errors.InternalServerError("go.micro.client", err.Error())
}
var dialCtx context.Context
var cancel context.CancelFunc
if opts.DialTimeout >= 0 {
dialCtx, cancel = context.WithTimeout(ctx, opts.DialTimeout)
} else {
dialCtx, cancel = context.WithCancel(ctx)
}
defer cancel()
wc := wrapCodec{cf}
grpcDialOptions := []grpc.DialOption{
grpc.WithDefaultCallOptions(grpc.ForceCodec(wc)),
g.secure(),
}
if opts := g.getGrpcDialOptions(); opts != nil {
grpcDialOptions = append(grpcDialOptions, opts...)
}
cc, err := grpc.DialContext(dialCtx, address, grpcDialOptions...)
if err != nil {
return nil, errors.InternalServerError("go.micro.client", fmt.Sprintf("Error sending request: %v", err))
}
desc := &grpc.StreamDesc{
StreamName: req.Service() + req.Endpoint(),
ClientStreams: true,
ServerStreams: true,
}
grpcCallOptions := []grpc.CallOption{grpc.CallContentSubtype(cf.Name())}
if opts := g.getGrpcCallOptions(); opts != nil {
grpcCallOptions = append(grpcCallOptions, opts...)
}
// create a new cancelling context
newCtx, cancel := context.WithCancel(ctx)
st, err := cc.NewStream(newCtx, desc, methodToGRPC(req.Service(), req.Endpoint()), grpcCallOptions...)
if err != nil {
return nil, errors.InternalServerError("go.micro.client", fmt.Sprintf("Error creating stream: %v", err))
}
codec := &grpcCodec{
s: st,
c: wc,
}
// set request codec
if r, ok := req.(*grpcRequest); ok {
r.codec = codec
}
rsp := &response{
conn: cc,
stream: st,
codec: cf,
gcodec: codec,
}
return &grpcStream{
context: ctx,
request: req,
response: rsp,
stream: st,
conn: cc,
cancel: cancel,
}, nil
}
func (g *grpcClient) poolMaxStreams() int {
if g.opts.Context == nil {
return DefaultPoolMaxStreams
}
v := g.opts.Context.Value(poolMaxStreams{})
if v == nil {
return DefaultPoolMaxStreams
}
return v.(int)
}
func (g *grpcClient) poolMaxIdle() int {
if g.opts.Context == nil {
return DefaultPoolMaxIdle
}
v := g.opts.Context.Value(poolMaxIdle{})
if v == nil {
return DefaultPoolMaxIdle
}
return v.(int)
}
func (g *grpcClient) maxRecvMsgSizeValue() int {
if g.opts.Context == nil {
return DefaultMaxRecvMsgSize
}
v := g.opts.Context.Value(maxRecvMsgSizeKey{})
if v == nil {
return DefaultMaxRecvMsgSize
}
return v.(int)
}
func (g *grpcClient) maxSendMsgSizeValue() int {
if g.opts.Context == nil {
return DefaultMaxSendMsgSize
}
v := g.opts.Context.Value(maxSendMsgSizeKey{})
if v == nil {
return DefaultMaxSendMsgSize
}
return v.(int)
}
func (g *grpcClient) newGRPCCodec(contentType string) (encoding.Codec, error) {
codecs := make(map[string]encoding.Codec)
if g.opts.Context != nil {
if v := g.opts.Context.Value(codecsKey{}); v != nil {
codecs = v.(map[string]encoding.Codec)
}
}
if c, ok := codecs[contentType]; ok {
return wrapCodec{c}, nil
}
if c, ok := defaultGRPCCodecs[contentType]; ok {
return wrapCodec{c}, nil
}
return nil, fmt.Errorf("Unsupported Content-Type: %s", contentType)
}
func (g *grpcClient) Init(opts ...client.Option) error {
size := g.opts.PoolSize
ttl := g.opts.PoolTTL
for _, o := range opts {
o(&g.opts)
}
// update pool configuration if the options changed
if size != g.opts.PoolSize || ttl != g.opts.PoolTTL {
g.pool.Lock()
g.pool.size = g.opts.PoolSize
g.pool.ttl = int64(g.opts.PoolTTL.Seconds())
g.pool.Unlock()
}
return nil
}
func (g *grpcClient) Options() client.Options {
return g.opts
}
func (g *grpcClient) NewMessage(topic string, msg interface{}, opts ...client.MessageOption) client.Message {
return newGRPCEvent(topic, msg, g.opts.ContentType, opts...)
}
func (g *grpcClient) NewRequest(service, method string, req interface{}, reqOpts ...client.RequestOption) client.Request {
return newGRPCRequest(service, method, req, g.opts.ContentType, reqOpts...)
}
func (g *grpcClient) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
// make a copy of call opts
callOpts := g.opts.CallOptions
for _, opt := range opts {
opt(&callOpts)
}
next, err := g.next(req, callOpts)
if err != nil {
return err
}
// check if we already have a deadline
d, ok := ctx.Deadline()
if !ok {
// no deadline so we create a new one
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, callOpts.RequestTimeout)
defer cancel()
} else {
// got a deadline so no need to setup context
// but we need to set the timeout we pass along
opt := client.WithRequestTimeout(time.Until(d))
opt(&callOpts)
}
// should we noop right here?
select {
case <-ctx.Done():
return errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
default:
}
// make copy of call method
gcall := g.call
// wrap the call in reverse
for i := len(callOpts.CallWrappers); i > 0; i-- {
gcall = callOpts.CallWrappers[i-1](gcall)
}
// return errors.New("go.micro.client", "request timeout", 408)
call := func(i int) error {
// call backoff first. Someone may want an initial start delay
t, err := callOpts.Backoff(ctx, req, i)
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
// only sleep if greater than 0
if t.Seconds() > 0 {
time.Sleep(t)
}
// select next node
node, err := next()
service := req.Service()
if err != nil {
if err == selector.ErrNotFound {
return errors.InternalServerError("go.micro.client", "service %s: %s", service, err.Error())
}
return errors.InternalServerError("go.micro.client", "error selecting %s node: %s", service, err.Error())
}
// make the call
err = gcall(ctx, node, req, rsp, callOpts)
g.opts.Selector.Mark(service, node, err)
return err
}
ch := make(chan error, callOpts.Retries+1)
var gerr error
for i := 0; i <= callOpts.Retries; i++ {
go func(i int) {
ch <- call(i)
}(i)
select {
case <-ctx.Done():
return errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
case err := <-ch:
// if the call succeeded lets bail early
if err == nil {
return nil
}
retry, rerr := callOpts.Retry(ctx, req, i, err)
if rerr != nil {
return rerr
}
if !retry {
return err
}
gerr = err
}
}
return gerr
}
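// Illustrative caller-side configuration of the retry loop in Call above, assuming
// the standard go-micro call options (client.WithRetries and client.WithRequestTimeout;
// only WithRequestTimeout is referenced elsewhere in this file):
//   err := c.Call(ctx, req, &rsp,
//       client.WithRetries(3),
//       client.WithRequestTimeout(5*time.Second),
//   )
// Each attempt runs callOpts.Backoff before dialing and consults callOpts.Retry to
// decide whether the returned error warrants another attempt.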
func (g *grpcClient) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
// make a copy of call opts
callOpts := g.opts.CallOptions
for _, opt := range opts {
opt(&callOpts)
}
next, err := g.next(req, callOpts)
if err != nil {
return nil, err
}
// #200 - streams shouldn't have a request timeout set on the context
// should we noop right here?
select {
case <-ctx.Done():
return nil, errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
default:
}
call := func(i int) (client.Stream, error) {
// call backoff first. Someone may want an initial start delay
t, err := callOpts.Backoff(ctx, req, i)
if err != nil {
return nil, errors.InternalServerError("go.micro.client", err.Error())
}
// only sleep if greater than 0
if t.Seconds() > 0 {
time.Sleep(t)
}
node, err := next()
service := req.Service()
if err != nil {
if err == selector.ErrNotFound {
return nil, errors.InternalServerError("go.micro.client", "service %s: %s", service, err.Error())
}
return nil, errors.InternalServerError("go.micro.client", "error selecting %s node: %s", service, err.Error())
}
stream, err := g.stream(ctx, node, req, callOpts)
g.opts.Selector.Mark(service, node, err)
return stream, err
}
type response struct {
stream client.Stream
err error
}
ch := make(chan response, callOpts.Retries+1)
var grr error
for i := 0; i <= callOpts.Retries; i++ {
go func(i int) {
s, err := call(i)
ch <- response{s, err}
}(i)
select {
case <-ctx.Done():
return nil, errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
case rsp := <-ch:
// if the call succeeded lets bail early
if rsp.err == nil {
return rsp.stream, nil
}
retry, rerr := callOpts.Retry(ctx, req, i, rsp.err)
if rerr != nil {
return nil, rerr
}
if !retry {
return nil, rsp.err
}
grr = rsp.err
}
}
return nil, grr
}
func (g *grpcClient) Publish(ctx context.Context, p client.Message, opts ...client.PublishOption) error {
var options client.PublishOptions
for _, o := range opts {
o(&options)
}
md, ok := metadata.FromContext(ctx)
if !ok {
md = make(map[string]string)
}
md["Content-Type"] = p.ContentType()
md["Micro-Topic"] = p.Topic()
cf, err := g.newGRPCCodec(p.ContentType())
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
var body []byte
// passed in raw data
if d, ok := p.Payload().(*raw.Frame); ok {
body = d.Data
} else {
// set the body
b, err := cf.Marshal(p.Payload())
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
body = b
}
g.once.Do(func() {
g.opts.Broker.Connect()
})
topic := p.Topic()
// get proxy topic
if prx := os.Getenv("MICRO_PROXY"); len(prx) > 0 {
options.Exchange = prx
}
// get the exchange
if len(options.Exchange) > 0 {
topic = options.Exchange
}
return g.opts.Broker.Publish(topic, &broker.Message{
Header: md,
Body: body,
})
}
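// Illustrative behaviour of the proxy/exchange override above (topic and proxy name
// are made up): with MICRO_PROXY=service.proxy set in the environment, a call such as
//   c.Publish(ctx, c.NewMessage("events.user.created", ev))
// publishes to the "service.proxy" exchange while the original topic is preserved in
// the Micro-Topic header for downstream routing.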
func (g *grpcClient) String() string {
return "grpc"
}
func (g *grpcClient) getGrpcDialOptions() []grpc.DialOption {
if g.opts.CallOptions.Context == nil {
return nil
}
v := g.opts.CallOptions.Context.Value(grpcDialOptions{})
if v == nil {
return nil
}
opts, ok := v.([]grpc.DialOption)
if !ok {
return nil
}
return opts
}
func (g *grpcClient) getGrpcCallOptions() []grpc.CallOption {
if g.opts.CallOptions.Context == nil {
return nil
}
v := g.opts.CallOptions.Context.Value(grpcCallOptions{})
if v == nil {
return nil
}
opts, ok := v.([]grpc.CallOption)
if !ok {
return nil
}
return opts
}
func newClient(opts ...client.Option) client.Client {
options := client.NewOptions()
// default content type for grpc
options.ContentType = "application/grpc+proto"
for _, o := range opts {
o(&options)
}
rc := &grpcClient{
once: sync.Once{},
opts: options,
}
rc.pool = newPool(options.PoolSize, options.PoolTTL, rc.poolMaxIdle(), rc.poolMaxStreams())
c := client.Client(rc)
// wrap in reverse
for i := len(options.Wrappers); i > 0; i-- {
c = options.Wrappers[i-1](c)
}
return c
}
func NewClient(opts ...client.Option) client.Client {
return newClient(opts...)
}
| [
"\"MICRO_PROXY\"",
"\"MICRO_PROXY_ADDRESS\"",
"\"MICRO_PROXY\""
]
| []
| [
"MICRO_PROXY",
"MICRO_PROXY_ADDRESS"
]
| [] | ["MICRO_PROXY", "MICRO_PROXY_ADDRESS"] | go | 2 | 0 | |
mainapp/wsgi.py | """
WSGI config for mainapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mainapp.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
exploitationservice_test.go | package goitop
import (
"os"
"testing"
)
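// These are integration tests: they expect a reachable iTop instance described by
// environment variables (values below are illustrative):
//   GOITOP_ADDR=https://itop.example.com GOITOP_USER=admin GOITOP_PASSWORD=secret \
//   GOITOP_EXP_NAME="My Service" GOITOP_EXP_ID=42 go test ./...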
func TestClient_GetAllExploitationService(t *testing.T) {
c := NewClient(os.Getenv("GOITOP_ADDR"), os.Getenv("GOITOP_USER"), os.Getenv("GOITOP_PASSWORD"))
services, err := c.GetAllExploitationService()
if err != nil {
t.Fatal(err)
}
if len(services) == 0 {
t.Fatal("No Exploitation Service returned")
}
}
func TestClient_GetExploitationService(t *testing.T) {
c := NewClient(os.Getenv("GOITOP_ADDR"), os.Getenv("GOITOP_USER"), os.Getenv("GOITOP_PASSWORD"))
id, err := c.GetExploitationService(os.Getenv("GOITOP_EXP_NAME"))
if err != nil {
t.Fatal(err)
}
if id != os.Getenv("GOITOP_EXP_ID") {
t.Fatalf("GetExploitationService() got %s; want %s", id, os.Getenv("GOITOP_EXP_ID"))
}
}
| [
"\"GOITOP_ADDR\"",
"\"GOITOP_USER\"",
"\"GOITOP_PASSWORD\"",
"\"GOITOP_ADDR\"",
"\"GOITOP_USER\"",
"\"GOITOP_PASSWORD\"",
"\"GOITOP_EXP_NAME\"",
"\"GOITOP_EXP_ID\"",
"\"GOITOP_EXP_ID\""
]
| []
| [
"GOITOP_ADDR",
"GOITOP_EXP_NAME",
"GOITOP_EXP_ID",
"GOITOP_USER",
"GOITOP_PASSWORD"
]
| [] | ["GOITOP_ADDR", "GOITOP_EXP_NAME", "GOITOP_EXP_ID", "GOITOP_USER", "GOITOP_PASSWORD"] | go | 5 | 0 | |
pkg/kubectl/cmd/diff.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/kubectl/apply/parse"
"k8s.io/kubernetes/pkg/kubectl/apply/strategy"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
"k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource"
"k8s.io/kubernetes/pkg/kubectl/util/i18n"
"k8s.io/utils/exec"
)
var (
diffLong = templates.LongDesc(i18n.T(`
Diff configurations specified by filename or stdin between their local,
last-applied, live and/or "merged" versions.
LOCAL and LIVE versions are diffed by default. Other available keywords
are MERGED and LAST.
Output is always YAML.
KUBERNETES_EXTERNAL_DIFF environment variable can be used to select your own
diff command. By default, the "diff" command available in your path will be
run with "-u" (unicode) and "-N" (treat new files as empty) options.`))
diffExample = templates.Examples(i18n.T(`
# Diff resources included in pod.json. By default, it will diff LOCAL and LIVE versions
kubectl alpha diff -f pod.json
# When one version is specified, diff that version against LIVE
cat service.yaml | kubectl alpha diff -f - MERGED
# Or specify both versions
kubectl alpha diff -f pod.json -f service.yaml LAST LOCAL`))
)
type DiffOptions struct {
FilenameOptions resource.FilenameOptions
}
func isValidArgument(arg string) error {
switch arg {
case "LOCAL", "LIVE", "LAST", "MERGED":
return nil
default:
return fmt.Errorf(`Invalid parameter %q, must be either "LOCAL", "LIVE", "LAST" or "MERGED"`, arg)
}
}
func parseDiffArguments(args []string) (string, string, error) {
if len(args) > 2 {
return "", "", fmt.Errorf("Invalid number of arguments: expected at most 2.")
}
// Default values
from := "LOCAL"
to := "LIVE"
if len(args) > 0 {
from = args[0]
}
if len(args) > 1 {
to = args[1]
}
if err := isValidArgument(to); err != nil {
return "", "", err
}
if err := isValidArgument(from); err != nil {
return "", "", err
}
return from, to, nil
}
func NewCmdDiff(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
var options DiffOptions
diff := DiffProgram{
Exec: exec.New(),
IOStreams: streams,
}
cmd := &cobra.Command{
Use: "diff -f FILENAME",
DisableFlagsInUseLine: true,
Short: i18n.T("Diff different versions of configurations"),
Long: diffLong,
Example: diffExample,
Run: func(cmd *cobra.Command, args []string) {
from, to, err := parseDiffArguments(args)
cmdutil.CheckErr(err)
cmdutil.CheckErr(RunDiff(f, &diff, &options, from, to))
},
}
usage := "contains the configuration to diff"
cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage)
cmd.MarkFlagRequired("filename")
return cmd
}
// DiffProgram finds and runs the diff program. The value of the
// KUBERNETES_EXTERNAL_DIFF environment variable will be used as the diff
// program. By default, `diff(1)` will be used.
type DiffProgram struct {
Exec exec.Interface
genericclioptions.IOStreams
}
func (d *DiffProgram) getCommand(args ...string) exec.Cmd {
diff := ""
if envDiff := os.Getenv("KUBERNETES_EXTERNAL_DIFF"); envDiff != "" {
diff = envDiff
} else {
diff = "diff"
args = append([]string{"-u", "-N"}, args...)
}
cmd := d.Exec.Command(diff, args...)
cmd.SetStdout(d.Out)
cmd.SetStderr(d.ErrOut)
return cmd
}
// Run runs the detected diff program. `from` and `to` are the directories to diff.
func (d *DiffProgram) Run(from, to string) error {
d.getCommand(from, to).Run() // Ignore diff return code
return nil
}
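// Illustrative usage of the program selection above: an external diff tool can be
// chosen per invocation (the tool name here is made up), e.g.
//   KUBERNETES_EXTERNAL_DIFF=meld kubectl alpha diff -f pod.json
// With the variable unset, plain `diff -u -N <from-dir> <to-dir>` is run against the
// two directories produced by the Differ below.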
// Printer is used to print an object.
type Printer struct{}
// Print the object inside the writer w.
func (p *Printer) Print(obj map[string]interface{}, w io.Writer) error {
if obj == nil {
return nil
}
data, err := yaml.Marshal(obj)
if err != nil {
return err
}
_, err = w.Write(data)
return err
}
// DiffVersion gets the proper version of objects, and aggregates them into a directory.
type DiffVersion struct {
Dir *Directory
Name string
}
// NewDiffVersion creates a new DiffVersion with the named version.
func NewDiffVersion(name string) (*DiffVersion, error) {
dir, err := CreateDirectory(name)
if err != nil {
return nil, err
}
return &DiffVersion{
Dir: dir,
Name: name,
}, nil
}
func (v *DiffVersion) getObject(obj Object) (map[string]interface{}, error) {
switch v.Name {
case "LIVE":
return obj.Live()
case "MERGED":
return obj.Merged()
case "LOCAL":
return obj.Local()
case "LAST":
return obj.Last()
}
return nil, fmt.Errorf("Unknown version: %v", v.Name)
}
// Print prints the object using the printer into a new file in the directory.
func (v *DiffVersion) Print(obj Object, printer Printer) error {
vobj, err := v.getObject(obj)
if err != nil {
return err
}
f, err := v.Dir.NewFile(obj.Name())
if err != nil {
return err
}
defer f.Close()
return printer.Print(vobj, f)
}
// Directory creates a new temp directory, and allows to easily create new files.
type Directory struct {
Name string
}
// CreateDirectory creates the actual disk directory and returns a
// new representation of it.
func CreateDirectory(prefix string) (*Directory, error) {
name, err := ioutil.TempDir("", prefix+"-")
if err != nil {
return nil, err
}
return &Directory{
Name: name,
}, nil
}
// NewFile creates a new file in the directory.
func (d *Directory) NewFile(name string) (*os.File, error) {
return os.OpenFile(filepath.Join(d.Name, name), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0700)
}
// Delete removes the directory recursively.
func (d *Directory) Delete() error {
return os.RemoveAll(d.Name)
}
// Object is an interface that lets you retrieve multiple versions of
// it.
type Object interface {
Local() (map[string]interface{}, error)
Live() (map[string]interface{}, error)
Last() (map[string]interface{}, error)
Merged() (map[string]interface{}, error)
Name() string
}
// InfoObject is an implementation of the Object interface. It gets all
// the information from the Info object.
type InfoObject struct {
Remote runtime.Unstructured
Info *resource.Info
Encoder runtime.Encoder
Parser *parse.Factory
}
var _ Object = &InfoObject{}
func (obj InfoObject) toMap(data []byte) (map[string]interface{}, error) {
m := map[string]interface{}{}
if len(data) == 0 {
return m, nil
}
err := json.Unmarshal(data, &m)
return m, err
}
func (obj InfoObject) Local() (map[string]interface{}, error) {
data, err := runtime.Encode(obj.Encoder, obj.Info.Object)
if err != nil {
return nil, err
}
return obj.toMap(data)
}
func (obj InfoObject) Live() (map[string]interface{}, error) {
if obj.Remote == nil {
return nil, nil // Object doesn't exist on cluster.
}
return obj.Remote.UnstructuredContent(), nil
}
func (obj InfoObject) Merged() (map[string]interface{}, error) {
local, err := obj.Local()
if err != nil {
return nil, err
}
live, err := obj.Live()
if err != nil {
return nil, err
}
last, err := obj.Last()
if err != nil {
return nil, err
}
if live == nil || last == nil {
return local, nil // We probably don't have a live version, merged is local.
}
elmt, err := obj.Parser.CreateElement(last, local, live)
if err != nil {
return nil, err
}
result, err := elmt.Merge(strategy.Create(strategy.Options{}))
return result.MergedResult.(map[string]interface{}), err
}
func (obj InfoObject) Last() (map[string]interface{}, error) {
if obj.Remote == nil {
return nil, nil // No object is live, return empty
}
accessor, err := meta.Accessor(obj.Remote)
if err != nil {
return nil, err
}
annots := accessor.GetAnnotations()
if annots == nil {
return nil, nil // Not an error, just empty.
}
return obj.toMap([]byte(annots[api.LastAppliedConfigAnnotation]))
}
func (obj InfoObject) Name() string {
return obj.Info.Name
}
// Differ creates two DiffVersions and diffs them.
type Differ struct {
From *DiffVersion
To *DiffVersion
}
func NewDiffer(from, to string) (*Differ, error) {
differ := Differ{}
var err error
differ.From, err = NewDiffVersion(from)
if err != nil {
return nil, err
}
differ.To, err = NewDiffVersion(to)
if err != nil {
differ.From.Dir.Delete()
return nil, err
}
return &differ, nil
}
// Diff diffs two versions of a specific object and prints both versions to directories.
func (d *Differ) Diff(obj Object, printer Printer) error {
if err := d.From.Print(obj, printer); err != nil {
return err
}
if err := d.To.Print(obj, printer); err != nil {
return err
}
return nil
}
// Run runs the diff program against both directories.
func (d *Differ) Run(diff *DiffProgram) error {
return diff.Run(d.From.Dir.Name, d.To.Dir.Name)
}
// TearDown removes both temporary directories recursively.
func (d *Differ) TearDown() {
d.From.Dir.Delete() // Ignore error
d.To.Dir.Delete() // Ignore error
}
type Downloader struct {
mapper meta.RESTMapper
dclient dynamic.Interface
ns string
}
func NewDownloader(f cmdutil.Factory) (*Downloader, error) {
var err error
var d Downloader
d.mapper, err = f.ToRESTMapper()
if err != nil {
return nil, err
}
d.dclient, err = f.DynamicClient()
if err != nil {
return nil, err
}
d.ns, _, _ = f.DefaultNamespace()
return &d, nil
}
func (d *Downloader) Download(info *resource.Info) (*unstructured.Unstructured, error) {
gvk := info.Object.GetObjectKind().GroupVersionKind()
mapping, err := d.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, err
}
var resource dynamic.ResourceInterface
switch mapping.Scope.Name() {
case meta.RESTScopeNameNamespace:
if info.Namespace == "" {
info.Namespace = d.ns
}
resource = d.dclient.Resource(mapping.Resource).Namespace(info.Namespace)
case meta.RESTScopeNameRoot:
resource = d.dclient.Resource(mapping.Resource)
}
return resource.Get(info.Name, metav1.GetOptions{})
}
// RunDiff uses the factory to parse file arguments, find the versions to
// diff, build the Info object for each file, and run each of them through the
// differ.
func RunDiff(f cmdutil.Factory, diff *DiffProgram, options *DiffOptions, from, to string) error {
openapi, err := f.OpenAPISchema()
if err != nil {
return err
}
parser := &parse.Factory{Resources: openapi}
differ, err := NewDiffer(from, to)
if err != nil {
return err
}
defer differ.TearDown()
printer := Printer{}
cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
if err != nil {
return err
}
r := f.NewBuilder().
Unstructured().
NamespaceParam(cmdNamespace).DefaultNamespace().
FilenameParam(enforceNamespace, &options.FilenameOptions).
Local().
Flatten().
Do()
if err := r.Err(); err != nil {
return err
}
dl, err := NewDownloader(f)
if err != nil {
return err
}
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
remote, _ := dl.Download(info)
obj := InfoObject{
Remote: remote,
Info: info,
Parser: parser,
Encoder: cmdutil.InternalVersionJSONEncoder(),
}
return differ.Diff(obj, printer)
})
if err != nil {
return err
}
differ.Run(diff)
return nil
}
| [
"\"KUBERNETES_EXTERNAL_DIFF\""
]
| []
| [
"KUBERNETES_EXTERNAL_DIFF"
]
| [] | ["KUBERNETES_EXTERNAL_DIFF"] | go | 1 | 0 | |
esp8266/arduino/src/platform/ESP8266/Arduino/tools/build.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# build.py — build a sketch using arduino-builder
#
# Wrapper script around arduino-builder which accepts some ESP8266-specific
# options and translates them into FQBN
#
# Copyright © 2016 Ivan Grokhotkov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
#
from __future__ import print_function
import sys
import os
import argparse
import platform
import subprocess
import tempfile
import shutil
# Arduino-builder needs forward-slash paths for passed in params or it cannot
# launch the needed toolset.
def windowsize_paths(l):
"""Convert forward-slash paths to backslash paths referenced from C:"""
out = []
for i in l:
if i.startswith('/'):
i = 'C:' + i
out += [i.replace('/', '\\')]
return out
def compile(tmp_dir, sketch, cache, tools_dir, hardware_dir, ide_path, f, args):
cmd = []
cmd += [ide_path + '/arduino-builder']
cmd += ['-compile', '-logger=human']
cmd += ['-build-path', tmp_dir]
cmd += ['-tools', ide_path + '/tools-builder']
if cache != "":
cmd += ['-build-cache', cache ]
if args.library_path:
for lib_dir in args.library_path:
cmd += ['-libraries', lib_dir]
cmd += ['-hardware', ide_path + '/hardware']
if args.hardware_dir:
for hw_dir in args.hardware_dir:
cmd += ['-hardware', hw_dir]
else:
cmd += ['-hardware', hardware_dir]
# Debug=Serial,DebugLevel=Core____
fqbn = '-fqbn=esp8266com:esp8266:{board_name}:' \
'xtal={cpu_freq},' \
'FlashFreq={flash_freq},' \
'FlashMode={flash_mode},' \
'baud=921600,' \
'eesz={flash_size},' \
'ip={lwIP},' \
'ResetMethod=nodemcu'.format(**vars(args))
if args.debug_port and args.debug_level:
fqbn += ',dbg={debug_port},lvl={debug_level}'.format(**vars(args))
if args.waveform_phase:
fqbn += ',waveform=phase'
cmd += [fqbn]
cmd += ['-built-in-libraries', ide_path + '/libraries']
cmd += ['-ide-version=10607']
cmd += ['-warnings={warnings}'.format(**vars(args))]
if args.verbose:
cmd += ['-verbose']
cmd += [sketch]
if platform.system() == "Windows":
cmd = windowsize_paths(cmd)
if args.verbose:
print('Building: ' + " ".join(cmd), file=f)
p = subprocess.Popen(cmd, stdout=f, stderr=subprocess.STDOUT)
p.wait()
return p.returncode
def parse_args():
parser = argparse.ArgumentParser(description='Sketch build helper')
parser.add_argument('-v', '--verbose', help='Enable verbose output',
action='store_true')
parser.add_argument('-i', '--ide_path', help='Arduino IDE path')
parser.add_argument('-p', '--build_path', help='Build directory')
parser.add_argument('-l', '--library_path', help='Additional library path',
action='append')
parser.add_argument('-d', '--hardware_dir', help='Additional hardware path',
action='append')
parser.add_argument('-b', '--board_name', help='Board name', default='generic')
parser.add_argument('-s', '--flash_size', help='Flash size', default='512K64',
choices=['512K0', '512K64', '1M512', '4M1M', '4M3M'])
parser.add_argument('-f', '--cpu_freq', help='CPU frequency', default=80,
choices=[80, 160], type=int)
parser.add_argument('-m', '--flash_mode', help='Flash mode', default='qio',
choices=['dio', 'qio'])
parser.add_argument('-n', '--lwIP', help='lwIP version', default='lm2f',
choices=['lm2f', 'hb2f', 'lm6f', 'hb6f', 'hb1'])
parser.add_argument('-w', '--warnings', help='Compilation warnings level',
default='none', choices=['none', 'all', 'more'])
parser.add_argument('-o', '--output_binary', help='File name for output binary')
parser.add_argument('-k', '--keep', action='store_true',
help='Don\'t delete temporary build directory')
parser.add_argument('--flash_freq', help='Flash frequency', default=40,
type=int, choices=[40, 80])
parser.add_argument('--debug_port', help='Debug port',
choices=['Serial', 'Serial1'])
parser.add_argument('--waveform_phase', action='store_true',
help='Select waveform locked on phase')
parser.add_argument('--debug_level', help='Debug level')
parser.add_argument('--build_cache', help='Build directory to cache core.a', default='')
parser.add_argument('sketch_path', help='Sketch file path')
return parser.parse_args()
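# Illustrative invocation (paths and sketch name are made up; flags map to parse_args above):
#   ./build.py -i ~/arduino-1.8.13 -b generic -s 4M1M -f 160 -m qio -w all \
#       -o /tmp/Sketch.ino.bin path/to/Sketch.ino
# If -i/--ide_path is omitted, the ARDUINO_IDE_PATH environment variable is used instead.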
def main():
args = parse_args()
ide_path = args.ide_path
if not ide_path:
ide_path = os.environ.get('ARDUINO_IDE_PATH')
if not ide_path:
print("Please specify Arduino IDE path via --ide_path option"
"or ARDUINO_IDE_PATH environment variable.", file=sys.stderr)
return 2
sketch_path = args.sketch_path
tmp_dir = args.build_path
created_tmp_dir = False
if not tmp_dir:
tmp_dir = tempfile.mkdtemp()
created_tmp_dir = True
tools_dir = os.path.dirname(os.path.realpath(__file__)) + '/../tools'
# this is not the correct hardware folder to add.
hardware_dir = os.path.dirname(os.path.realpath(__file__)) + '/../cores'
output_name = tmp_dir + '/' + os.path.basename(sketch_path) + '.bin'
if args.verbose:
print("Sketch: ", sketch_path)
print("Build dir: ", tmp_dir)
print("Cache dir: ", args.build_cache)
print("Output: ", output_name)
if args.verbose:
f = sys.stdout
else:
f = open(tmp_dir + '/build.log', 'w')
res = compile(tmp_dir, sketch_path, args.build_cache, tools_dir, hardware_dir, ide_path, f, args)
if res != 0:
return res
if args.output_binary is not None:
shutil.copy(output_name, args.output_binary)
if created_tmp_dir and not args.keep:
shutil.rmtree(tmp_dir, ignore_errors=True)
if __name__ == '__main__':
sys.exit(main())
| []
| []
| [
"ARDUINO_IDE_PATH"
]
| [] | ["ARDUINO_IDE_PATH"] | python | 1 | 0 | |
mne/tests/test_event.py | import os.path as op
import os
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import warnings
from mne import (read_events, write_events, make_fixed_length_events,
find_events, find_stim_steps, io, pick_channels)
from mne.utils import _TempDir
from mne.event import define_target_events, merge_events
warnings.simplefilter('always')
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname = op.join(base_dir, 'test-eve.fif')
fname_gz = op.join(base_dir, 'test-eve.fif.gz')
fname_1 = op.join(base_dir, 'test-1-eve.fif')
fname_txt = op.join(base_dir, 'test-eve.eve')
fname_txt_1 = op.join(base_dir, 'test-eve-1.eve')
# using mne_process_raw --raw test_raw.fif --eventsout test-mpr-eve.eve:
fname_txt_mpr = op.join(base_dir, 'test-mpr-eve.eve')
fname_old_txt = op.join(base_dir, 'test-eve-old-style.eve')
raw_fname = op.join(base_dir, 'test_raw.fif')
def test_add_events():
"""Test adding events to a Raw file"""
# need preload
raw = io.Raw(raw_fname, preload=False)
events = np.array([[raw.first_samp, 0, 1]])
assert_raises(RuntimeError, raw.add_events, events, 'STI 014')
raw = io.Raw(raw_fname, preload=True)
orig_events = find_events(raw, 'STI 014')
# add some events
events = np.array([raw.first_samp, 0, 1])
assert_raises(ValueError, raw.add_events, events, 'STI 014') # bad shape
events[0] = raw.first_samp + raw.n_times + 1
events = events[np.newaxis, :]
assert_raises(ValueError, raw.add_events, events, 'STI 014') # bad time
events[0, 0] = raw.first_samp - 1
assert_raises(ValueError, raw.add_events, events, 'STI 014') # bad time
events[0, 0] = raw.first_samp + 1 # can't actually be first_samp
assert_raises(ValueError, raw.add_events, events, 'STI FOO')
raw.add_events(events, 'STI 014')
new_events = find_events(raw, 'STI 014')
assert_array_equal(new_events, np.concatenate((events, orig_events)))
def test_merge_events():
"""Test event merging
"""
events = read_events(fname) # Use as the gold standard
merges = [1, 2, 3, 4]
events_out = merge_events(events, merges, 1234)
events_out2 = events.copy()
for m in merges:
assert_true(not np.any(events_out[:, 2] == m))
events_out2[events[:, 2] == m, 2] = 1234
assert_array_equal(events_out, events_out2)
# test non-replacement functionality, should be sorted union of orig & new
events_out2 = merge_events(events, merges, 1234, False)
events_out = np.concatenate((events_out, events))
events_out = events_out[np.argsort(events_out[:, 0])]
assert_array_equal(events_out, events_out2)
def test_io_events():
"""Test IO for events
"""
tempdir = _TempDir()
# Test binary fif IO
events = read_events(fname) # Use as the gold standard
write_events(op.join(tempdir, 'events-eve.fif'), events)
events2 = read_events(op.join(tempdir, 'events-eve.fif'))
assert_array_almost_equal(events, events2)
# Test binary fif.gz IO
events2 = read_events(fname_gz) # Use as the gold standard
assert_array_almost_equal(events, events2)
write_events(op.join(tempdir, 'events-eve.fif.gz'), events2)
events2 = read_events(op.join(tempdir, 'events-eve.fif.gz'))
assert_array_almost_equal(events, events2)
# Test new format text file IO
write_events(op.join(tempdir, 'events.eve'), events)
events2 = read_events(op.join(tempdir, 'events.eve'))
assert_array_almost_equal(events, events2)
events2 = read_events(fname_txt_mpr)
assert_array_almost_equal(events, events2)
# Test old format text file IO
events2 = read_events(fname_old_txt)
assert_array_almost_equal(events, events2)
write_events(op.join(tempdir, 'events.eve'), events)
events2 = read_events(op.join(tempdir, 'events.eve'))
assert_array_almost_equal(events, events2)
# Test event selection
a = read_events(op.join(tempdir, 'events-eve.fif'), include=1)
b = read_events(op.join(tempdir, 'events-eve.fif'), include=[1])
c = read_events(op.join(tempdir, 'events-eve.fif'),
exclude=[2, 3, 4, 5, 32])
d = read_events(op.join(tempdir, 'events-eve.fif'), include=1,
exclude=[2, 3])
assert_array_equal(a, b)
assert_array_equal(a, c)
assert_array_equal(a, d)
# Test binary file IO for 1 event
events = read_events(fname_1) # Use as the new gold standard
write_events(op.join(tempdir, 'events-eve.fif'), events)
events2 = read_events(op.join(tempdir, 'events-eve.fif'))
assert_array_almost_equal(events, events2)
# Test text file IO for 1 event
write_events(op.join(tempdir, 'events.eve'), events)
events2 = read_events(op.join(tempdir, 'events.eve'))
assert_array_almost_equal(events, events2)
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
fname2 = op.join(tempdir, 'test-bad-name.fif')
write_events(fname2, events)
read_events(fname2)
assert_true(len(w) == 2)
def test_find_events():
"""Test find events in raw file
"""
events = read_events(fname)
raw = io.Raw(raw_fname, preload=True)
# let's test the defaulting behavior while we're at it
extra_ends = ['', '_1']
orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
if 'MNE_STIM_CHANNEL_1' in os.environ:
del os.environ['MNE_STIM_CHANNEL_1']
events2 = find_events(raw)
assert_array_almost_equal(events, events2)
# now test with mask
events11 = find_events(raw, mask=3)
events22 = read_events(fname, mask=3)
assert_array_equal(events11, events22)
# Reset some data for ease of comparison
raw.first_samp = 0
raw.info['sfreq'] = 1000
stim_channel = 'STI 014'
stim_channel_idx = pick_channels(raw.info['ch_names'],
include=stim_channel)
# test digital masking
raw._data[stim_channel_idx, :5] = np.arange(5)
raw._data[stim_channel_idx, 5:] = 0
# 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'
assert_raises(TypeError, find_events, raw, mask="0")
assert_array_equal(find_events(raw, shortest_event=1, mask=1),
[[2, 0, 2], [4, 2, 4]])
assert_array_equal(find_events(raw, shortest_event=1, mask=2),
[[1, 0, 1], [3, 0, 1], [4, 1, 4]])
assert_array_equal(find_events(raw, shortest_event=1, mask=3),
[[4, 0, 4]])
assert_array_equal(find_events(raw, shortest_event=1, mask=4),
[[1, 0, 1], [2, 1, 2], [3, 2, 3]])
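# Illustrative reading of the mask assertions above (the mask clears the masked
# bits): with stim samples [0, 1, 2, 3, 4] and mask=3 ('0b11'), the channel
# becomes [0, 0, 0, 0, 4], so only the single event [[4, 0, 4]] survives.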
# test empty events channel
raw._data[stim_channel_idx, :] = 0
assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))
raw._data[stim_channel_idx, :4] = 1
assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))
raw._data[stim_channel_idx, -1:] = 9
assert_array_equal(find_events(raw), [[14399, 0, 9]])
# Test that we can handle consecutive events with no gap
raw._data[stim_channel_idx, 10:20] = 5
raw._data[stim_channel_idx, 20:30] = 6
raw._data[stim_channel_idx, 30:32] = 5
raw._data[stim_channel_idx, 40] = 6
assert_array_equal(find_events(raw, consecutive=False),
[[10, 0, 5],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw, consecutive=True),
[[10, 0, 5],
[20, 5, 6],
[30, 6, 5],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw),
[[10, 0, 5],
[20, 5, 6],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw, output='offset', consecutive=False),
[[31, 0, 5],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw, output='offset', consecutive=True),
[[19, 6, 5],
[29, 5, 6],
[31, 0, 5],
[40, 0, 6],
[14399, 0, 9]])
assert_raises(ValueError, find_events, raw, output='step',
consecutive=True)
assert_array_equal(find_events(raw, output='step', consecutive=True,
shortest_event=1),
[[10, 0, 5],
[20, 5, 6],
[30, 6, 5],
[32, 5, 0],
[40, 0, 6],
[41, 6, 0],
[14399, 0, 9],
[14400, 9, 0]])
assert_array_equal(find_events(raw, output='offset'),
[[19, 6, 5],
[31, 0, 6],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw, consecutive=False, min_duration=0.002),
[[10, 0, 5]])
assert_array_equal(find_events(raw, consecutive=True, min_duration=0.002),
[[10, 0, 5],
[20, 5, 6],
[30, 6, 5]])
assert_array_equal(find_events(raw, output='offset', consecutive=False,
min_duration=0.002),
[[31, 0, 5]])
assert_array_equal(find_events(raw, output='offset', consecutive=True,
min_duration=0.002),
[[19, 6, 5],
[29, 5, 6],
[31, 0, 5]])
assert_array_equal(find_events(raw, consecutive=True, min_duration=0.003),
[[10, 0, 5],
[20, 5, 6]])
# test find_stim_steps merge parameter
raw._data[stim_channel_idx, :] = 0
raw._data[stim_channel_idx, 0] = 1
raw._data[stim_channel_idx, 10] = 4
raw._data[stim_channel_idx, 11:20] = 5
assert_array_equal(find_stim_steps(raw, pad_start=0, merge=0,
stim_channel=stim_channel),
[[0, 0, 1],
[1, 1, 0],
[10, 0, 4],
[11, 4, 5],
[20, 5, 0]])
assert_array_equal(find_stim_steps(raw, merge=-1,
stim_channel=stim_channel),
[[1, 1, 0],
[10, 0, 5],
[20, 5, 0]])
assert_array_equal(find_stim_steps(raw, merge=1,
stim_channel=stim_channel),
[[1, 1, 0],
[11, 0, 5],
[20, 5, 0]])
# put back the env vars we trampled on
for s, o in zip(extra_ends, orig_envs):
if o is not None:
os.environ['MNE_STIM_CHANNEL%s' % s] = o
def test_make_fixed_length_events():
"""Test making events of a fixed length
"""
raw = io.Raw(raw_fname)
events = make_fixed_length_events(raw, id=1)
assert_true(events.shape[1], 3)
def test_define_events():
"""Test defining response events
"""
events = read_events(fname)
raw = io.Raw(raw_fname)
events_, _ = define_target_events(events, 5, 32, raw.info['sfreq'],
.2, 0.7, 42, 99)
n_target = events[events[:, 2] == 5].shape[0]
n_miss = events_[events_[:, 2] == 99].shape[0]
n_target_ = events_[events_[:, 2] == 42].shape[0]
assert_true(n_target_ == (n_target - n_miss))
| []
| []
| [
"MNE_STIM_CHANNEL_1",
"MNE_STIM_CHANNEL",
"MNE_STIM_CHANNEL%s' % "
]
| [] | ["MNE_STIM_CHANNEL_1", "MNE_STIM_CHANNEL", "MNE_STIM_CHANNEL%s' % "] | python | 3 | 0 | |
source/machine_connector/m2c2_opcda_connector/m2c2_opcda_connector.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import time
import os
import OpenOPC
import messages as msg
import awsiot.greengrasscoreipc
from awsiot.greengrasscoreipc.model import (
QOS,
SubscribeToIoTCoreRequest
)
from greengrasssdk.stream_manager import ExportDefinition
from inspect import signature
from threading import Timer
from typing import Union
from utils import StreamManagerHelperClient, AWSEndpointClient, InitMessage
from utils.subscription_stream_handler import SubscriptionStreamHandler
from utils.custom_exception import OPCDaConnectorException
from validations.message_validation import MessageValidation
# payload array containing responses from the OPC DA server
# appended to at each execution of the thread
payload_content = []
control = "" # connection control variables monitored by the thread
lock = False # flag used to prevent concurrency
connection = None # OPC connection to the server
# Measured execution time of the thread
# used to ensure the thread has completed its execution
ttl = 0.2
# Greengrass Stream name
CONNECTION_GG_STREAM_NAME = os.environ["CONNECTION_GG_STREAM_NAME"]
# Constant variables
# Connection name from component environment variables
CONNECTION_NAME = os.getenv("CONNECTION_NAME")
# Site name from component environment variables
SITE_NAME = os.getenv("SITE_NAME")
# Area from component environment variables
AREA = os.getenv("AREA")
# Process from component environment variables
PROCESS = os.getenv("PROCESS")
# Machine name from component environment variables
MACHINE_NAME = os.getenv("MACHINE_NAME")
# Connection retry count
CONNECTION_RETRY = 10
# Error retry count
ERROR_RETRY = 5
# Max size of message stream when creating (in bytes)
max_stream_size = 5368706371 # 5G
# Clients and logging
smh_client = StreamManagerHelperClient()
connector_client = AWSEndpointClient()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def form_map() -> dict:
return {
"name": CONNECTION_NAME,
"site_name": SITE_NAME,
"area": AREA,
"process": PROCESS,
"machine_name": MACHINE_NAME
}
def validate_schema(message: dict) -> None:
"""ensure that the data format is as should be"""
""" Ex:
{
"alias": "{site_name}/{area}/{process}/{machine_name}/{tag}",
"messages": [{
"name": alias,
"timestamp": str, (also validate this is a valid timestamp)
"quality": 'Good|GOOD|Bad|BAD|Uncertain|UNCERTAIN',
"value": any
}]
}
"""
format_map = form_map()
post_type = "error"
topic = "m2c2/{post_type}/{name}".format(
**format_map, post_type=post_type
)
try:
validation = MessageValidation(topic)
validation.validate_schema(message)
except Exception as err:
logger.error(f"Message validation failed. Error: {err}")
raise OPCDaConnectorException(msg.ERR_MSG_VALIDATION.format(err))
def m2c2_stream_required_format(tag: str, messages: list) -> dict:
user_message = {}
format_map = form_map()
alias = "{site_name}/{area}/{process}/{machine_name}/{tag}".format(
**format_map, tag=tag
)
user_message["alias"] = alias
user_message["messages"] = [dict(item, name=alias) for item in messages]
return user_message
def info_or_error_format(message: str, post_type: str) -> tuple:
init_message = InitMessage()
user_message = init_message.init_user_message()
format_map = form_map()
topic = "m2c2/{post_type}/{name}".format(
**format_map, post_type=post_type
)
user_message["message"] = message
return topic, user_message
def post_to_user(post_type: str, message: Union[str, dict]) -> None:
try:
if post_type == "data":
for key in message.keys():
formatted_payload = m2c2_stream_required_format(
key, message[key])
# Validate data format
validate_schema(formatted_payload)
# Write messages to the GG message stream
avail_streams = smh_client.list_streams()
if CONNECTION_GG_STREAM_NAME not in avail_streams:
logger.info(
f"Stream {CONNECTION_GG_STREAM_NAME} not found, attempting to create it."
)
gg_exports = ExportDefinition()
smh_client.create_stream(
CONNECTION_GG_STREAM_NAME, max_stream_size, gg_exports
)
smh_client.write_to_stream(
CONNECTION_GG_STREAM_NAME, formatted_payload)
else:
topic, user_message = info_or_error_format(message, post_type)
connector_client.publish_message_to_iot_topic(topic, user_message)
except Exception as err:
logger.error(
f"Failed to publish message to IoT topic or Stream Manager. Error: {err}"
)
raise
def device_connect(connection_data: dict) -> None:
"""Connect the device to OPC server."""
try:
global connection
if connection:
logger.warn("connection exists, closing it...")
connection = None
logger.warn("connection closed done.")
# Connection retries
for i in range(CONNECTION_RETRY):
try:
connection = OpenOPC.open_client(
host=connection_data["opcDa"]["machineIp"]
)
connection.connect(
opc_server=connection_data["opcDa"]["serverName"]
)
break
except:
if i == CONNECTION_RETRY - 1:
raise
logger.error("Connection failed to %s, retry to connect...",
connection_data["opcDa"]["machineIp"])
time.sleep(i + 1)
except Exception as err:
logger.error(f"Connection failed. Error: {err}")
raise OPCDaConnectorException(msg.ERR_MSG_FAIL_TO_CONNECT)
def read_opc_da_data(tags: list, list_tags: list, payload_content: list) -> list:
"""
Reads the OPC DA data from the server.
:param tags: The individual OPC DA tags
:param list_tags: The wildcard pattern OPC DA tags
:param payload_content: The payload content list
"""
# Pull data based on explicit tags provided by the user
if tags:
payload_content.extend(
connection.read(tags)
)
# Pull data based on a wildcard pattern tags provided by the user
# Here, to find all tags that match the wild card, we first must list the tags, then read them
if list_tags:
for entry in list_tags:
payload_content.extend(
connection.read(
connection.list(entry)
)
)
return payload_content
def send_opc_da_data(payload_content: list, current_iteration: int, iterations: int) -> tuple:
"""
Sends the data if the query iterations are done.
:param payload_content: The payload content
:param current_iteration: The current iteration
:param iterations: The iterations to send data to the cloud
:return: The payload content and the current iteration
"""
if current_iteration >= iterations:
current_iteration = 0
post_to_user("data", convert_to_json(payload_content))
payload_content = []
return payload_content, current_iteration
def handle_get_data_error(connection_data: dict, error: Exception, error_count: int) -> int:
"""
Handles job execution errors.
When the error count exceeds `ERROR_RETRY`, it retries the connection to the OPC DA server.
If that ultimately fails, the connection is stopped.
:param connection_data: The connection data
:param error: The error occurring while getting the data
:param error_count: The number of error count
:return: The number of error count
"""
logger.error(f"Unable to read from server: {error}")
error_count += 1
if error_count >= ERROR_RETRY:
try:
logger.error("Connection retry to OPC DA server...")
device_connect(connection_data)
logger.warn(
"Connection completed. Connection starts again...")
except Exception as err:
logger.error(f"Connection retry failed: {err}")
logger.error("Stopping the connection.")
global control
control = "stop"
post_to_user(
"error", msg.ERR_MSG_LOST_CONNECTION_STOPPED.format(err))
return error_count
def data_collection_control(connection_data: dict, payload_content: list = [], iteration: int = 0, error_count: int = 0) -> None:
"""
Controls data collection from the OPC DA server.
When the control is `start`, it starts reading the data based on the provided tags.
When the control is `stop`, it stops reading the data.
:param connection_data: The connection data
:param payload_content: The payload content which will be sent to the cloud
:param iteration: The current iteration
:param error_count: The number of error count
"""
global control, ttl, connection
if control == "start":
current_error_count = error_count
current_iteration = iteration
opc_da_data = connection_data["opcDa"]
try:
start_time = time.time()
payload_content = read_opc_da_data(
tags=opc_da_data["tags"], list_tags=opc_da_data["listTags"], payload_content=payload_content
)
current_iteration += 1
current_error_count = 0
payload_content, current_iteration = send_opc_da_data(
payload_content=payload_content,
current_iteration=current_iteration,
iterations=opc_da_data["iterations"]
)
ttl = time.time() - start_time
except Exception as err:
current_error_count = handle_get_data_error(
connection_data=connection_data,
error=err,
error_count=current_error_count
)
Timer(
interval=opc_da_data["interval"],
function=data_collection_control,
args=[connection_data, payload_content,
current_iteration, current_error_count]
).start()
elif control == "stop":
if payload_content:
post_to_user("data", convert_to_json(payload_content))
payload_content = []
connector_client.stop_client()
try:
connection.close()
connection = None
except Exception:
pass
def start(connection_data: dict) -> None:
"""Start a connection based on the connection data."""
try:
if connector_client.is_running:
post_to_user(
"info", msg.ERR_MSG_FAIL_LAST_COMMAND_START.format(CONNECTION_NAME))
else:
logger.info("User request: start")
global control
control = "start"
connector_client.start_client(
connection_name=CONNECTION_NAME,
connection_configuration=connection_data
)
device_connect(connection_data)
post_to_user("info", msg.INF_MSG_CONNECTION_STARTED)
data_collection_control(connection_data=connection_data)
except Exception as err:
error_message = f"Failed to execute the start: {err}"
logger.error(error_message)
raise OPCDaConnectorException(error_message)
def stop() -> None:
"""Stop a connection based on the connection data."""
try:
if connector_client.is_running:
logger.info("User request: stop")
global control
control = "stop"
time.sleep(min(5 * ttl, 3))
local_connection_data = connector_client.read_local_connection_configuration(
connection_name=CONNECTION_NAME
)
if local_connection_data:
local_connection_data["control"] = "stop"
connector_client.write_local_connection_configuration_file(
connection_name=CONNECTION_NAME,
connection_configuration=local_connection_data,
)
post_to_user("info", msg.INF_MSG_CONNECTION_STOPPED)
else:
post_to_user(
"info", msg.ERR_MSG_FAIL_LAST_COMMAND_STOP.format(CONNECTION_NAME))
except Exception as err:
error_message = f"Failed to execute the stop: {err}"
logger.error(error_message)
raise OPCDaConnectorException(error_message)
def push(connection_data: dict) -> None:
"""Send the list of servers to users through the IoT topic."""
logger.info("User request: push")
try:
opc = OpenOPC.open_client(
host=connection_data["opcDa"]["machineIp"]
)
server = opc.servers()
opc.close()
post_to_user("info", msg.INF_MSG_SERVER_NAME.format(server))
except Exception as err:
error_message = msg.ERR_MSG_FAIL_SERVER_NAME.format(err)
logger.error(error_message)
post_to_user("error", error_message)
def pull() -> None:
"""Send the local connection data, if exists, to users through the IoT topic."""
logger.info("User request: pull")
try:
local_connection_data = connector_client.read_local_connection_configuration(
CONNECTION_NAME)
if local_connection_data:
post_to_user("info", local_connection_data)
else:
post_to_user(
"error", msg.ERR_MSG_NO_CONNECTION_FILE.format(CONNECTION_NAME))
except Exception as err:
error_message = msg.ERR_MSG_FAIL_SERVER_NAME.format(err)
logger.error(error_message)
post_to_user("error", error_message)
def convert_to_json(payload_content: list) -> dict:
"""Convert the OPC DA array data to JSON (Dict) and return the aggregated JSON data."""
try:
json_response = {}
for t in payload_content: # tuple in payload_content
temp = {}
key = t[0].replace(".", "-").replace("/", "_")
if len(t) == 4:
temp["value"] = t[1]
temp["quality"] = t[2]
temp["timestamp"] = t[3]
else:
temp["value"] = "Parameters cannot be read from server"
json_response.setdefault(key, []).append(temp)
return json_response
except Exception as err:
error_message = f"Failed to convert the data to JSON: {err}"
logger.error(error_message)
return {"error": error_message}
def control_switch() -> dict:
"""Acts like switch/case in the source code for the connection control."""
return {
"start": start,
"stop": stop,
"pull": pull,
"push": push
}
def message_handler(connection_data: dict) -> None:
"""
OPC DA Connector message handler.
:param connection_data: The connection data including the connection control and connection information
"""
global lock
try:
if not lock:
lock = True
connection_control = connection_data["control"].lower()
if connection_control in control_switch().keys():
control_action_function = control_switch().get(
connection_control
)
# Pass the connection data when action requires the connection data as a parameter: start, update, push
# Otherwise, it doesn't pass the connection data as a parameter: push, pull
if len(signature(control_action_function).parameters) > 0:
control_action_function(connection_data)
else:
control_action_function()
else:
post_to_user("error", msg.ERR_MSG_FAIL_UNKNOWN_CONTROL.format(
connection_control))
lock = False
else:
logger.info("The function is still processing.")
except Exception as err:
logger.error(f"Failed to run the connection on the function: {err}")
if type(err).__name__ != "KeyError":
post_to_user("error", f"Failed to run the connection: {err}")
lock = False
connector_client.stop_client()
raise
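# Illustrative shape of the connection data handled by message_handler (keys are
# inferred from the accesses in this module; values are made up):
# {
#     "control": "start",
#     "opcDa": {
#         "machineIp": "10.0.0.5",
#         "serverName": "Matrikon.OPC.Simulation.1",
#         "iterations": 10,
#         "interval": 2,
#         "tags": ["Random.Int4"],
#         "listTags": ["Random.*"]
#     }
# }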
def main():
"""
Runs infinitely unless there is an error.
The main function subscribes to the `m2c2/job/{CONNECTION_NAME}` topic,
and when it gets a message from the cloud, it handles the connection control.
"""
topic = f"m2c2/job/{CONNECTION_NAME}"
qos = QOS.AT_MOST_ONCE
operation = None
try:
# When the connection configuration exists and the last control is `start`, start the connection.
existing_configuration = connector_client.read_local_connection_configuration(
connection_name=CONNECTION_NAME
)
if existing_configuration and existing_configuration.get("control", None) == "start":
message_handler(existing_configuration)
request = SubscribeToIoTCoreRequest()
request.topic_name = topic
request.qos = qos
handler = SubscriptionStreamHandler(
message_handler_callback=message_handler
)
ipc_client = awsiot.greengrasscoreipc.connect()
operation = ipc_client.new_subscribe_to_iot_core(handler)
future = operation.activate(request)
future.result(10) # 10 is timeout
# Keep the main thread alive
while True:
time.sleep(10)
except Exception as err:
logger.error(f"An error occurred on the OPC DA connector: {err}")
if operation:
operation.close()
if __name__ == "__main__":
main()
| []
| []
| [
"MACHINE_NAME",
"CONNECTION_NAME",
"AREA",
"SITE_NAME",
"CONNECTION_GG_STREAM_NAME",
"PROCESS"
]
| [] | ["MACHINE_NAME", "CONNECTION_NAME", "AREA", "SITE_NAME", "CONNECTION_GG_STREAM_NAME", "PROCESS"] | python | 6 | 0 | |
vendor/github.com/containers/storage/types/utils.go | package types
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// GetRootlessRuntimeDir returns the runtime directory when running as non root
func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
path, err := getRootlessRuntimeDir(rootlessUID)
if err != nil {
return "", err
}
path = filepath.Join(path, "containers")
if err := os.MkdirAll(path, 0700); err != nil {
return "", errors.Wrapf(err, "unable to make rootless runtime")
}
return path, nil
}
type rootlessRuntimeDirEnvironment interface {
getProcCommandFile() string
getRunUserDir() string
getTmpPerUserDir() string
homeDirGetRuntimeDir() (string, error)
systemLstat(string) (*system.StatT, error)
homedirGet() string
}
type rootlessRuntimeDirEnvironmentImplementation struct {
procCommandFile string
runUserDir string
tmpPerUserDir string
}
func (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string {
return env.procCommandFile
}
func (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string {
return env.runUserDir
}
func (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string {
return env.tmpPerUserDir
}
func (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) {
return homedir.GetRuntimeDir()
}
func (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) {
return system.Lstat(path)
}
func (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string {
return homedir.Get()
}
func isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool {
st, err := env.systemLstat(dir)
return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000
}
// getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing.
// Everyone but the tests this is intended for should only call getRootlessRuntimeDir, never this function.
func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, error) {
runtimeDir, err := env.homeDirGetRuntimeDir()
if err == nil {
return runtimeDir, nil
}
runUserDir := env.getRunUserDir()
if isRootlessRuntimeDirOwner(runUserDir, env) {
return runUserDir, nil
}
tmpPerUserDir := env.getTmpPerUserDir()
if tmpPerUserDir != "" {
if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) {
if err := os.Mkdir(tmpPerUserDir, 0700); err != nil {
logrus.Errorf("failed to create temp directory for user: %v", err)
} else {
return tmpPerUserDir, nil
}
} else if isRootlessRuntimeDirOwner(tmpPerUserDir, env) {
return tmpPerUserDir, nil
}
}
homeDir := env.homedirGet()
if homeDir == "" {
return "", errors.New("neither XDG_RUNTIME_DIR nor temp dir nor HOME was set non-empty")
}
resolvedHomeDir, err := filepath.EvalSymlinks(homeDir)
if err != nil {
return "", err
}
return filepath.Join(resolvedHomeDir, "rundir"), nil
}
func getRootlessRuntimeDir(rootlessUID int) (string, error) {
return getRootlessRuntimeDirIsolated(
rootlessRuntimeDirEnvironmentImplementation{
"/proc/1/comm",
fmt.Sprintf("/run/user/%d", rootlessUID),
fmt.Sprintf("%s/containers-user-%d", os.TempDir(), rootlessUID),
},
)
}
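// Illustrative resolution order implemented above, for UID 1000: $XDG_RUNTIME_DIR if
// usable, otherwise /run/user/1000 when it is owned by the caller with mode 0700 and
// no group/world access, otherwise $TMPDIR/containers-user-1000 (e.g.
// /tmp/containers-user-1000), and finally $HOME/rundir with symlinks resolved.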
// getRootlessDirInfo returns the parent path of where the storage for containers and
// volumes will be in rootless mode
func getRootlessDirInfo(rootlessUID int) (string, string, error) {
rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUID)
if err != nil {
return "", "", err
}
dataDir, err := homedir.GetDataHome()
if err == nil {
return dataDir, rootlessRuntime, nil
}
home := homedir.Get()
if home == "" {
return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
}
// runc doesn't like symlinks in the rootfs path, and at least
// on CoreOS /home is a symlink to /var/home, so resolve any symlink.
resolvedHome, err := filepath.EvalSymlinks(home)
if err != nil {
return "", "", err
}
dataDir = filepath.Join(resolvedHome, ".local", "share")
return dataDir, rootlessRuntime, nil
}
func getRootlessUID() int {
uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
if uidEnv != "" {
u, _ := strconv.Atoi(uidEnv)
return u
}
return os.Geteuid()
}
func expandEnvPath(path string, rootlessUID int) (string, error) {
path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1)
return filepath.Clean(os.ExpandEnv(path)), nil
}
func DefaultConfigFile(rootless bool) (string, error) {
if defaultConfigFileSet {
return defaultConfigFile, nil
}
if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
return path, nil
}
if !rootless {
return defaultConfigFile, nil
}
if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
return filepath.Join(configHome, "containers/storage.conf"), nil
}
home := homedir.Get()
if home == "" {
return "", errors.New("cannot determine user's homedir")
}
return filepath.Join(home, ".config/containers/storage.conf"), nil
}
func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) {
prevReloadConfig.mutex.Lock()
defer prevReloadConfig.mutex.Unlock()
fi, err := os.Stat(configFile)
if err != nil {
if !os.IsNotExist(err) {
fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
}
return
}
mtime := fi.ModTime()
if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile {
*storeOptions = *prevReloadConfig.storeOptions
return
}
ReloadConfigurationFile(configFile, storeOptions)
prevReloadConfig.storeOptions = storeOptions
prevReloadConfig.mod = mtime
prevReloadConfig.configFile = configFile
}
| [
"\"_CONTAINERS_ROOTLESS_UID\"",
"\"XDG_CONFIG_HOME\""
]
| []
| [
"_CONTAINERS_ROOTLESS_UID",
"XDG_CONFIG_HOME"
]
| [] | ["_CONTAINERS_ROOTLESS_UID", "XDG_CONFIG_HOME"] | go | 2 | 0 | |
blockchain/wsgi.py | """
WSGI config for blockchain project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blockchain.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
vendor/github.com/deislabs/cnab-go/driver/kubernetes/kubernetes.go | package kubernetes
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
// load credential helpers
_ "k8s.io/client-go/plugin/pkg/client/auth"
// Convert transitive deps to direct deps so that we can use constraints in our Gopkg.toml
_ "github.com/Azure/go-autorest/autorest"
"github.com/deislabs/cnab-go/bundle"
"github.com/deislabs/cnab-go/driver"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
batchclientv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
const (
k8sContainerName = "invocation"
k8sFileSecretVolume = "files"
)
// Driver runs an invocation image in a Kubernetes cluster.
type Driver struct {
Namespace string
ServiceAccountName string
Annotations map[string]string
LimitCPU resource.Quantity
LimitMemory resource.Quantity
Tolerations []v1.Toleration
ActiveDeadlineSeconds int64
BackoffLimit int32
SkipCleanup bool
skipJobStatusCheck bool
jobs batchclientv1.JobInterface
secrets coreclientv1.SecretInterface
pods coreclientv1.PodInterface
deletionPolicy metav1.DeletionPropagation
requiredCompletions int32
}
// New initializes a Kubernetes driver.
func New(namespace, serviceAccount string, conf *rest.Config) (*Driver, error) {
driver := &Driver{
Namespace: namespace,
ServiceAccountName: serviceAccount,
}
driver.setDefaults()
err := driver.setClient(conf)
return driver, err
}
// Handles receives an ImageType* and answers whether this driver supports that type.
func (k *Driver) Handles(imagetype string) bool {
return imagetype == driver.ImageTypeDocker || imagetype == driver.ImageTypeOCI
}
// Config returns the Kubernetes driver configuration options.
func (k *Driver) Config() map[string]string {
return map[string]string{
"KUBE_NAMESPACE": "Kubernetes namespace in which to run the invocation image",
"SERVICE_ACCOUNT": "Kubernetes service account to be mounted by the invocation image (if empty, no service account token will be mounted)",
"KUBE_CONFIG": "Absolute path to the kubeconfig file",
"MASTER_URL": "Kubernetes master endpoint",
}
}
// SetConfig sets Kubernetes driver configuration.
func (k *Driver) SetConfig(settings map[string]string) {
k.setDefaults()
k.Namespace = settings["KUBE_NAMESPACE"]
k.ServiceAccountName = settings["SERVICE_ACCOUNT"]
var kubeconfig string
if kpath := settings["KUBE_CONFIG"]; kpath != "" {
kubeconfig = kpath
} else if home := homeDir(); home != "" {
kubeconfig = filepath.Join(home, ".kube", "config")
}
conf, err := clientcmd.BuildConfigFromFlags(settings["MASTER_URL"], kubeconfig)
if err != nil {
panic(err)
}
err = k.setClient(conf)
if err != nil {
panic(err)
}
}
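// Hedged usage sketch (not part of the original driver): the namespace, service
// account and installation names below are hypothetical, and a kubeconfig at
// ~/.kube/config is assumed. Only a subset of driver.Operation fields is shown.
//
//	conf, err := clientcmd.BuildConfigFromFlags("", filepath.Join(homeDir(), ".kube", "config"))
//	if err != nil {
//		panic(err)
//	}
//	d, err := New("cnab-jobs", "cnab-runner", conf)
//	if err != nil {
//		panic(err)
//	}
//	op := &driver.Operation{Installation: "example", Action: "install", Out: os.Stdout}
//	_, err = d.Run(op)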
func (k *Driver) setDefaults() {
k.SkipCleanup = false
k.BackoffLimit = 0
k.ActiveDeadlineSeconds = 300
k.requiredCompletions = 1
k.deletionPolicy = metav1.DeletePropagationBackground
}
func (k *Driver) setClient(conf *rest.Config) error {
coreClient, err := coreclientv1.NewForConfig(conf)
if err != nil {
return err
}
batchClient, err := batchclientv1.NewForConfig(conf)
if err != nil {
return err
}
k.jobs = batchClient.Jobs(k.Namespace)
k.secrets = coreClient.Secrets(k.Namespace)
k.pods = coreClient.Pods(k.Namespace)
return nil
}
// Run executes the operation inside of the invocation image.
func (k *Driver) Run(op *driver.Operation) (driver.OperationResult, error) {
if k.Namespace == "" {
return driver.OperationResult{}, fmt.Errorf("KUBE_NAMESPACE is required")
}
labelMap := generateLabels(op)
meta := metav1.ObjectMeta{
Namespace: k.Namespace,
GenerateName: generateNameTemplate(op),
Labels: labelMap,
}
// Mount SA token if a non-zero value for ServiceAccountName has been specified
mountServiceAccountToken := k.ServiceAccountName != ""
job := &batchv1.Job{
ObjectMeta: meta,
Spec: batchv1.JobSpec{
ActiveDeadlineSeconds: &k.ActiveDeadlineSeconds,
Completions: &k.requiredCompletions,
BackoffLimit: &k.BackoffLimit,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labelMap,
Annotations: k.Annotations,
},
Spec: v1.PodSpec{
ServiceAccountName: k.ServiceAccountName,
AutomountServiceAccountToken: &mountServiceAccountToken,
RestartPolicy: v1.RestartPolicyNever,
Tolerations: k.Tolerations,
},
},
},
}
container := v1.Container{
Name: k8sContainerName,
Image: imageWithDigest(op.Image),
Command: []string{"/cnab/app/run"},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: k.LimitCPU,
v1.ResourceMemory: k.LimitMemory,
},
},
ImagePullPolicy: v1.PullIfNotPresent,
}
if len(op.Environment) > 0 {
secret := &v1.Secret{
ObjectMeta: meta,
StringData: op.Environment,
}
secret.ObjectMeta.GenerateName += "env-"
envsecret, err := k.secrets.Create(secret)
if err != nil {
return driver.OperationResult{}, err
}
if !k.SkipCleanup {
defer k.deleteSecret(envsecret.ObjectMeta.Name)
}
container.EnvFrom = []v1.EnvFromSource{
{
SecretRef: &v1.SecretEnvSource{
LocalObjectReference: v1.LocalObjectReference{
Name: envsecret.ObjectMeta.Name,
},
},
},
}
}
if len(op.Files) > 0 {
secret, mounts := generateFileSecret(op.Files)
secret.ObjectMeta = metav1.ObjectMeta{
Namespace: k.Namespace,
GenerateName: generateNameTemplate(op) + "files-",
Labels: labelMap,
}
secret, err := k.secrets.Create(secret)
if err != nil {
return driver.OperationResult{}, err
}
if !k.SkipCleanup {
defer k.deleteSecret(secret.ObjectMeta.Name)
}
job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, v1.Volume{
Name: k8sFileSecretVolume,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secret.ObjectMeta.Name,
},
},
})
container.VolumeMounts = mounts
}
job.Spec.Template.Spec.Containers = []v1.Container{container}
job, err := k.jobs.Create(job)
if err != nil {
return driver.OperationResult{}, err
}
if !k.SkipCleanup {
defer k.deleteJob(job.ObjectMeta.Name)
}
// Return early for unit testing purposes (the fake k8s client implementation just
// hangs during watch because no events are ever created on the Job)
if k.skipJobStatusCheck {
return driver.OperationResult{}, nil
}
selector := metav1.ListOptions{
LabelSelector: labels.Set(job.ObjectMeta.Labels).String(),
}
return driver.OperationResult{}, k.watchJobStatusAndLogs(selector, op.Out)
}
func (k *Driver) watchJobStatusAndLogs(selector metav1.ListOptions, out io.Writer) error {
// Stream Pod logs in the background
logsStreamingComplete := make(chan bool)
err := k.streamPodLogs(selector, out, logsStreamingComplete)
if err != nil {
return err
}
// Watch job events and exit on failure/success
watch, err := k.jobs.Watch(selector)
if err != nil {
return err
}
for event := range watch.ResultChan() {
job, ok := event.Object.(*batchv1.Job)
if !ok {
return fmt.Errorf("unexpected type")
}
complete := false
for _, cond := range job.Status.Conditions {
if cond.Type == batchv1.JobFailed {
err = fmt.Errorf(cond.Message)
complete = true
break
}
if cond.Type == batchv1.JobComplete {
complete = true
break
}
}
if complete {
break
}
}
if err != nil {
return err
}
// Wait for pod logs to finish printing
for i := 0; i < int(k.requiredCompletions); i++ {
<-logsStreamingComplete
}
return nil
}
func (k *Driver) streamPodLogs(options metav1.ListOptions, out io.Writer, done chan bool) error {
watcher, err := k.pods.Watch(options)
if err != nil {
return err
}
go func() {
// Track pods whose logs have been streamed by pod name. We need to know when we've already
// processed logs for a given pod, since multiple lifecycle events are received per pod.
streamedLogs := map[string]bool{}
for event := range watcher.ResultChan() {
pod, ok := event.Object.(*v1.Pod)
if !ok {
continue
}
podName := pod.GetName()
if streamedLogs[podName] {
// The event was for a pod whose logs have already been streamed, so do nothing.
continue
}
req := k.pods.GetLogs(podName, &v1.PodLogOptions{
Container: k8sContainerName,
Follow: true,
})
reader, err := req.Stream()
// There was an error connecting to the pod, so continue the loop and attempt streaming
// logs again next time there is an event for the same pod.
if err != nil {
continue
}
// We successfully connected to the pod, so mark it as having streamed logs.
streamedLogs[podName] = true
// Block the loop until all logs from the pod have been processed.
io.Copy(out, reader)
reader.Close()
done <- true
}
}()
return nil
}
func (k *Driver) deleteSecret(name string) error {
return k.secrets.Delete(name, &metav1.DeleteOptions{
PropagationPolicy: &k.deletionPolicy,
})
}
func (k *Driver) deleteJob(name string) error {
return k.jobs.Delete(name, &metav1.DeleteOptions{
PropagationPolicy: &k.deletionPolicy,
})
}
func generateNameTemplate(op *driver.Operation) string {
return fmt.Sprintf("%s-%s-", op.Installation, op.Action)
}
func generateLabels(op *driver.Operation) map[string]string {
return map[string]string{
"cnab.io/installation": op.Installation,
"cnab.io/action": op.Action,
"cnab.io/revision": op.Revision,
}
}
func generateFileSecret(files map[string]string) (*v1.Secret, []v1.VolumeMount) {
size := len(files)
data := make(map[string]string, size)
mounts := make([]v1.VolumeMount, size)
i := 0
for path, contents := range files {
key := strings.Replace(filepath.ToSlash(path), "/", "_", -1)
data[key] = contents
mounts[i] = v1.VolumeMount{
Name: k8sFileSecretVolume,
MountPath: path,
SubPath: key,
}
i++
}
secret := &v1.Secret{
StringData: data,
}
return secret, mounts
}
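// Illustrative note (values are examples, not from the original source): a file
// keyed by "/cnab/app/image-map.json" produces the Secret key
// "_cnab_app_image-map.json", and the matching VolumeMount uses that key as
// SubPath so the contents are mounted back at the original absolute path.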
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
func imageWithDigest(img bundle.InvocationImage) string {
if img.Digest == "" {
return img.Image
}
return img.Image + "@" + img.Digest
}
| [
"\"HOME\"",
"\"USERPROFILE\""
]
| []
| [
"HOME",
"USERPROFILE"
]
| [] | ["HOME", "USERPROFILE"] | go | 2 | 0 | |
src/cmd/initContainer.go | /*
* Copyright © 2019 – 2020 Red Hat Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"strings"
"github.com/containers/toolbox/pkg/shell"
"github.com/containers/toolbox/pkg/utils"
"github.com/fsnotify/fsnotify"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var (
initContainerFlags struct {
home string
homeLink bool
mediaLink bool
mntLink bool
monitorHost bool
shell string
uid int
user string
}
initContainerMounts = []struct {
containerPath string
source string
flags string
}{
{"/etc/machine-id", "/run/host/etc/machine-id", "ro"},
{"/run/libvirt", "/run/host/run/libvirt", ""},
{"/run/systemd/journal", "/run/host/run/systemd/journal", ""},
{"/run/udev/data", "/run/host/run/udev/data", ""},
{"/tmp", "/run/host/tmp", "rslave"},
{"/var/lib/flatpak", "/run/host/var/lib/flatpak", "ro"},
{"/var/lib/libvirt", "/run/host/var/lib/libvirt", ""},
{"/var/log/journal", "/run/host/var/log/journal", "ro"},
{"/var/mnt", "/run/host/var/mnt", "rslave"},
}
)
var initContainerCmd = &cobra.Command{
Use: "init-container",
Short: "Initialize a running container",
Hidden: true,
RunE: initContainer,
}
func init() {
flags := initContainerCmd.Flags()
flags.StringVar(&initContainerFlags.home,
"home",
"",
"Create a user inside the toolbox container whose login directory is HOME.")
initContainerCmd.MarkFlagRequired("home")
flags.BoolVar(&initContainerFlags.homeLink,
"home-link",
false,
"Make /home a symbolic link to /var/home.")
flags.BoolVar(&initContainerFlags.mediaLink,
"media-link",
false,
"Make /media a symbolic link to /run/media.")
flags.BoolVar(&initContainerFlags.mntLink, "mnt-link", false, "Make /mnt a symbolic link to /var/mnt.")
flags.BoolVar(&initContainerFlags.monitorHost,
"monitor-host",
false,
"Ensure that certain configuration files inside the toolbox container are in sync with the host.")
flags.StringVar(&initContainerFlags.shell,
"shell",
"",
"Create a user inside the toolbox container whose login shell is SHELL.")
initContainerCmd.MarkFlagRequired("shell")
flags.IntVar(&initContainerFlags.uid,
"uid",
0,
"Create a user inside the toolbox container whose numerical user ID is UID.")
initContainerCmd.MarkFlagRequired("uid")
flags.StringVar(&initContainerFlags.user,
"user",
"",
"Create a user inside the toolbox container whose login name is USER.")
initContainerCmd.MarkFlagRequired("user")
initContainerCmd.SetHelpFunc(initContainerHelp)
rootCmd.AddCommand(initContainerCmd)
}
func initContainer(cmd *cobra.Command, args []string) error {
if !utils.IsInsideContainer() {
var builder strings.Builder
fmt.Fprintf(&builder, "the 'init-container' command can only be used inside containers\n")
fmt.Fprintf(&builder, "Run '%s --help' for usage.", executableBase)
errMsg := builder.String()
return errors.New(errMsg)
}
runtimeDirectory := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDirectory == "" {
logrus.Debug("XDG_RUNTIME_DIR is unset")
runtimeDirectory = fmt.Sprintf("/run/user/%d", initContainerFlags.uid)
os.Setenv("XDG_RUNTIME_DIR", runtimeDirectory)
logrus.Debugf("XDG_RUNTIME_DIR set to %s", runtimeDirectory)
}
logrus.Debug("Creating /run/.toolboxenv")
toolboxEnvFile, err := os.Create("/run/.toolboxenv")
if err != nil {
return errors.New("failed to create /run/.toolboxenv")
}
defer toolboxEnvFile.Close()
if initContainerFlags.monitorHost {
logrus.Debug("Monitoring host")
if utils.PathExists("/run/host/etc") {
logrus.Debug("Path /run/host/etc exists")
if _, err := os.Readlink("/etc/host.conf"); err != nil {
if err := redirectPath("/etc/host.conf",
"/run/host/etc/host.conf",
false); err != nil {
return err
}
}
if _, err := os.Readlink("/etc/hosts"); err != nil {
if err := redirectPath("/etc/hosts",
"/run/host/etc/hosts",
false); err != nil {
return err
}
}
if localtimeTarget, err := os.Readlink("/etc/localtime"); err != nil ||
localtimeTarget != "/run/host/etc/localtime" {
if err := redirectPath("/etc/localtime",
"/run/host/etc/localtime",
false); err != nil {
return err
}
}
if err := updateTimeZoneFromLocalTime(); err != nil {
return err
}
if _, err := os.Readlink("/etc/resolv.conf"); err != nil {
if err := redirectPath("/etc/resolv.conf",
"/run/host/etc/resolv.conf",
false); err != nil {
return err
}
}
for _, mount := range initContainerMounts {
if err := mountBind(mount.containerPath, mount.source, mount.flags); err != nil {
return err
}
}
if utils.PathExists("/sys/fs/selinux") {
if err := mountBind("/sys/fs/selinux", "/usr/share/empty", ""); err != nil {
return err
}
}
}
}
if initContainerFlags.mediaLink {
if _, err := os.Readlink("/media"); err != nil {
if err = redirectPath("/media", "/run/media", true); err != nil {
return err
}
}
}
if initContainerFlags.mntLink {
if _, err := os.Readlink("/mnt"); err != nil {
if err := redirectPath("/mnt", "/var/mnt", true); err != nil {
return err
}
}
}
if _, err := user.Lookup(initContainerFlags.user); err != nil {
if err := configureUsers(initContainerFlags.uid,
initContainerFlags.user,
initContainerFlags.home,
initContainerFlags.shell,
initContainerFlags.homeLink,
false); err != nil {
return err
}
} else {
if err := configureUsers(initContainerFlags.uid,
initContainerFlags.user,
initContainerFlags.home,
initContainerFlags.shell,
initContainerFlags.homeLink,
true); err != nil {
return err
}
}
if utils.PathExists("/etc/krb5.conf.d") && !utils.PathExists("/etc/krb5.conf.d/kcm_default_ccache") {
logrus.Debug("Setting KCM as the default Kerberos credential cache")
kcmConfigString := `# Written by Toolbox
# https://github.com/containers/toolbox
#
# # To disable the KCM credential cache, comment out the following lines.
[libdefaults]
default_ccache_name = KCM:
`
kcmConfigBytes := []byte(kcmConfigString)
if err := ioutil.WriteFile("/etc/krb5.conf.d/kcm_default_ccache",
kcmConfigBytes,
0644); err != nil {
return errors.New("failed to set KCM as the default Kerberos credential cache")
}
}
logrus.Debug("Setting up watches for file system events")
watcherForHost, err := fsnotify.NewWatcher()
if err != nil {
return err
}
defer watcherForHost.Close()
if err := watcherForHost.Add("/run/host/etc"); err != nil {
return err
}
logrus.Debug("Finished initializing container")
toolboxRuntimeDirectory := runtimeDirectory + "/toolbox"
logrus.Debugf("Creating runtime directory %s", toolboxRuntimeDirectory)
if err := os.MkdirAll(toolboxRuntimeDirectory, 0700); err != nil {
return fmt.Errorf("failed to create runtime directory %s", toolboxRuntimeDirectory)
}
if err := os.Chown(toolboxRuntimeDirectory, initContainerFlags.uid, initContainerFlags.uid); err != nil {
return fmt.Errorf("failed to change ownership of the runtime directory %s",
toolboxRuntimeDirectory)
}
pid := os.Getpid()
initializedStamp := fmt.Sprintf("%s/container-initialized-%d", toolboxRuntimeDirectory, pid)
logrus.Debugf("Creating initialization stamp %s", initializedStamp)
initializedStampFile, err := os.Create(initializedStamp)
if err != nil {
return errors.New("failed to create initialization stamp")
}
defer initializedStampFile.Close()
if err := initializedStampFile.Chown(initContainerFlags.uid, initContainerFlags.uid); err != nil {
return errors.New("failed to change ownership of initialization stamp")
}
logrus.Debug("Listening to file system events")
for {
select {
case event := <-watcherForHost.Events:
handleFileSystemEvent(event)
case err := <-watcherForHost.Errors:
logrus.Warnf("Received an error from the file system watcher: %v", err)
}
}
return nil
}
func initContainerHelp(cmd *cobra.Command, args []string) {
if utils.IsInsideContainer() {
if !utils.IsInsideToolboxContainer() {
fmt.Fprintf(os.Stderr, "Error: this is not a toolbox container\n")
return
}
if _, err := utils.ForwardToHost(); err != nil {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
return
}
return
}
if err := utils.ShowManual("toolbox-init-container"); err != nil {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
return
}
}
func configureUsers(targetUserUid int,
targetUser, targetUserHome, targetUserShell string,
homeLink, targetUserExists bool) error {
if homeLink {
if err := redirectPath("/home", "/var/home", true); err != nil {
return err
}
}
sudoGroup, err := utils.GetGroupForSudo()
if err != nil {
return fmt.Errorf("failed to get group for sudo: %w", err)
}
if targetUserExists {
logrus.Debugf("Modifying user %s with UID %d:", targetUser, targetUserUid)
usermodArgs := []string{
"--append",
"--groups", sudoGroup,
"--home", targetUserHome,
"--shell", targetUserShell,
"--uid", fmt.Sprint(targetUserUid),
targetUser,
}
logrus.Debug("usermod")
for _, arg := range usermodArgs {
logrus.Debugf("%s", arg)
}
if err := shell.Run("usermod", nil, nil, nil, usermodArgs...); err != nil {
return fmt.Errorf("failed to modify user %s with UID %d", targetUser, targetUserUid)
}
} else {
logrus.Debugf("Adding user %s with UID %d:", targetUser, targetUserUid)
useraddArgs := []string{
"--groups", sudoGroup,
"--home-dir", targetUserHome,
"--no-create-home",
"--shell", targetUserShell,
"--uid", fmt.Sprint(targetUserUid),
targetUser,
}
logrus.Debug("useradd")
for _, arg := range useraddArgs {
logrus.Debugf("%s", arg)
}
if err := shell.Run("useradd", nil, nil, nil, useraddArgs...); err != nil {
return fmt.Errorf("failed to add user %s with UID %d", targetUser, targetUserUid)
}
}
logrus.Debugf("Removing password for user %s", targetUser)
if err := shell.Run("passwd", nil, nil, nil, "--delete", targetUser); err != nil {
return fmt.Errorf("failed to remove password for user %s", targetUser)
}
logrus.Debug("Removing password for user root")
if err := shell.Run("passwd", nil, nil, nil, "--delete", "root"); err != nil {
return errors.New("failed to remove password for root")
}
return nil
}
func handleFileSystemEvent(event fsnotify.Event) {
eventOpString := event.Op.String()
logrus.Debugf("Handling file system event: operation %s on %s", eventOpString, event.Name)
if event.Name == "/run/host/etc/localtime" {
if err := updateTimeZoneFromLocalTime(); err != nil {
logrus.Warnf("Failed to handle changes to the host's /etc/localtime: %v", err)
}
}
}
func mountBind(containerPath, source, flags string) error {
fi, err := os.Stat(source)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return fmt.Errorf("failed to stat %s", source)
}
if fi.IsDir() {
logrus.Debugf("Creating %s", containerPath)
if err := os.MkdirAll(containerPath, 0755); err != nil {
return fmt.Errorf("failed to create %s", containerPath)
}
}
logrus.Debugf("Binding %s to %s", containerPath, source)
args := []string{
"--rbind",
}
if flags != "" {
args = append(args, []string{"-o", flags}...)
}
args = append(args, []string{source, containerPath}...)
if err := shell.Run("mount", nil, nil, nil, args...); err != nil {
return fmt.Errorf("failed to bind %s to %s", containerPath, source)
}
return nil
}
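// Hedged example (paths taken from initContainerMounts above): a call such as
// mountBind("/tmp", "/run/host/tmp", "rslave") is roughly equivalent to running
//
//	mount --rbind -o rslave /run/host/tmp /tmp
//
// inside the container, creating the target directory first when the source is
// a directory.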
// redirectPath serves for creating symbolic links for crucial system
// configuration files to their counterparts on the host's filesystem.
//
// containerPath and target must be absolute paths
//
// If the target itself is a symbolic link, redirectPath will evaluate the
// link. If it's valid then redirectPath will link containerPath to target.
// If it's not, then redirectPath will still proceed with the linking in two
// different ways depending whether target is an absolute or a relative link:
//
// * absolute - target's destination will be prepended with /run/host, and
// containerPath will be linked to the resulting path. This is
// an attempt to unbreak things, but if it doesn't work then
// it's the user's responsibility to fix it up.
//
// This is meant to address the common case where
// /etc/resolv.conf on the host (ie., /run/host/etc/resolv.conf
// inside the container) is an absolute symbolic link to
// something like /run/systemd/resolve/stub-resolv.conf. The
// container's /etc/resolv.conf will then get linked to
// /run/host/run/systemd/resolve/stub-resolv.conf.
//
// This is why properly configured hosts should use relative
// symbolic links, because they don't need to be adjusted in
// such scenarios.
//
// * relative - containerPath will be linked to the invalid target, and it's
// the user's responsibility to fix it up.
//
// folder signifies if the target is a folder
func redirectPath(containerPath, target string, folder bool) error {
if !filepath.IsAbs(containerPath) {
panic("containerPath must be an absolute path")
}
if !filepath.IsAbs(target) {
panic("target must be an absolute path")
}
logrus.Debugf("Preparing to redirect %s to %s", containerPath, target)
targetSanitized := sanitizeRedirectionTarget(target)
err := os.Remove(containerPath)
if folder {
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to redirect %s to %s: %w", containerPath, target, err)
}
if err := os.MkdirAll(target, 0755); err != nil {
return fmt.Errorf("failed to redirect %s to %s: %w", containerPath, target, err)
}
}
logrus.Debugf("Redirecting %s to %s", containerPath, targetSanitized)
if err := os.Symlink(targetSanitized, containerPath); err != nil {
return fmt.Errorf("failed to redirect %s to %s: %w", containerPath, target, err)
}
return nil
}
func sanitizeRedirectionTarget(target string) string {
if !filepath.IsAbs(target) {
panic("target must be an absolute path")
}
fileInfo, err := os.Lstat(target)
if err != nil {
if os.IsNotExist(err) {
logrus.Warnf("%s not found", target)
} else {
logrus.Warnf("Failed to lstat %s: %v", target, err)
}
return target
}
fileMode := fileInfo.Mode()
if fileMode&os.ModeSymlink == 0 {
logrus.Debugf("%s isn't a symbolic link", target)
return target
}
logrus.Debugf("%s is a symbolic link", target)
_, err = filepath.EvalSymlinks(target)
if err == nil {
return target
}
logrus.Warnf("Failed to resolve %s: %v", target, err)
targetDestination, err := os.Readlink(target)
if err != nil {
logrus.Warnf("Failed to get the destination of %s: %v", target, err)
return target
}
logrus.Debugf("%s points to %s", target, targetDestination)
if filepath.IsAbs(targetDestination) {
logrus.Debugf("Prepending /run/host to %s", targetDestination)
targetGuess := filepath.Join("/run/host", targetDestination)
return targetGuess
}
return target
}
func updateTimeZoneFromLocalTime() error {
localTimeEvaled, err := filepath.EvalSymlinks("/etc/localtime")
if err != nil {
return fmt.Errorf("failed to resolve /etc/localtime: %w", err)
}
logrus.Debugf("Resolved /etc/localtime to %s", localTimeEvaled)
const zoneInfoRoot = "/run/host/usr/share/zoneinfo"
if !strings.HasPrefix(localTimeEvaled, zoneInfoRoot) {
return errors.New("/etc/localtime points to unknown location")
}
timeZone, err := filepath.Rel(zoneInfoRoot, localTimeEvaled)
if err != nil {
return fmt.Errorf("failed to extract time zone: %w", err)
}
const etcTimeZone = "/etc/timezone"
if err := os.Remove(etcTimeZone); err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("failed to remove old %s: %w", etcTimeZone, err)
}
}
timeZoneBytes := []byte(timeZone + "\n")
err = ioutil.WriteFile(etcTimeZone, timeZoneBytes, 0664)
if err != nil {
return fmt.Errorf("failed to create new %s: %w", etcTimeZone, err)
}
return nil
}
| [
"\"XDG_RUNTIME_DIR\""
]
| []
| [
"XDG_RUNTIME_DIR"
]
| [] | ["XDG_RUNTIME_DIR"] | go | 1 | 0 | |
http/casedhttp.go | package casedhttp
import (
"bytes"
"context"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"time"
"github.com/cased/cased-go"
)
const (
WebhookTimestampHeader = "X-Cased-Timestamp"
WebhookSignatureHeader = "X-Cased-Signature"
)
var (
WebhookSecret = os.Getenv("CASED_WEBHOOK_SECRET")
WebhookTimestampExpiredError = errors.New("webhook timestamp expired")
WebhookSignatureVerificationError = errors.New("webhook computed signature does not match signature sent with webhook")
)
type VerifyWebhookSignatureParams struct {
// Secret used to compute the HMAC signature. Optional if secret is provided
// by the CASED_WEBHOOK_SECRET environment variable.
Secret *string
// TimestampExpires if provided will reject webhook requests that are
// delivered after specified duration. Useful to prevent replay attacks. Each
// webhook attempt delivered from Cased will provide a new timestamp.
TimestampExpires time.Duration
}
func VerifyWebhookSignature(req *http.Request, params *VerifyWebhookSignatureParams) error {
if req.Method != http.MethodPost {
return errors.New("post request expected")
}
secret := params.Secret
if secret == nil {
secret = &WebhookSecret
}
// Check to see if timestamp expiration is configured and enforce it.
timestamp := req.Header.Get(WebhookTimestampHeader)
if params.TimestampExpires > 0 {
i, err := strconv.ParseInt(timestamp, 10, 64)
if err != nil {
return err
}
// Time from header
tm := time.Unix(i, 0)
expires := time.Now().Add(-params.TimestampExpires)
if tm.Before(expires) {
return WebhookTimestampExpiredError
}
}
signature := req.Header.Get(WebhookSignatureHeader)
body, err := ioutil.ReadAll(req.Body)
if err != nil {
return err
}
basestring := fmt.Sprintf("%s.%s", timestamp, string(body))
mac := hmac.New(sha256.New, []byte(*secret))
if _, err = mac.Write([]byte(basestring)); err != nil {
return err
}
computed := hex.EncodeToString(mac.Sum(nil))
req.Body = ioutil.NopCloser(bytes.NewBuffer(body))
// Compare the computed signature with signature sent with webhook
if signature != computed {
return WebhookSignatureVerificationError
}
return nil
}
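// Hedged sender-side sketch (variable names such as secret and payload are
// assumptions): the signature checked above is an HMAC-SHA256 of
// "<timestamp>.<body>", hex encoded, sent alongside the timestamp header.
//
//	timestamp := strconv.FormatInt(time.Now().Unix(), 10)
//	mac := hmac.New(sha256.New, []byte(secret))
//	mac.Write([]byte(timestamp + "." + string(payload)))
//	req.Header.Set(WebhookTimestampHeader, timestamp)
//	req.Header.Set(WebhookSignatureHeader, hex.EncodeToString(mac.Sum(nil)))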
func VerifyWebhookSignatureMiddleware(next http.Handler, params *VerifyWebhookSignatureParams) http.Handler {
if params.Secret == nil && WebhookSecret == "" {
panic("Must set CASED_WEBHOOK_SECRET or provide VerifyWebhookSignatureParams to VerifyWebhookSignatureMiddleware")
}
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if err := VerifyWebhookSignature(req, params); err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
next.ServeHTTP(w, req)
})
}
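// Hedged usage sketch (route and handler are hypothetical; CASED_WEBHOOK_SECRET
// is assumed to be set so the middleware does not panic):
//
//	params := &VerifyWebhookSignatureParams{TimestampExpires: 5 * time.Minute}
//	http.Handle("/webhooks/cased", VerifyWebhookSignatureMiddleware(webhookHandler, params))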
// ContextMiddleware attaches a cased.AuditEvent describing the incoming request
// (client location, URL, HTTP method, user agent, and request ID when present)
// to the request context for downstream publishers.
func ContextMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
location := req.RemoteAddr
if forwardedIP := req.Header.Get("X-Forwarded-For"); forwardedIP != "" {
location = forwardedIP
}
ae := cased.AuditEvent{
"location": cased.NewSensitiveValue(location, "ip-address"),
"request_url": req.URL.String(),
"request_http_method": req.Method,
"request_user_agent": req.Header.Get("User-Agent"),
}
if requestID := req.Header.Get("X-Request-ID"); requestID != "" {
ae["request_id"] = requestID
}
ctx := context.WithValue(req.Context(), cased.ContextKey, ae)
req = req.WithContext(ctx)
next.ServeHTTP(w, req)
})
}
| [
"\"CASED_WEBHOOK_SECRET\""
]
| []
| [
"CASED_WEBHOOK_SECRET"
]
| [] | ["CASED_WEBHOOK_SECRET"] | go | 1 | 0 | |
core/store/migrate/migrations/0056_multichain.go | package migrations
import (
"database/sql"
"fmt"
"math/big"
"os"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigration(Up56, Down56)
}
const up56 = `
CREATE TABLE evm_chains (
id numeric(78,0) PRIMARY KEY,
cfg jsonb NOT NULL DEFAULT '{}',
created_at timestamptz NOT NULL,
updated_at timestamptz NOT NULL
);
CREATE TABLE nodes (
id serial PRIMARY KEY,
name varchar(255) NOT NULL CHECK (name != ''),
evm_chain_id numeric(78,0) NOT NULL REFERENCES evm_chains (id),
ws_url text CHECK (ws_url != ''),
http_url text CHECK (http_url != ''),
send_only bool NOT NULL CONSTRAINT primary_or_sendonly CHECK (
(send_only AND ws_url IS NULL AND http_url IS NOT NULL)
OR
(NOT send_only AND ws_url IS NOT NULL)
),
created_at timestamptz NOT NULL,
updated_at timestamptz NOT NULL
);
CREATE INDEX idx_nodes_evm_chain_id ON nodes (evm_chain_id);
CREATE UNIQUE INDEX idx_nodes_unique_name ON nodes (lower(name));
INSERT INTO evm_chains (id, created_at, updated_at) VALUES (%[1]s, NOW(), NOW());
`
const down56 = `
DROP TABLE nodes;
DROP TABLE evm_chains;
`
func Up56(tx *sql.Tx) error {
chainIDStr := os.Getenv("ETH_CHAIN_ID")
if chainIDStr == "" {
chainIDStr = "1"
}
chainID, ok := new(big.Int).SetString(chainIDStr, 10)
if !ok {
panic(fmt.Sprintf("ETH_CHAIN_ID was invalid, expected a number, got: %s", chainIDStr))
}
sql := fmt.Sprintf(up56, chainID.String())
if _, err := tx.Exec(sql); err != nil {
return err
}
return nil
}
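// Illustrative note (the chain id is an example): with ETH_CHAIN_ID=42 the
// statement appended to up56 becomes
//
//	INSERT INTO evm_chains (id, created_at, updated_at) VALUES (42, NOW(), NOW());
//
// and when the variable is unset the id defaults to 1.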
func Down56(tx *sql.Tx) error {
_, err := tx.Exec(down56)
if err != nil {
return err
}
return nil
}
| [
"\"ETH_CHAIN_ID\""
]
| []
| [
"ETH_CHAIN_ID"
]
| [] | ["ETH_CHAIN_ID"] | go | 1 | 0 | |
completions.go | package cobra
import (
"fmt"
"os"
"strings"
"sync"
"github.com/spf13/pflag"
)
const (
// ShellCompRequestCmd is the name of the hidden command that is used to request
// completion results from the program. It is used by the shell completion scripts.
ShellCompRequestCmd = "__complete"
// ShellCompNoDescRequestCmd is the name of the hidden command that is used to request
// completion results without their description. It is used by the shell completion scripts.
ShellCompNoDescRequestCmd = "__completeNoDesc"
)
// Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it.
var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
// lock for reading and writing from flagCompletionFunctions
var flagCompletionMutex = &sync.RWMutex{}
// ShellCompDirective is a bit map representing the different behaviors the shell
// can be instructed to have once completions have been provided.
type ShellCompDirective int
type flagCompError struct {
subCommand string
flagName string
}
func (e *flagCompError) Error() string {
return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'"
}
const (
// ShellCompDirectiveError indicates an error occurred and completions should be ignored.
ShellCompDirectiveError ShellCompDirective = 1 << iota
// ShellCompDirectiveNoSpace indicates that the shell should not add a space
// after the completion even if there is a single completion provided.
ShellCompDirectiveNoSpace
// ShellCompDirectiveNoFileComp indicates that the shell should not provide
// file completion even when no completion is provided.
ShellCompDirectiveNoFileComp
// ShellCompDirectiveFilterFileExt indicates that the provided completions
// should be used as file extension filters.
// For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename()
// is a shortcut to using this directive explicitly. The BashCompFilenameExt
// annotation can also be used to obtain the same behavior for flags.
ShellCompDirectiveFilterFileExt
// ShellCompDirectiveFilterDirs indicates that only directory names should
// be provided in file completion. To request directory names within another
// directory, the returned completions should specify the directory within
// which to search. The BashCompSubdirsInDir annotation can be used to
// obtain the same behavior but only for flags.
ShellCompDirectiveFilterDirs
// ===========================================================================
// All directives using iota should be above this one.
// For internal use.
shellCompDirectiveMaxValue
// ShellCompDirectiveDefault indicates to let the shell perform its default
// behavior after completions have been provided.
// This one must be last to avoid messing up the iota count.
ShellCompDirectiveDefault ShellCompDirective = 0
)
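// For illustration (not part of the original comments): directives are bit flags
// and may be combined with a bitwise OR. A completion function can, for example,
// return ShellCompDirectiveNoSpace | ShellCompDirectiveNoFileComp to suppress
// both the trailing space and the fallback to file name completion.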
const (
// Constants for the completion command
compCmdName = "completion"
compCmdNoDescFlagName = "no-descriptions"
compCmdNoDescFlagDesc = "disable completion descriptions"
compCmdNoDescFlagDefault = false
)
// CompletionOptions are the options to control shell completion
type CompletionOptions struct {
// DisableDefaultCmd prevents Cobra from creating a default 'completion' command
DisableDefaultCmd bool
// DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag
// for shells that support completion descriptions
DisableNoDescFlag bool
// DisableDescriptions turns off all completion descriptions for shells
// that support them
DisableDescriptions bool
// HiddenDefaultCmd makes the default 'completion' command hidden
HiddenDefaultCmd bool
}
// NoFileCompletions can be used to disable file completion for commands that should
// not trigger file completions.
func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
return nil, ShellCompDirectiveNoFileComp
}
// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
flag := c.Flag(flagName)
if flag == nil {
return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
}
flagCompletionMutex.Lock()
defer flagCompletionMutex.Unlock()
if _, exists := flagCompletionFunctions[flag]; exists {
return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
}
flagCompletionFunctions[flag] = f
return nil
}
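// Hedged usage sketch (the "output" flag and its values are hypothetical):
//
//	cmd.RegisterFlagCompletionFunc("output", func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
//		return []string{"json\tJSON output", "yaml\tYAML output", "table\tHuman-readable table"}, ShellCompDirectiveNoFileComp
//	})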
// Returns a string listing the different directive enabled in the specified parameter
func (d ShellCompDirective) string() string {
var directives []string
if d&ShellCompDirectiveError != 0 {
directives = append(directives, "ShellCompDirectiveError")
}
if d&ShellCompDirectiveNoSpace != 0 {
directives = append(directives, "ShellCompDirectiveNoSpace")
}
if d&ShellCompDirectiveNoFileComp != 0 {
directives = append(directives, "ShellCompDirectiveNoFileComp")
}
if d&ShellCompDirectiveFilterFileExt != 0 {
directives = append(directives, "ShellCompDirectiveFilterFileExt")
}
if d&ShellCompDirectiveFilterDirs != 0 {
directives = append(directives, "ShellCompDirectiveFilterDirs")
}
if len(directives) == 0 {
directives = append(directives, "ShellCompDirectiveDefault")
}
if d >= shellCompDirectiveMaxValue {
return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d)
}
return strings.Join(directives, ", ")
}
// Adds a special hidden command that can be used to request custom completions.
func (c *Command) initCompleteCmd(args []string) {
completeCmd := &Command{
Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
Aliases: []string{ShellCompNoDescRequestCmd},
DisableFlagsInUseLine: true,
Hidden: true,
DisableFlagParsing: true,
Args: MinimumNArgs(1),
Short: "Request shell completion choices for the specified command-line",
Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s",
"to request completion choices for the specified command-line.", ShellCompRequestCmd),
Run: func(cmd *Command, args []string) {
finalCmd, completions, directive, err := cmd.getCompletions(args)
if err != nil {
CompErrorln(err.Error())
// Keep going for multiple reasons:
// 1- There could be some valid completions even though there was an error
// 2- Even without completions, we need to print the directive
}
noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd)
for _, comp := range completions {
if noDescriptions {
// Remove any description that may be included following a tab character.
comp = strings.Split(comp, "\t")[0]
}
// Make sure we only write the first line to the output.
// This is needed if a description contains a linebreak.
// Otherwise the shell scripts will interpret the other lines as new flags
// and could therefore provide a wrong completion.
comp = strings.Split(comp, "\n")[0]
// Finally trim the completion. This is especially important to get rid
// of a trailing tab when there are no description following it.
// For example, a sub-command without a description should not be completed
// with a tab at the end (or else zsh will show a -- following it
// although there is no description).
comp = strings.TrimSpace(comp)
// Print each possible completion to stdout for the completion script to consume.
fmt.Fprintln(finalCmd.OutOrStdout(), comp)
}
// As the last printout, print the completion directive for the completion script to parse.
// The directive integer must be that last character following a single colon (:).
// The completion script expects :<directive>
fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive)
// Print some helpful info to stderr for the user to understand.
// Output from stderr must be ignored by the completion script.
fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string())
},
}
c.AddCommand(completeCmd)
subCmd, _, err := c.Find(args)
if err != nil || subCmd.Name() != ShellCompRequestCmd {
// Only create this special command if it is actually being called.
// This reduces possible side-effects of creating such a command;
// for example, having this command would cause problems to a
// cobra program that only consists of the root command, since this
// command would cause the root command to suddenly have a subcommand.
c.RemoveCommand(completeCmd)
}
}
func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) {
// The last argument, which is not completely typed by the user,
// should not be part of the list of arguments
toComplete := args[len(args)-1]
trimmedArgs := args[:len(args)-1]
var finalCmd *Command
var finalArgs []string
var err error
// Find the real command for which completion must be performed
// check if we need to traverse here to parse local flags on parent commands
if c.Root().TraverseChildren {
finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs)
} else {
finalCmd, finalArgs, err = c.Root().Find(trimmedArgs)
}
if err != nil {
// Unable to find the real command. E.g., <program> someInvalidCmd <TAB>
return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs)
}
finalCmd.ctx = c.ctx
// Check if we are doing flag value completion before parsing the flags.
// This is important because if we are completing a flag value, we need to also
// remove the flag name argument from the list of finalArgs or else the parsing
// could fail due to an invalid value (incomplete) for the flag.
flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
// Check if interspersed is false or -- was set on a previous arg.
// This works by counting the arguments. Normally -- is not counted as arg but
// if -- was already set or interspersed is false and there is already one arg then
// the extra added -- is counted as arg.
flagCompletion := true
_ = finalCmd.ParseFlags(append(finalArgs, "--"))
newArgCount := finalCmd.Flags().NArg()
// Parse the flags early so we can check if required flags are set
if err = finalCmd.ParseFlags(finalArgs); err != nil {
return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
}
realArgCount := finalCmd.Flags().NArg()
if newArgCount > realArgCount {
// don't do flag completion (see above)
flagCompletion = false
}
// Error while attempting to parse flags
if flagErr != nil {
// If error type is flagCompError and we don't want flagCompletion we should ignore the error
if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) {
return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr
}
}
// We only remove the flags from the arguments if DisableFlagParsing is not set.
// This is important for commands which have requested to do their own flag completion.
if !finalCmd.DisableFlagParsing {
finalArgs = finalCmd.Flags().Args()
}
if flag != nil && flagCompletion {
// Check if we are completing a flag value subject to annotations
if validExts, present := flag.Annotations[BashCompFilenameExt]; present {
if len(validExts) != 0 {
// File completion filtered by extensions
return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil
}
// The annotation requests simple file completion. There is no reason to do
// that since it is the default behavior anyway. Let's ignore this annotation
// in case the program also registered a completion function for this flag.
// Even though it is a mistake on the program's side, let's be nice when we can.
}
if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present {
if len(subDir) == 1 {
// Directory completion from within a directory
return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil
}
// Directory completion
return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil
}
}
var completions []string
var directive ShellCompDirective
// Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true;
// doing this allows for completion of persistent flag names even for commands that disable flag parsing.
//
// When doing completion of a flag name, as soon as an argument starts with
// a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
// the flag name to be complete
if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion {
// First check for required flags
completions = completeRequireFlags(finalCmd, toComplete)
// If we have not found any required flags, only then can we show regular flags
if len(completions) == 0 {
doCompleteFlags := func(flag *pflag.Flag) {
if !flag.Changed ||
strings.Contains(flag.Value.Type(), "Slice") ||
strings.Contains(flag.Value.Type(), "Array") {
// If the flag is not already present, or if it can be specified multiple times (Array or Slice)
// we suggest it as a completion
completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
}
}
// We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands
// that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
// non-inherited flags.
finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
doCompleteFlags(flag)
})
finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
doCompleteFlags(flag)
})
}
directive = ShellCompDirectiveNoFileComp
if len(completions) == 1 && strings.HasSuffix(completions[0], "=") {
// If there is a single completion, the shell usually adds a space
// after the completion. We don't want that if the flag ends with an =
directive = ShellCompDirectiveNoSpace
}
if !finalCmd.DisableFlagParsing {
// If DisableFlagParsing==false, we have completed the flags as known by Cobra;
// we can return what we found.
// If DisableFlagParsing==true, Cobra may not be aware of all flags, so we
// let the logic continue to see if ValidArgsFunction needs to be called.
return finalCmd, completions, directive, nil
}
} else {
directive = ShellCompDirectiveDefault
if flag == nil {
foundLocalNonPersistentFlag := false
// If TraverseChildren is true on the root command we don't check for
// local flags because we can use a local flag on a parent command
if !finalCmd.Root().TraverseChildren {
// Check if there are any local, non-persistent flags on the command-line
localNonPersistentFlags := finalCmd.LocalNonPersistentFlags()
finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed {
foundLocalNonPersistentFlag = true
}
})
}
// Complete subcommand names, including the help command
if len(finalArgs) == 0 && !foundLocalNonPersistentFlag {
// We only complete sub-commands if:
// - there are no arguments on the command-line and
// - there are no local, non-persistent flags on the command-line or TraverseChildren is true
for _, subCmd := range finalCmd.Commands() {
if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand {
if strings.HasPrefix(subCmd.Name(), toComplete) {
completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
}
directive = ShellCompDirectiveNoFileComp
}
}
}
// Complete required flags even without the '-' prefix
completions = append(completions, completeRequireFlags(finalCmd, toComplete)...)
// Always complete ValidArgs, even if we are completing a subcommand name.
// This is for commands that have both subcommands and ValidArgs.
if len(finalCmd.ValidArgs) > 0 {
if len(finalArgs) == 0 {
// ValidArgs are only for the first argument
for _, validArg := range finalCmd.ValidArgs {
if strings.HasPrefix(validArg, toComplete) {
completions = append(completions, validArg)
}
}
directive = ShellCompDirectiveNoFileComp
// If no completions were found within commands or ValidArgs,
// see if there are any ArgAliases that should be completed.
if len(completions) == 0 {
for _, argAlias := range finalCmd.ArgAliases {
if strings.HasPrefix(argAlias, toComplete) {
completions = append(completions, argAlias)
}
}
}
}
// If there are ValidArgs specified (even if they don't match), we stop completion.
// Only one of ValidArgs or ValidArgsFunction can be used for a single command.
return finalCmd, completions, directive, nil
}
// Let the logic continue so as to add any ValidArgsFunction completions,
// even if we already found sub-commands.
// This is for commands that have subcommands but also specify a ValidArgsFunction.
}
}
// Find the completion function for the flag or command
var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
if flag != nil && flagCompletion {
flagCompletionMutex.RLock()
completionFn = flagCompletionFunctions[flag]
flagCompletionMutex.RUnlock()
} else {
completionFn = finalCmd.ValidArgsFunction
}
if completionFn != nil {
// Go custom completion defined for this flag or command.
// Call the registered completion function to get the completions.
var comps []string
comps, directive = completionFn(finalCmd, finalArgs, toComplete)
completions = append(completions, comps...)
}
return finalCmd, completions, directive, nil
}
func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string {
if nonCompletableFlag(flag) {
return []string{}
}
var completions []string
flagName := "--" + flag.Name
if strings.HasPrefix(flagName, toComplete) {
// Flag without the =
completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
// Why suggest both long forms: --flag and --flag= ?
// This forces the user to *always* have to type either an = or a space after the flag name.
// Let's be nice and avoid making users have to do that.
// Since boolean flags and shortname flags don't show the = form, let's go that route and never show it.
// The = form will still work, we just won't suggest it.
// This also makes the list of suggested flags shorter as we avoid all the = forms.
//
// if len(flag.NoOptDefVal) == 0 {
// // Flag requires a value, so it can be suffixed with =
// flagName += "="
// completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
// }
}
flagName = "-" + flag.Shorthand
if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) {
completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
}
return completions
}
func completeRequireFlags(finalCmd *Command, toComplete string) []string {
var completions []string
doCompleteRequiredFlags := func(flag *pflag.Flag) {
if _, present := flag.Annotations[BashCompOneRequiredFlag]; present {
if !flag.Changed {
// If the flag is not already present, we suggest it as a completion
completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
}
}
}
// We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands
// that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
// non-inherited flags.
finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
doCompleteRequiredFlags(flag)
})
finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
doCompleteRequiredFlags(flag)
})
return completions
}
func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) {
if finalCmd.DisableFlagParsing {
// We only do flag completion if we are allowed to parse flags
// This is important for commands which have requested to do their own flag completion.
return nil, args, lastArg, nil
}
var flagName string
trimmedArgs := args
flagWithEqual := false
orgLastArg := lastArg
// When doing completion of a flag name, as soon as an argument starts with
// a '-' we know it is a flag. We cannot use isFlagArg() here as that function
// requires the flag name to be complete
if len(lastArg) > 0 && lastArg[0] == '-' {
if index := strings.Index(lastArg, "="); index >= 0 {
// Flag with an =
if strings.HasPrefix(lastArg[:index], "--") {
// Flag has full name
flagName = lastArg[2:index]
} else {
// Flag is shorthand
// We have to get the last shorthand flag name
// e.g. `-asd` => d to provide the correct completion
// https://github.com/spf13/cobra/issues/1257
flagName = lastArg[index-1 : index]
}
lastArg = lastArg[index+1:]
flagWithEqual = true
} else {
// Normal flag completion
return nil, args, lastArg, nil
}
}
if len(flagName) == 0 {
if len(args) > 0 {
prevArg := args[len(args)-1]
if isFlagArg(prevArg) {
// Only consider the case where the flag does not contain an =.
// If the flag contains an = it means it has already been fully processed,
// so we don't need to deal with it here.
if index := strings.Index(prevArg, "="); index < 0 {
if strings.HasPrefix(prevArg, "--") {
// Flag has full name
flagName = prevArg[2:]
} else {
// Flag is shorthand
// We have to get the last shorthand flag name
// e.g. `-asd` => d to provide the correct completion
// https://github.com/spf13/cobra/issues/1257
flagName = prevArg[len(prevArg)-1:]
}
// Remove the uncompleted flag or else there could be an error created
// for an invalid value for that flag
trimmedArgs = args[:len(args)-1]
}
}
}
}
if len(flagName) == 0 {
// Not doing flag completion
return nil, trimmedArgs, lastArg, nil
}
flag := findFlag(finalCmd, flagName)
if flag == nil {
// Flag not supported by this command, the interspersed option might be set so return the original args
return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName}
}
if !flagWithEqual {
if len(flag.NoOptDefVal) != 0 {
// We had assumed dealing with a two-word flag but the flag is a boolean flag.
// In that case, there is no value following it, so we are not really doing flag completion.
// Reset everything to do noun completion.
trimmedArgs = args
flag = nil
}
}
return flag, trimmedArgs, lastArg, nil
}
// initDefaultCompletionCmd adds a default 'completion' command to c.
// This function will do nothing if any of the following is true:
// 1- the feature has been explicitly disabled by the program,
// 2- c has no subcommands (to avoid creating one),
// 3- c already has a 'completion' command provided by the program.
func (c *Command) initDefaultCompletionCmd() {
if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() {
return
}
for _, cmd := range c.commands {
if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) {
// A completion command is already available
return
}
}
haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions
completionCmd := &Command{
Use: compCmdName,
Short: "Generate the autocompletion script for the specified shell",
Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell.
See each sub-command's help for details on how to use the generated script.
`, c.Root().Name()),
Args: NoArgs,
ValidArgsFunction: NoFileCompletions,
Hidden: c.CompletionOptions.HiddenDefaultCmd,
}
c.AddCommand(completionCmd)
out := c.OutOrStdout()
noDesc := c.CompletionOptions.DisableDescriptions
shortDesc := "Generate the autocompletion script for %s"
bash := &Command{
Use: "bash",
Short: fmt.Sprintf(shortDesc, "bash"),
Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell.
This script depends on the 'bash-completion' package.
If it is not installed already, you can install it via your OS's package manager.
To load completions in your current shell session:
source <(%[1]s completion bash)
To load completions for every new session, execute once:
#### Linux:
%[1]s completion bash > /etc/bash_completion.d/%[1]s
#### macOS:
%[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s
You will need to start a new shell for this setup to take effect.
`, c.Root().Name()),
Args: NoArgs,
DisableFlagsInUseLine: true,
ValidArgsFunction: NoFileCompletions,
RunE: func(cmd *Command, args []string) error {
return cmd.Root().GenBashCompletionV2(out, !noDesc)
},
}
if haveNoDescFlag {
bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
}
zsh := &Command{
Use: "zsh",
Short: fmt.Sprintf(shortDesc, "zsh"),
Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell.
If shell completion is not already enabled in your environment you will need
to enable it. You can execute the following once:
echo "autoload -U compinit; compinit" >> ~/.zshrc
To load completions for every new session, execute once:
#### Linux:
%[1]s completion zsh > "${fpath[1]}/_%[1]s"
#### macOS:
%[1]s completion zsh > /usr/local/share/zsh/site-functions/_%[1]s
You will need to start a new shell for this setup to take effect.
`, c.Root().Name()),
Args: NoArgs,
ValidArgsFunction: NoFileCompletions,
RunE: func(cmd *Command, args []string) error {
if noDesc {
return cmd.Root().GenZshCompletionNoDesc(out)
}
return cmd.Root().GenZshCompletion(out)
},
}
if haveNoDescFlag {
zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
}
fish := &Command{
Use: "fish",
Short: fmt.Sprintf(shortDesc, "fish"),
Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell.
To load completions in your current shell session:
%[1]s completion fish | source
To load completions for every new session, execute once:
%[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
You will need to start a new shell for this setup to take effect.
`, c.Root().Name()),
Args: NoArgs,
ValidArgsFunction: NoFileCompletions,
RunE: func(cmd *Command, args []string) error {
return cmd.Root().GenFishCompletion(out, !noDesc)
},
}
if haveNoDescFlag {
fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
}
powershell := &Command{
Use: "powershell",
Short: fmt.Sprintf(shortDesc, "powershell"),
Long: fmt.Sprintf(`Generate the autocompletion script for powershell.
To load completions in your current shell session:
%[1]s completion powershell | Out-String | Invoke-Expression
To load completions for every new session, add the output of the above command
to your powershell profile.
`, c.Root().Name()),
Args: NoArgs,
ValidArgsFunction: NoFileCompletions,
RunE: func(cmd *Command, args []string) error {
if noDesc {
return cmd.Root().GenPowerShellCompletion(out)
}
return cmd.Root().GenPowerShellCompletionWithDesc(out)
},
}
if haveNoDescFlag {
powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
}
completionCmd.AddCommand(bash, zsh, fish, powershell)
}
func findFlag(cmd *Command, name string) *pflag.Flag {
flagSet := cmd.Flags()
if len(name) == 1 {
// First convert the short flag into a long flag
// as the cmd.Flag() search only accepts long flags
if short := flagSet.ShorthandLookup(name); short != nil {
name = short.Name
} else {
set := cmd.InheritedFlags()
if short = set.ShorthandLookup(name); short != nil {
name = short.Name
} else {
return nil
}
}
}
return cmd.Flag(name)
}
// CompDebug prints the specified string to the same file as where the
// completion script prints its logs.
// Note that completion printouts should never be on stdout as they would
// be wrongly interpreted as actual completion choices by the completion script.
func CompDebug(msg string, printToStdErr bool) {
msg = fmt.Sprintf("[Debug] %s", msg)
// Such logs are only printed when the user has set the environment
// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" {
f, err := os.OpenFile(path,
os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err == nil {
defer f.Close()
WriteStringAndCheck(f, msg)
}
}
if printToStdErr {
// Must print to stderr for this not to be read by the completion script.
fmt.Fprint(os.Stderr, msg)
}
}
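// Hedged example of enabling these logs from a shell (the program name is
// hypothetical):
//
//	export BASH_COMP_DEBUG_FILE=/tmp/myprog-completion.log
//	myprog __complete sub ""   # completion attempts append debug lines to the log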
// CompDebugln prints the specified string with a newline at the end
// to the same file as where the completion script prints its logs.
// Such logs are only printed when the user has set the environment
// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
func CompDebugln(msg string, printToStdErr bool) {
CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr)
}
// CompError prints the specified completion message to stderr.
func CompError(msg string) {
msg = fmt.Sprintf("[Error] %s", msg)
CompDebug(msg, true)
}
// CompErrorln prints the specified completion message to stderr with a newline at the end.
func CompErrorln(msg string) {
CompError(fmt.Sprintf("%s\n", msg))
}
| [
"\"BASH_COMP_DEBUG_FILE\""
]
| []
| [
"BASH_COMP_DEBUG_FILE"
]
| [] | ["BASH_COMP_DEBUG_FILE"] | go | 1 | 0 | |
source/helper/website_helper.py | ###############################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# PURPOSE:
# Copy files for the Content Analysis website from a source build bucket to a
# deployment bucket. This function is started as a custom resource in the
# aws-content-analysis-web.yaml cloud formation template.
#
###############################################################################
import boto3
import json
import logging
import os
from urllib.request import build_opener, HTTPHandler, Request
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
replace_env_variables = False
def send_response(event, context, response_status, response_data):
"""
Send a resource manipulation status response to CloudFormation
"""
response_body = json.dumps({
"Status": response_status,
"Reason": "See the details in CloudWatch Log Stream: " + context.log_stream_name,
"PhysicalResourceId": context.log_stream_name,
"StackId": event['StackId'],
"RequestId": event['RequestId'],
"LogicalResourceId": event['LogicalResourceId'],
"Data": response_data
})
LOGGER.info('ResponseURL: {s}'.format(s=event['ResponseURL']))
LOGGER.info('ResponseBody: {s}'.format(s=response_body))
opener = build_opener(HTTPHandler)
request = Request(event['ResponseURL'], data=response_body.encode('utf-8'))
request.add_header('Content-Type', '')
request.add_header('Content-Length', len(response_body))
request.get_method = lambda: 'PUT'
response = opener.open(request)
LOGGER.info("Status code: {s}".format(s=response.getcode))
LOGGER.info("Status message: {s}".format(s=response.msg))
def write_to_s3(event, context, bucket, key, body):
try:
s3_client.put_object(Bucket=bucket, Key=key, Body=body)
except Exception as e:
LOGGER.info('Unable to write file to s3: {e}'.format(e=e))
send_response(event, context, "FAILED",
{"Message": "Failed to write file to s3 after variable replacement"})
else:
LOGGER.info('Wrote file back to s3 after variable replacement')
def read_from_s3(event, context, bucket, key):
try:
obj = s3_client.get_object(
Bucket=bucket,
Key=key
)
except Exception as e:
LOGGER.info(
'Unable to read key: {key} from s3 bucket: {bucket}. Error: {e}'.format(e=e, key=key, bucket=bucket))
send_response(event, context, "FAILED",
{"Message": "Failed to read file from s3"})
else:
results = obj['Body'].read().decode('utf-8')
return results
def copy_source(event, context):
try:
source_bucket = event["ResourceProperties"]["WebsiteCodeBucket"]
source_key = event["ResourceProperties"]["WebsiteCodePrefix"]
website_bucket = event["ResourceProperties"]["DeploymentBucket"].split('.')[0]
except KeyError as e:
LOGGER.info("Failed to retrieve required values from the CloudFormation event: {e}".format(e=e))
send_response(event, context, "FAILED", {"Message": "Failed to retrieve required values from the CloudFormation event"})
else:
try:
LOGGER.info("Checking if custom environment variables are present")
try:
search = 'https://'+os.environ['SearchEndpoint']
dataplane = os.environ['DataplaneEndpoint']
workflow = os.environ['WorkflowEndpoint']
dataplane_bucket = os.environ['DataplaneBucket']
user_pool_id = os.environ['UserPoolId']
region = os.environ['AwsRegion']
client_id = os.environ['PoolClientId']
identity_id = os.environ['IdentityPoolId']
except KeyError:
replace_env_variables = False
else:
new_variables = {"SEARCH_ENDPOINT": search, "WORKFLOW_API_ENDPOINT": workflow,
"DATAPLANE_API_ENDPOINT": dataplane, "DATAPLANE_BUCKET": dataplane_bucket, "AWS_REGION": region,
"USER_POOL_ID": user_pool_id, "USER_POOL_CLIENT_ID": client_id, "IDENTITY_POOL_ID": identity_id}
replace_env_variables = True
LOGGER.info(
"New variables: {v}".format(v=new_variables))
deployment_bucket = s3.Bucket(website_bucket)
with open('./webapp-manifest.json') as file:
manifest = json.load(file)
print('UPLOADING FILES::')
for key in manifest:
print('s3://'+source_bucket+'/'+source_key+'/'+key)
copy_object = {
'Bucket': source_bucket,
'Key': source_key+'/'+key
}
s3.meta.client.copy(copy_object, website_bucket, key)
if replace_env_variables is True and key == "runtimeConfig.json":
LOGGER.info("updating runtimeConfig.json")
write_to_s3(event, context, website_bucket, key, json.dumps(new_variables))
except Exception as e:
LOGGER.info("Unable to copy website source code into the website bucket: {e}".format(e=e))
send_response(event, context, "FAILED", {"Message": "Unexpected event received from CloudFormation"})
else:
send_response(event, context, "SUCCESS",
{"Message": "Resource creation successful!"})
def lambda_handler(event, context):
"""
Handle Lambda event from AWS
"""
try:
LOGGER.info('REQUEST RECEIVED:\n {s}'.format(s=event))
LOGGER.info('REQUEST RECEIVED:\n {s}'.format(s=context))
if event['RequestType'] == 'Create':
LOGGER.info('CREATE!')
copy_source(event, context)
elif event['RequestType'] == 'Update':
LOGGER.info('UPDATE!')
copy_source(event, context)
elif event['RequestType'] == 'Delete':
LOGGER.info('DELETE!')
send_response(event, context, "SUCCESS",
{"Message": "Resource deletion successful!"})
else:
LOGGER.info('FAILED!')
send_response(event, context, "FAILED", {"Message": "Unexpected event received from CloudFormation"})
except Exception as e:
LOGGER.info('FAILED!')
send_response(event, context, "FAILED", {"Message": "Exception during processing: {e}".format(e=e)})
| []
| []
| [
"SearchEndpoint",
"UserPoolId",
"WorkflowEndpoint",
"IdentityPoolId",
"DataplaneBucket",
"DataplaneEndpoint",
"PoolClientId",
"AwsRegion"
]
| [] | ["SearchEndpoint", "UserPoolId", "WorkflowEndpoint", "IdentityPoolId", "DataplaneBucket", "DataplaneEndpoint", "PoolClientId", "AwsRegion"] | python | 8 | 0 | |
src/cmd/link/link_test.go | package main
import (
"bufio"
"bytes"
"debug/macho"
"internal/testenv"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
)
var AuthorPaidByTheColumnInch struct {
fog int `text:"London. Michaelmas term lately over, and the Lord Chancellor sitting in Lincoln’s Inn Hall. Implacable November weather. As much mud in the streets as if the waters had but newly retired from the face of the earth, and it would not be wonderful to meet a Megalosaurus, forty feet long or so, waddling like an elephantine lizard up Holborn Hill. Smoke lowering down from chimney-pots, making a soft black drizzle, with flakes of soot in it as big as full-grown snowflakes—gone into mourning, one might imagine, for the death of the sun. Dogs, undistinguishable in mire. Horses, scarcely better; splashed to their very blinkers. Foot passengers, jostling one another’s umbrellas in a general infection of ill temper, and losing their foot-hold at street-corners, where tens of thousands of other foot passengers have been slipping and sliding since the day broke (if this day ever broke), adding new deposits to the crust upon crust of mud, sticking at those points tenaciously to the pavement, and accumulating at compound interest. Fog everywhere. Fog up the river, where it flows among green aits and meadows; fog down the river, where it rolls defiled among the tiers of shipping and the waterside pollutions of a great (and dirty) city. Fog on the Essex marshes, fog on the Kentish heights. Fog creeping into the cabooses of collier-brigs; fog lying out on the yards and hovering in the rigging of great ships; fog drooping on the gunwales of barges and small boats. Fog in the eyes and throats of ancient Greenwich pensioners, wheezing by the firesides of their wards; fog in the stem and bowl of the afternoon pipe of the wrathful skipper, down in his close cabin; fog cruelly pinching the toes and fingers of his shivering little ‘prentice boy on deck. Chance people on the bridges peeping over the parapets into a nether sky of fog, with fog all round them, as if they were up in a balloon and hanging in the misty clouds. Gas looming through the fog in divers places in the streets, much as the sun may, from the spongey fields, be seen to loom by husbandman and ploughboy. Most of the shops lighted two hours before their time—as the gas seems to know, for it has a haggard and unwilling look. The raw afternoon is rawest, and the dense fog is densest, and the muddy streets are muddiest near that leaden-headed old obstruction, appropriate ornament for the threshold of a leaden-headed old corporation, Temple Bar. And hard by Temple Bar, in Lincoln’s Inn Hall, at the very heart of the fog, sits the Lord High Chancellor in his High Court of Chancery."`
wind int `text:"It was grand to see how the wind awoke, and bent the trees, and drove the rain before it like a cloud of smoke; and to hear the solemn thunder, and to see the lightning; and while thinking with awe of the tremendous powers by which our little lives are encompassed, to consider how beneficent they are, and how upon the smallest flower and leaf there was already a freshness poured from all this seeming rage, which seemed to make creation new again."`
jarndyce int `text:"Jarndyce and Jarndyce drones on. This scarecrow of a suit has, over the course of time, become so complicated, that no man alive knows what it means. The parties to it understand it least; but it has been observed that no two Chancery lawyers can talk about it for five minutes, without coming to a total disagreement as to all the premises. Innumerable children have been born into the cause; innumerable young people have married into it; innumerable old people have died out of it. Scores of persons have deliriously found themselves made parties in Jarndyce and Jarndyce, without knowing how or why; whole families have inherited legendary hatreds with the suit. The little plaintiff or defendant, who was promised a new rocking-horse when Jarndyce and Jarndyce should be settled, has grown up, possessed himself of a real horse, and trotted away into the other world. Fair wards of court have faded into mothers and grandmothers; a long procession of Chancellors has come in and gone out; the legion of bills in the suit have been transformed into mere bills of mortality; there are not three Jarndyces left upon the earth perhaps, since old Tom Jarndyce in despair blew his brains out at a coffee-house in Chancery Lane; but Jarndyce and Jarndyce still drags its dreary length before the Court, perennially hopeless."`
principle int `text:"The one great principle of the English law is, to make business for itself. There is no other principle distinctly, certainly, and consistently maintained through all its narrow turnings. Viewed by this light it becomes a coherent scheme, and not the monstrous maze the laity are apt to think it. Let them but once clearly perceive that its grand principle is to make business for itself at their expense, and surely they will cease to grumble."`
}
func TestLargeSymName(t *testing.T) {
// The compiler generates a symbol name using the string form of the
// type. This tests that the linker can read symbol names larger than
// the bufio buffer. Issue #15104.
_ = AuthorPaidByTheColumnInch
}
func TestIssue21703(t *testing.T) {
t.Parallel()
testenv.MustHaveGoBuild(t)
const source = `
package main
const X = "\n!\n"
func main() {}
`
tmpdir, err := ioutil.TempDir("", "issue21703")
if err != nil {
t.Fatalf("failed to create temp dir: %v\n", err)
}
defer os.RemoveAll(tmpdir)
err = ioutil.WriteFile(filepath.Join(tmpdir, "main.go"), []byte(source), 0666)
if err != nil {
t.Fatalf("failed to write main.go: %v\n", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "main.go")
cmd.Dir = tmpdir
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("failed to compile main.go: %v, output: %s\n", err, out)
}
cmd = exec.Command(testenv.GoToolPath(t), "tool", "link", "main.o")
cmd.Dir = tmpdir
out, err = cmd.CombinedOutput()
if err != nil {
t.Fatalf("failed to link main.o: %v, output: %s\n", err, out)
}
}
// TestIssue28429 ensures that the linker does not attempt to link
// sections not named *.o. Such sections may be used by a build system
// to, for example, save facts produced by a modular static analysis
// such as golang.org/x/tools/go/analysis.
func TestIssue28429(t *testing.T) {
t.Parallel()
testenv.MustHaveGoBuild(t)
tmpdir, err := ioutil.TempDir("", "issue28429-")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
defer os.RemoveAll(tmpdir)
write := func(name, content string) {
err := ioutil.WriteFile(filepath.Join(tmpdir, name), []byte(content), 0666)
if err != nil {
t.Fatal(err)
}
}
runGo := func(args ...string) {
cmd := exec.Command(testenv.GoToolPath(t), args...)
cmd.Dir = tmpdir
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("'go %s' failed: %v, output: %s",
strings.Join(args, " "), err, out)
}
}
// Compile a main package.
write("main.go", "package main; func main() {}")
runGo("tool", "compile", "-p", "main", "main.go")
runGo("tool", "pack", "c", "main.a", "main.o")
// Add an extra section with a short, non-.o name.
// This simulates an alternative build system.
write(".facts", "this is not an object file")
runGo("tool", "pack", "r", "main.a", ".facts")
// Verify that the linker does not attempt
// to compile the extra section.
runGo("tool", "link", "main.a")
}
func TestUnresolved(t *testing.T) {
testenv.MustHaveGoBuild(t)
tmpdir, err := ioutil.TempDir("", "unresolved-")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
defer os.RemoveAll(tmpdir)
write := func(name, content string) {
err := ioutil.WriteFile(filepath.Join(tmpdir, name), []byte(content), 0666)
if err != nil {
t.Fatal(err)
}
}
// Test various undefined references. Because of issue #29852,
// this used to give confusing error messages because the
// linker would find an undefined reference to "zero" created
// by the runtime package.
write("go.mod", "module testunresolved\n")
write("main.go", `package main
func main() {
x()
}
func x()
`)
write("main.s", `
TEXT ·x(SB),0,$0
MOVD zero<>(SB), AX
MOVD zero(SB), AX
MOVD ·zero(SB), AX
RET
`)
cmd := exec.Command(testenv.GoToolPath(t), "build")
cmd.Dir = tmpdir
cmd.Env = append(os.Environ(),
"GOARCH=amd64", "GOOS=linux", "GOPATH="+filepath.Join(tmpdir, "_gopath"))
out, err := cmd.CombinedOutput()
if err == nil {
t.Fatalf("expected build to fail, but it succeeded")
}
out = regexp.MustCompile("(?m)^#.*\n").ReplaceAll(out, nil)
got := string(out)
want := `main.x: relocation target zero not defined
main.x: relocation target zero not defined
main.x: relocation target main.zero not defined
`
if want != got {
t.Fatalf("want:\n%sgot:\n%s", want, got)
}
}
func TestIssue33979(t *testing.T) {
testenv.MustHaveGoBuild(t)
testenv.MustHaveCGO(t)
// Skip test on platforms that do not support cgo internal linking.
switch runtime.GOARCH {
case "mips", "mipsle", "mips64", "mips64le":
t.Skipf("Skipping on %s/%s", runtime.GOOS, runtime.GOARCH)
}
if runtime.GOOS == "aix" {
t.Skipf("Skipping on %s/%s", runtime.GOOS, runtime.GOARCH)
}
tmpdir, err := ioutil.TempDir("", "unresolved-")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
defer os.RemoveAll(tmpdir)
write := func(name, content string) {
err := ioutil.WriteFile(filepath.Join(tmpdir, name), []byte(content), 0666)
if err != nil {
t.Fatal(err)
}
}
run := func(name string, args ...string) string {
cmd := exec.Command(name, args...)
cmd.Dir = tmpdir
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("'go %s' failed: %v, output: %s", strings.Join(args, " "), err, out)
}
return string(out)
}
runGo := func(args ...string) string {
return run(testenv.GoToolPath(t), args...)
}
// Test object with undefined reference that was not generated
// by Go, resulting in an SXREF symbol being loaded during linking.
// Because of issue #33979, the SXREF symbol would be found during
// error reporting, resulting in confusing error messages.
write("main.go", `package main
func main() {
x()
}
func x()
`)
// The following assembly must work on all architectures.
write("x.s", `
TEXT ·x(SB),0,$0
CALL foo(SB)
RET
`)
write("x.c", `
void undefined();
void foo() {
undefined();
}
`)
cc := strings.TrimSpace(runGo("env", "CC"))
cflags := strings.Fields(runGo("env", "GOGCCFLAGS"))
// Compile, assemble and pack the Go and C code.
runGo("tool", "asm", "-gensymabis", "-o", "symabis", "x.s")
runGo("tool", "compile", "-symabis", "symabis", "-p", "main", "-o", "x1.o", "main.go")
runGo("tool", "asm", "-o", "x2.o", "x.s")
run(cc, append(cflags, "-c", "-o", "x3.o", "x.c")...)
runGo("tool", "pack", "c", "x.a", "x1.o", "x2.o", "x3.o")
// Now attempt to link using the internal linker.
cmd := exec.Command(testenv.GoToolPath(t), "tool", "link", "-linkmode=internal", "x.a")
cmd.Dir = tmpdir
out, err := cmd.CombinedOutput()
if err == nil {
t.Fatalf("expected link to fail, but it succeeded")
}
re := regexp.MustCompile(`(?m)^main\(.*text\): relocation target undefined not defined$`)
if !re.Match(out) {
t.Fatalf("got:\n%q\nwant:\n%s", out, re)
}
}
func TestBuildForTvOS(t *testing.T) {
testenv.MustHaveCGO(t)
testenv.MustHaveGoBuild(t)
// Only run this on darwin/amd64, where we can cross build for tvOS.
if runtime.GOARCH != "amd64" || runtime.GOOS != "darwin" {
t.Skip("skipping on non-darwin/amd64 platform")
}
if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
t.Skip("skipping in -short mode with $GO_BUILDER_NAME empty")
}
if err := exec.Command("xcrun", "--help").Run(); err != nil {
t.Skipf("error running xcrun, required for iOS cross build: %v", err)
}
sdkPath, err := exec.Command("xcrun", "--sdk", "appletvos", "--show-sdk-path").Output()
if err != nil {
t.Skip("failed to locate appletvos SDK, skipping")
}
CC := []string{
"clang",
"-arch",
"arm64",
"-isysroot", strings.TrimSpace(string(sdkPath)),
"-mtvos-version-min=12.0",
"-fembed-bitcode",
"-framework", "CoreFoundation",
}
lib := filepath.Join("testdata", "testBuildFortvOS", "lib.go")
tmpDir, err := ioutil.TempDir("", "go-link-TestBuildFortvOS")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
ar := filepath.Join(tmpDir, "lib.a")
cmd := exec.Command(testenv.GoToolPath(t), "build", "-buildmode=c-archive", "-o", ar, lib)
cmd.Env = append(os.Environ(),
"CGO_ENABLED=1",
"GOOS=darwin",
"GOARCH=arm64",
"CC="+strings.Join(CC, " "),
"CGO_CFLAGS=", // ensure CGO_CFLAGS does not contain any flags. Issue #35459
)
if out, err := cmd.CombinedOutput(); err != nil {
t.Fatalf("%v: %v:\n%s", cmd.Args, err, out)
}
link := exec.Command(CC[0], CC[1:]...)
link.Args = append(link.Args, ar, filepath.Join("testdata", "testBuildFortvOS", "main.m"))
if out, err := link.CombinedOutput(); err != nil {
t.Fatalf("%v: %v:\n%s", link.Args, err, out)
}
}
var testXFlagSrc = `
package main
var X = "hello"
var Z = [99999]int{99998:12345} // make it large enough to be mmaped
func main() { println(X) }
`
func TestXFlag(t *testing.T) {
testenv.MustHaveGoBuild(t)
tmpdir, err := ioutil.TempDir("", "TestXFlag")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
src := filepath.Join(tmpdir, "main.go")
err = ioutil.WriteFile(src, []byte(testXFlagSrc), 0666)
if err != nil {
t.Fatal(err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-ldflags=-X=main.X=meow", "-o", filepath.Join(tmpdir, "main"), src)
if out, err := cmd.CombinedOutput(); err != nil {
t.Errorf("%v: %v:\n%s", cmd.Args, err, out)
}
}
var testMacOSVersionSrc = `
package main
func main() { }
`
func TestMacOSVersion(t *testing.T) {
testenv.MustHaveGoBuild(t)
tmpdir, err := ioutil.TempDir("", "TestMacOSVersion")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
src := filepath.Join(tmpdir, "main.go")
err = ioutil.WriteFile(src, []byte(testMacOSVersionSrc), 0666)
if err != nil {
t.Fatal(err)
}
exe := filepath.Join(tmpdir, "main")
cmd := exec.Command(testenv.GoToolPath(t), "build", "-ldflags=-linkmode=internal", "-o", exe, src)
cmd.Env = append(os.Environ(),
"CGO_ENABLED=0",
"GOOS=darwin",
"GOARCH=amd64",
)
if out, err := cmd.CombinedOutput(); err != nil {
t.Fatalf("%v: %v:\n%s", cmd.Args, err, out)
}
exef, err := os.Open(exe)
if err != nil {
t.Fatal(err)
}
exem, err := macho.NewFile(exef)
if err != nil {
t.Fatal(err)
}
found := false
const LC_VERSION_MIN_MACOSX = 0x24
checkMin := func(ver uint32) {
major, minor := (ver>>16)&0xff, (ver>>8)&0xff
if major != 10 || minor < 9 {
t.Errorf("LC_VERSION_MIN_MACOSX version %d.%d < 10.9", major, minor)
}
}
for _, cmd := range exem.Loads {
raw := cmd.Raw()
type_ := exem.ByteOrder.Uint32(raw)
if type_ != LC_VERSION_MIN_MACOSX {
continue
}
osVer := exem.ByteOrder.Uint32(raw[8:])
checkMin(osVer)
sdkVer := exem.ByteOrder.Uint32(raw[12:])
checkMin(sdkVer)
found = true
break
}
if !found {
t.Errorf("no LC_VERSION_MIN_MACOSX load command found")
}
}
const Issue34788src = `
package blah
func Blah(i int) int {
a := [...]int{1, 2, 3, 4, 5, 6, 7, 8}
return a[i&7]
}
`
func TestIssue34788Android386TLSSequence(t *testing.T) {
testenv.MustHaveGoBuild(t)
// This is a cross-compilation test, so it doesn't make
// sense to run it on every GOOS/GOARCH combination. Limit
// the test to amd64 + darwin/linux.
if runtime.GOARCH != "amd64" ||
(runtime.GOOS != "darwin" && runtime.GOOS != "linux") {
t.Skip("skipping on non-{linux,darwin}/amd64 platform")
}
tmpdir, err := ioutil.TempDir("", "TestIssue34788Android386TLSSequence")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
src := filepath.Join(tmpdir, "blah.go")
err = ioutil.WriteFile(src, []byte(Issue34788src), 0666)
if err != nil {
t.Fatal(err)
}
obj := filepath.Join(tmpdir, "blah.o")
cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-o", obj, src)
cmd.Env = append(os.Environ(), "GOARCH=386", "GOOS=android")
if out, err := cmd.CombinedOutput(); err != nil {
t.Fatalf("failed to compile blah.go: %v, output: %s\n", err, out)
}
// Run objdump on the resulting object.
cmd = exec.Command(testenv.GoToolPath(t), "tool", "objdump", obj)
out, oerr := cmd.CombinedOutput()
if oerr != nil {
t.Fatalf("failed to objdump blah.o: %v, output: %s\n", oerr, out)
}
// Sift through the output; we should not be seeing any R_TLS_LE relocs.
scanner := bufio.NewScanner(bytes.NewReader(out))
for scanner.Scan() {
line := scanner.Text()
if strings.Contains(line, "R_TLS_LE") {
t.Errorf("objdump output contains unexpected R_TLS_LE reloc: %s", line)
}
}
}
const testStrictDupGoSrc = `
package main
func f()
func main() { f() }
`
const testStrictDupAsmSrc1 = `
#include "textflag.h"
TEXT ·f(SB), NOSPLIT|DUPOK, $0-0
RET
`
const testStrictDupAsmSrc2 = `
#include "textflag.h"
TEXT ·f(SB), NOSPLIT|DUPOK, $0-0
JMP 0(PC)
`
func TestStrictDup(t *testing.T) {
// Check that -strictdups flag works.
testenv.MustHaveGoBuild(t)
tmpdir, err := ioutil.TempDir("", "TestStrictDup")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
src := filepath.Join(tmpdir, "x.go")
err = ioutil.WriteFile(src, []byte(testStrictDupGoSrc), 0666)
if err != nil {
t.Fatal(err)
}
src = filepath.Join(tmpdir, "a.s")
err = ioutil.WriteFile(src, []byte(testStrictDupAsmSrc1), 0666)
if err != nil {
t.Fatal(err)
}
src = filepath.Join(tmpdir, "b.s")
err = ioutil.WriteFile(src, []byte(testStrictDupAsmSrc2), 0666)
if err != nil {
t.Fatal(err)
}
src = filepath.Join(tmpdir, "go.mod")
err = ioutil.WriteFile(src, []byte("module teststrictdup\n"), 0666)
if err != nil {
t.Fatal(err)
}
cmd := exec.Command(testenv.GoToolPath(t), "build", "-ldflags=-strictdups=1")
cmd.Dir = tmpdir
out, err := cmd.CombinedOutput()
if err != nil {
t.Errorf("linking with -strictdups=1 failed: %v", err)
}
if !bytes.Contains(out, []byte("mismatched payload")) {
t.Errorf("unexpected output:\n%s", out)
}
cmd = exec.Command(testenv.GoToolPath(t), "build", "-ldflags=-strictdups=2")
cmd.Dir = tmpdir
out, err = cmd.CombinedOutput()
if err == nil {
t.Errorf("linking with -strictdups=2 did not fail")
}
if !bytes.Contains(out, []byte("mismatched payload")) {
t.Errorf("unexpected output:\n%s", out)
}
}
func TestOldLink(t *testing.T) {
// Test that old object file format still works.
// TODO(go115newobj): delete.
testenv.MustHaveGoBuild(t)
tmpdir, err := ioutil.TempDir("", "TestOldLink")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
src := filepath.Join(tmpdir, "main.go")
err = ioutil.WriteFile(src, []byte("package main; func main(){}\n"), 0666)
if err != nil {
t.Fatal(err)
}
cmd := exec.Command(testenv.GoToolPath(t), "run", "-gcflags=all=-go115newobj=false", "-asmflags=all=-go115newobj=false", "-ldflags=-go115newobj=false", src)
if out, err := cmd.CombinedOutput(); err != nil {
t.Errorf("%v: %v:\n%s", cmd.Args, err, out)
}
}
const testFuncAlignSrc = `
package main
import (
"fmt"
"reflect"
)
func alignPc()
func main() {
addr := reflect.ValueOf(alignPc).Pointer()
if (addr % 512) != 0 {
fmt.Printf("expected 512 bytes alignment, got %v\n", addr)
} else {
fmt.Printf("PASS")
}
}
`
const testFuncAlignAsmSrc = `
#include "textflag.h"
TEXT ·alignPc(SB),NOSPLIT, $0-0
MOVD $2, R0
PCALIGN $512
MOVD $3, R1
RET
`
// TestFuncAlign verifies that the address of a function can be aligned
// with a specific value on arm64.
func TestFuncAlign(t *testing.T) {
if runtime.GOARCH != "arm64" || runtime.GOOS != "linux" {
t.Skip("skipping on non-linux/arm64 platform")
}
testenv.MustHaveGoBuild(t)
tmpdir, err := ioutil.TempDir("", "TestFuncAlign")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
src := filepath.Join(tmpdir, "falign.go")
err = ioutil.WriteFile(src, []byte(testFuncAlignSrc), 0666)
if err != nil {
t.Fatal(err)
}
src = filepath.Join(tmpdir, "falign.s")
err = ioutil.WriteFile(src, []byte(testFuncAlignAsmSrc), 0666)
if err != nil {
t.Fatal(err)
}
// Build the test program.
cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "falign")
cmd.Dir = tmpdir
out, err := cmd.CombinedOutput()
if err != nil {
t.Errorf("build failed: %v", err)
}
cmd = exec.Command(tmpdir + "/falign")
out, err = cmd.CombinedOutput()
if err != nil {
t.Errorf("failed to run with err %v, output: %s", err, out)
}
if string(out) != "PASS" {
t.Errorf("unexpected output: %s\n", out)
}
}
const helloSrc = `
package main
import "fmt"
func main() { fmt.Println("hello") }
`
func TestTrampoline(t *testing.T) {
// Test that trampoline insertion works as expected.
// For stress test, we set -debugtramp=2 flag, which sets a very low
// threshold for trampoline generation, and essentially all cross-package
// calls will use trampolines.
switch runtime.GOARCH {
case "arm", "ppc64", "ppc64le":
default:
t.Skipf("trampoline insertion is not implemented on %s", runtime.GOARCH)
}
testenv.MustHaveGoBuild(t)
tmpdir, err := ioutil.TempDir("", "TestTrampoline")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
src := filepath.Join(tmpdir, "hello.go")
err = ioutil.WriteFile(src, []byte(helloSrc), 0666)
if err != nil {
t.Fatal(err)
}
exe := filepath.Join(tmpdir, "hello.exe")
cmd := exec.Command(testenv.GoToolPath(t), "build", "-ldflags=-debugtramp=2", "-o", exe, src)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("build failed: %v\n%s", err, out)
}
cmd = exec.Command(exe)
out, err = cmd.CombinedOutput()
if err != nil {
t.Errorf("executable failed to run: %v\n%s", err, out)
}
if string(out) != "hello\n" {
t.Errorf("unexpected output:\n%s", out)
}
}
func TestIndexMismatch(t *testing.T) {
// Test that index mismatch will cause a link-time error (not run-time error).
// This shouldn't happen with "go build". We invoke the compiler and the linker
// manually, and try to "trick" the linker with an inconsistent object file.
testenv.MustHaveGoBuild(t)
tmpdir, err := ioutil.TempDir("", "TestIndexMismatch")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
aSrc := filepath.Join("testdata", "testIndexMismatch", "a.go")
bSrc := filepath.Join("testdata", "testIndexMismatch", "b.go")
mSrc := filepath.Join("testdata", "testIndexMismatch", "main.go")
aObj := filepath.Join(tmpdir, "a.o")
mObj := filepath.Join(tmpdir, "main.o")
exe := filepath.Join(tmpdir, "main.exe")
// Build a program with main package importing package a.
cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-o", aObj, aSrc)
t.Log(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("compiling a.go failed: %v\n%s", err, out)
}
cmd = exec.Command(testenv.GoToolPath(t), "tool", "compile", "-I", tmpdir, "-o", mObj, mSrc)
t.Log(cmd)
out, err = cmd.CombinedOutput()
if err != nil {
t.Fatalf("compiling main.go failed: %v\n%s", err, out)
}
cmd = exec.Command(testenv.GoToolPath(t), "tool", "link", "-L", tmpdir, "-o", exe, mObj)
t.Log(cmd)
out, err = cmd.CombinedOutput()
if err != nil {
t.Errorf("linking failed: %v\n%s", err, out)
}
// Now, overwrite a.o with the object of b.go. This should
// result in an index mismatch.
cmd = exec.Command(testenv.GoToolPath(t), "tool", "compile", "-o", aObj, bSrc)
t.Log(cmd)
out, err = cmd.CombinedOutput()
if err != nil {
t.Fatalf("compiling a.go failed: %v\n%s", err, out)
}
cmd = exec.Command(testenv.GoToolPath(t), "tool", "link", "-L", tmpdir, "-o", exe, mObj)
t.Log(cmd)
out, err = cmd.CombinedOutput()
if err == nil {
t.Fatalf("linking didn't fail")
}
if !bytes.Contains(out, []byte("fingerprint mismatch")) {
t.Errorf("did not see expected error message. out:\n%s", out)
}
}
| [
"\"GO_BUILDER_NAME\""
]
| []
| [
"GO_BUILDER_NAME"
]
| [] | ["GO_BUILDER_NAME"] | go | 1 | 0 | |
itext/src/main/java/com/itextpdf/text/FontFactoryImp.java | /*
*
* This file is part of the iText (R) project.
Copyright (c) 1998-2018 iText Group NV
* Authors: Bruno Lowagie, Paulo Soares, et al.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED BY
* ITEXT GROUP. ITEXT GROUP DISCLAIMS THE WARRANTY OF NON INFRINGEMENT
* OF THIRD PARTY RIGHTS
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://itextpdf.com/terms-of-use/
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Public License.
*
* In accordance with Section 7(b) of the GNU Affero General Public License,
* a covered work must retain the producer line in every PDF that is created
* or manipulated using iText.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the iText software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers as an ASP,
* serving PDFs on the fly in a web application, shipping iText with a closed
* source product.
*
* For more information, please contact iText Software Corp. at this
* address: [email protected]
*/
package com.itextpdf.text;
import com.itextpdf.text.Font.FontFamily;
import com.itextpdf.text.log.Level;
import com.itextpdf.text.log.Logger;
import com.itextpdf.text.log.LoggerFactory;
import com.itextpdf.text.pdf.BaseFont;
import java.io.File;
import java.io.IOException;
import java.util.*;
/**
* If you are using True Type fonts, you can declare the paths of the different ttf- and ttc-files
* to this class first and then create fonts in your code using one of the getFont methods
* without having to enter a path as a parameter.
*
* @author Bruno Lowagie
*/
public class FontFactoryImp implements FontProvider {
private static final Logger LOGGER = LoggerFactory.getLogger(FontFactoryImp.class);
/** This is a map of postscriptfontnames of True Type fonts and the path of their ttf- or ttc-file. */
private final Hashtable<String, String> trueTypeFonts = new Hashtable<String, String>();
private static String[] TTFamilyOrder = {
"3", "1", "1033",
"3", "0", "1033",
"1", "0", "0",
"0", "3", "0"
};
/** This is a map of fontfamilies. */
private final Hashtable<String, ArrayList<String>> fontFamilies = new Hashtable<String, ArrayList<String>>();
/** This is the default encoding to use. */
public String defaultEncoding = BaseFont.WINANSI;
/** This is the default value of the <VAR>embedded</VAR> variable. */
public boolean defaultEmbedding = BaseFont.NOT_EMBEDDED;
/** Creates new FontFactory */
public FontFactoryImp() {
trueTypeFonts.put(FontFactory.COURIER.toLowerCase(), FontFactory.COURIER);
trueTypeFonts.put(FontFactory.COURIER_BOLD.toLowerCase(), FontFactory.COURIER_BOLD);
trueTypeFonts.put(FontFactory.COURIER_OBLIQUE.toLowerCase(), FontFactory.COURIER_OBLIQUE);
trueTypeFonts.put(FontFactory.COURIER_BOLDOBLIQUE.toLowerCase(), FontFactory.COURIER_BOLDOBLIQUE);
trueTypeFonts.put(FontFactory.HELVETICA.toLowerCase(), FontFactory.HELVETICA);
trueTypeFonts.put(FontFactory.HELVETICA_BOLD.toLowerCase(), FontFactory.HELVETICA_BOLD);
trueTypeFonts.put(FontFactory.HELVETICA_OBLIQUE.toLowerCase(), FontFactory.HELVETICA_OBLIQUE);
trueTypeFonts.put(FontFactory.HELVETICA_BOLDOBLIQUE.toLowerCase(), FontFactory.HELVETICA_BOLDOBLIQUE);
trueTypeFonts.put(FontFactory.SYMBOL.toLowerCase(), FontFactory.SYMBOL);
trueTypeFonts.put(FontFactory.TIMES_ROMAN.toLowerCase(), FontFactory.TIMES_ROMAN);
trueTypeFonts.put(FontFactory.TIMES_BOLD.toLowerCase(), FontFactory.TIMES_BOLD);
trueTypeFonts.put(FontFactory.TIMES_ITALIC.toLowerCase(), FontFactory.TIMES_ITALIC);
trueTypeFonts.put(FontFactory.TIMES_BOLDITALIC.toLowerCase(), FontFactory.TIMES_BOLDITALIC);
trueTypeFonts.put(FontFactory.ZAPFDINGBATS.toLowerCase(), FontFactory.ZAPFDINGBATS);
ArrayList<String> tmp;
tmp = new ArrayList<String>();
tmp.add(FontFactory.COURIER);
tmp.add(FontFactory.COURIER_BOLD);
tmp.add(FontFactory.COURIER_OBLIQUE);
tmp.add(FontFactory.COURIER_BOLDOBLIQUE);
fontFamilies.put(FontFactory.COURIER.toLowerCase(), tmp);
tmp = new ArrayList<String>();
tmp.add(FontFactory.HELVETICA);
tmp.add(FontFactory.HELVETICA_BOLD);
tmp.add(FontFactory.HELVETICA_OBLIQUE);
tmp.add(FontFactory.HELVETICA_BOLDOBLIQUE);
fontFamilies.put(FontFactory.HELVETICA.toLowerCase(), tmp);
tmp = new ArrayList<String>();
tmp.add(FontFactory.SYMBOL);
fontFamilies.put(FontFactory.SYMBOL.toLowerCase(), tmp);
tmp = new ArrayList<String>();
tmp.add(FontFactory.TIMES_ROMAN);
tmp.add(FontFactory.TIMES_BOLD);
tmp.add(FontFactory.TIMES_ITALIC);
tmp.add(FontFactory.TIMES_BOLDITALIC);
fontFamilies.put(FontFactory.TIMES.toLowerCase(), tmp);
fontFamilies.put(FontFactory.TIMES_ROMAN.toLowerCase(), tmp);
tmp = new ArrayList<String>();
tmp.add(FontFactory.ZAPFDINGBATS);
fontFamilies.put(FontFactory.ZAPFDINGBATS.toLowerCase(), tmp);
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @param encoding the encoding of the font
* @param embedded true if the font is to be embedded in the PDF
* @param size the size of this font
* @param style the style of this font
* @param color the <CODE>BaseColor</CODE> of this font.
* @return the Font constructed based on the parameters
*/
public Font getFont(final String fontname, final String encoding, final boolean embedded, final float size, final int style, final BaseColor color) {
return getFont(fontname, encoding, embedded, size, style, color, true);
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @param encoding the encoding of the font
* @param embedded true if the font is to be embedded in the PDF
* @param size the size of this font
* @param style the style of this font
* @param color the <CODE>BaseColor</CODE> of this font.
* @param cached true if the font comes from the cache or is added to
* the cache if new, false if the font is always created new
* @return the Font constructed based on the parameters
*/
public Font getFont(String fontname, final String encoding, final boolean embedded, final float size, int style, final BaseColor color, final boolean cached) {
if (fontname == null) return new Font(FontFamily.UNDEFINED, size, style, color);
String lowercasefontname = fontname.toLowerCase();
ArrayList<String> tmp = fontFamilies.get(lowercasefontname);
if (tmp != null) {
synchronized (tmp) {
// some bugs were fixed here by Daniel Marczisovszky
int s = style == Font.UNDEFINED ? Font.NORMAL : style;
int fs = Font.NORMAL;
boolean found = false;
for (String f : tmp) {
String lcf = f.toLowerCase();
fs = Font.NORMAL;
if (lcf.indexOf("bold") != -1) fs |= Font.BOLD;
if (lcf.indexOf("italic") != -1 || lcf.indexOf("oblique") != -1) fs |= Font.ITALIC;
if ((s & Font.BOLDITALIC) == fs) {
fontname = f;
found = true;
break;
}
}
if (style != Font.UNDEFINED && found) {
style &= ~fs;
}
}
}
BaseFont basefont = null;
try {
basefont = getBaseFont(fontname, encoding, embedded, cached);
if (basefont == null) {
// the font is not registered as truetype font
return new Font(FontFamily.UNDEFINED, size, style, color);
}
}
catch(DocumentException de) {
// this shouldn't happen
throw new ExceptionConverter(de);
}
catch(IOException ioe) {
// the font is registered as a true type font, but the path was wrong
return new Font(FontFamily.UNDEFINED, size, style, color);
}
catch(NullPointerException npe) {
// null was entered as fontname and/or encoding
return new Font(FontFamily.UNDEFINED, size, style, color);
}
return new Font(basefont, size, style, color);
}
protected BaseFont getBaseFont(String fontname, final String encoding, final boolean embedded, final boolean cached) throws IOException, DocumentException {
BaseFont basefont = null;
try {
// the font is a type 1 font or CJK font
basefont = BaseFont.createFont(fontname, encoding, embedded, cached, null, null, true);
} catch (DocumentException de) {
}
if (basefont == null) {
// the font is a true type font or an unknown font
fontname = trueTypeFonts.get(fontname.toLowerCase());
// the font is not registered as truetype font
if (fontname != null)
basefont = BaseFont.createFont(fontname, encoding, embedded, cached, null, null);
}
return basefont;
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @param encoding the encoding of the font
* @param embedded true if the font is to be embedded in the PDF
* @param size the size of this font
* @param style the style of this font
* @return the Font constructed based on the parameters
*/
public Font getFont(final String fontname, final String encoding, final boolean embedded, final float size, final int style) {
return getFont(fontname, encoding, embedded, size, style, null);
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @param encoding the encoding of the font
* @param embedded true if the font is to be embedded in the PDF
* @param size the size of this font
* @return the Font constructed based on the parameters
*/
public Font getFont(final String fontname, final String encoding, final boolean embedded, final float size) {
return getFont(fontname, encoding, embedded, size, Font.UNDEFINED, null);
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @param encoding the encoding of the font
* @param embedded true if the font is to be embedded in the PDF
* @return the Font constructed based on the parameters
*/
public Font getFont(final String fontname, final String encoding, final boolean embedded) {
return getFont(fontname, encoding, embedded, Font.UNDEFINED, Font.UNDEFINED, null);
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @param encoding the encoding of the font
* @param size the size of this font
* @param style the style of this font
* @param color the <CODE>BaseColor</CODE> of this font.
* @return the Font constructed based on the parameters
*/
public Font getFont(final String fontname, final String encoding, final float size, final int style, final BaseColor color) {
return getFont(fontname, encoding, defaultEmbedding, size, style, color);
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @param encoding the encoding of the font
* @param size the size of this font
* @param style the style of this font
* @return the Font constructed based on the parameters
*/
public Font getFont(final String fontname, final String encoding, final float size, final int style) {
return getFont(fontname, encoding, defaultEmbedding, size, style, null);
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @param encoding the encoding of the font
* @param size the size of this font
* @return the Font constructed based on the parameters
*/
public Font getFont(final String fontname, final String encoding, final float size) {
return getFont(fontname, encoding, defaultEmbedding, size, Font.UNDEFINED, null);
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @param size the size of this font
* @param color the <CODE>BaseColor</CODE> of this font.
* @return the Font constructed based on the parameters
* @since 2.1.0
*/
public Font getFont(final String fontname, final float size, final BaseColor color) {
return getFont(fontname, defaultEncoding, defaultEmbedding, size, Font.UNDEFINED, color);
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @param encoding the encoding of the font
* @return the Font constructed based on the parameters
*/
public Font getFont(final String fontname, final String encoding) {
return getFont(fontname, encoding, defaultEmbedding, Font.UNDEFINED, Font.UNDEFINED, null);
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @param size the size of this font
* @param style the style of this font
* @param color the <CODE>BaseColor</CODE> of this font.
* @return the Font constructed based on the parameters
*/
public Font getFont(final String fontname, final float size, final int style, final BaseColor color) {
return getFont(fontname, defaultEncoding, defaultEmbedding, size, style, color);
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @param size the size of this font
* @param style the style of this font
* @return the Font constructed based on the parameters
*/
public Font getFont(final String fontname, final float size, final int style) {
return getFont(fontname, defaultEncoding, defaultEmbedding, size, style, null);
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @param size the size of this font
* @return the Font constructed based on the parameters
*/
public Font getFont(final String fontname, final float size) {
return getFont(fontname, defaultEncoding, defaultEmbedding, size, Font.UNDEFINED, null);
}
/**
* Constructs a <CODE>Font</CODE>-object.
*
* @param fontname the name of the font
* @return the Font constructed based on the parameters
*/
public Font getFont(final String fontname) {
return getFont(fontname, defaultEncoding, defaultEmbedding, Font.UNDEFINED, Font.UNDEFINED, null);
}
/**
* Register a font by giving explicitly the font family and name.
* @param familyName the font family
* @param fullName the font name
* @param path the font path
*/
public void registerFamily(final String familyName, final String fullName, final String path) {
if (path != null)
trueTypeFonts.put(fullName, path);
ArrayList<String> tmp;
synchronized (fontFamilies) {
tmp = fontFamilies.get(familyName);
if (tmp == null) {
tmp = new ArrayList<String>();
fontFamilies.put(familyName, tmp);
}
}
synchronized (tmp) {
if (!tmp.contains(fullName)) {
int fullNameLength = fullName.length();
boolean inserted = false;
for (int j = 0; j < tmp.size(); ++j) {
if (tmp.get(j).length() >= fullNameLength) {
tmp.add(j, fullName);
inserted = true;
break;
}
}
if (!inserted) {
tmp.add(fullName);
String newFullName = fullName.toLowerCase();
if (newFullName.endsWith("regular")) {
//remove "regular" at the end of the font name
newFullName = newFullName.substring(0, newFullName.length() - 7).trim();
//insert this font name at the first position for higher priority
tmp.add(0, fullName.substring(0, newFullName.length()));
}
}
}
}
}
/**
* Register a ttf- or a ttc-file.
*
* @param path the path to a ttf- or ttc-file
*/
public void register(final String path) {
register(path, null);
}
/**
* Register a font file and use an alias for the font contained in it.
*
* @param path the path to a font file
* @param alias the alias you want to use for the font
*/
public void register(final String path, final String alias) {
try {
if (path.toLowerCase().endsWith(".ttf") || path.toLowerCase().endsWith(".otf") || path.toLowerCase().indexOf(".ttc,") > 0) {
Object allNames[] = BaseFont.getAllFontNames(path, BaseFont.WINANSI, null);
trueTypeFonts.put(((String)allNames[0]).toLowerCase(), path);
if (alias != null) {
String lcAlias = alias.toLowerCase();
trueTypeFonts.put(lcAlias, path);
if (lcAlias.endsWith("regular")) {
//do this job to give higher priority to regular fonts in comparison with light, narrow, etc
saveCopyOfRegularFont(lcAlias, path);
}
}
// register all the font names with all the locales
String[][] names = (String[][])allNames[2]; //full name
for (String[] name : names) {
String lcName = name[3].toLowerCase();
trueTypeFonts.put(lcName, path);
if (lcName.endsWith("regular")) {
//do this job to give higher priority to regular fonts in comparison with light, narrow, etc
saveCopyOfRegularFont(lcName, path);
}
}
String fullName = null;
String familyName = null;
names = (String[][])allNames[1]; //family name
for (int k = 0; k < TTFamilyOrder.length; k += 3) {
for (String[] name : names) {
if (TTFamilyOrder[k].equals(name[0]) && TTFamilyOrder[k + 1].equals(name[1]) && TTFamilyOrder[k + 2].equals(name[2])) {
familyName = name[3].toLowerCase();
k = TTFamilyOrder.length;
break;
}
}
}
if (familyName != null) {
String lastName = "";
names = (String[][])allNames[2]; //full name
for (String[] name : names) {
for (int k = 0; k < TTFamilyOrder.length; k += 3) {
if (TTFamilyOrder[k].equals(name[0]) && TTFamilyOrder[k + 1].equals(name[1]) && TTFamilyOrder[k + 2].equals(name[2])) {
fullName = name[3];
if (fullName.equals(lastName))
continue;
lastName = fullName;
registerFamily(familyName, fullName, null);
break;
}
}
}
}
}
else if (path.toLowerCase().endsWith(".ttc")) {
if (alias != null)
LOGGER.error("You can't define an alias for a true type collection.");
String[] names = BaseFont.enumerateTTCNames(path);
for (int i = 0; i < names.length; i++) {
register(path + "," + i);
}
}
else if (path.toLowerCase().endsWith(".afm") || path.toLowerCase().endsWith(".pfm")) {
BaseFont bf = BaseFont.createFont(path, BaseFont.CP1252, false);
String fullName = bf.getFullFontName()[0][3].toLowerCase();
String familyName = bf.getFamilyFontName()[0][3].toLowerCase();
String psName = bf.getPostscriptFontName().toLowerCase();
registerFamily(familyName, fullName, null);
trueTypeFonts.put(psName, path);
trueTypeFonts.put(fullName, path);
}
if (LOGGER.isLogging(Level.TRACE)) {
LOGGER.trace(String.format("Registered %s", path));
}
}
catch(DocumentException de) {
// this shouldn't happen
throw new ExceptionConverter(de);
}
catch(IOException ioe) {
throw new ExceptionConverter(ioe);
}
}
// Removes the trailing "regular" from the font name and registers the shortened alias.
// This gives higher priority to regular fonts in comparison with light, narrow, etc.
// Don't use this method for non-regular fonts!
protected boolean saveCopyOfRegularFont(String regularFontName, String path) {
//remove "regular" at the end of the font name
String alias = regularFontName.substring(0, regularFontName.length() - 7).trim();
if (!trueTypeFonts.containsKey(alias)) {
trueTypeFonts.put(alias, path);
return true;
}
return false;
}
/** Register all the fonts in a directory.
* @param dir the directory
* @return the number of fonts registered
*/
public int registerDirectory(final String dir) {
return registerDirectory(dir, false);
}
/**
* Register all the fonts in a directory and possibly its subdirectories.
* @param dir the directory
* @param scanSubdirectories recursively scan subdirectories if <code>true</code>
* @return the number of fonts registered
* @since 2.1.2
*/
public int registerDirectory(final String dir, final boolean scanSubdirectories) {
if (LOGGER.isLogging(Level.DEBUG)) {
LOGGER.debug(String.format("Registering directory %s, looking for fonts", dir));
}
int count = 0;
try {
File file = new File(dir);
if (!file.exists() || !file.isDirectory())
return 0;
String files[] = file.list();
if (files == null)
return 0;
for (int k = 0; k < files.length; ++k) {
try {
file = new File(dir, files[k]);
if (file.isDirectory()) {
if (scanSubdirectories) {
count += registerDirectory(file.getAbsolutePath(), true);
}
} else {
String name = file.getPath();
String suffix = name.length() < 4 ? null : name.substring(name.length() - 4).toLowerCase();
if (".afm".equals(suffix) || ".pfm".equals(suffix)) {
/* Only register Type 1 fonts with matching .pfb files */
File pfb = new File(name.substring(0, name.length() - 4) + ".pfb");
if (pfb.exists()) {
register(name, null);
++count;
}
} else if (".ttf".equals(suffix) || ".otf".equals(suffix) || ".ttc".equals(suffix)) {
register(name, null);
++count;
}
}
}
catch (Exception e) {
//empty on purpose
}
}
}
catch (Exception e) {
//empty on purpose
}
return count;
}
/** Register fonts in some probable directories. It usually works in Windows,
* Linux and Solaris.
* @return the number of fonts registered
*/
public int registerDirectories() {
int count = 0;
String windir = System.getenv("windir");
String fileseparator = System.getProperty("file.separator");
if (windir != null && fileseparator != null) {
count += registerDirectory(windir + fileseparator + "fonts");
}
count += registerDirectory("/usr/share/X11/fonts", true);
count += registerDirectory("/usr/X/lib/X11/fonts", true);
count += registerDirectory("/usr/openwin/lib/X11/fonts", true);
count += registerDirectory("/usr/share/fonts", true);
count += registerDirectory("/usr/X11R6/lib/X11/fonts", true);
count += registerDirectory("/Library/Fonts");
count += registerDirectory("/System/Library/Fonts");
return count;
}
/**
* Gets a set of registered fontnames.
* @return a set of registered fonts
*/
public Set<String> getRegisteredFonts() {
return trueTypeFonts.keySet();
}
/**
* Gets a set of registered fontnames.
* @return a set of registered font families
*/
public Set<String> getRegisteredFamilies() {
return fontFamilies.keySet();
}
/**
* Checks if a certain font is registered.
*
* @param fontname the name of the font that has to be checked.
* @return true if the font is found
*/
public boolean isRegistered(final String fontname) {
return trueTypeFonts.containsKey(fontname.toLowerCase());
}
}
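// Illustrative usage sketch (not part of the original source): register a
// TrueType file and then request a Font by one of its registered names.
// The font path and name below are made-up examples.
//
//   FontFactoryImp factory = new FontFactoryImp();
//   factory.register("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf");
//   Font body = factory.getFont("DejaVu Sans", BaseFont.WINANSI, true, 12f, Font.NORMAL, BaseColor.BLACK);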
| [
"\"windir\""
]
| []
| [
"windir"
]
| [] | ["windir"] | java | 1 | 0 | |
mail/mail.go | package mail
import (
"log"
"net/smtp"
"os"
)
const recipient = "[email protected]"
// Mail struct
type Mail struct {
Name string
}
// Check verifies that ASU's mail server is able to send mail
func (m Mail) Check() []byte {
var login string
var pass string
if m.Name == "mail" {
login = os.Getenv("MAIL_LOGIN")
pass = os.Getenv("MAIL_PASS")
} else {
login = os.Getenv("MX_LOGIN")
pass = os.Getenv("MX_PASS")
}
auth := smtp.PlainAuth("", login, pass, m.Name+".asu.ru")
to := []string{recipient}
msg := []byte("To: " + recipient + "\r\n" +
"Subject: Mail is working!\r\n" +
"\r\n" +
"There is nothing interesting.\r\n")
err := smtp.SendMail(m.Name+".asu.ru:25", auth, "[email protected]", to, msg)
if err != nil {
log.Println("unable to send email. ", err)
return []byte("false")
}
log.Println("Почта на " + m.Name + ".asu.ru успешно работает!")
return []byte("true")
}
// GetName returns name of the mail server
func (m Mail) GetName() string {
return m.Name
}
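// Illustrative usage sketch (not part of the original source): credentials are
// read from environment variables, so a caller only supplies the subdomain name.
//
//	m := Mail{Name: "mail"}
//	status := m.Check() // []byte("true") on success, []byte("false") otherwise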
| [
"\"MAIL_LOGIN\"",
"\"MAIL_PASS\"",
"\"MX_LOGIN\"",
"\"MX_PASS\""
]
| []
| [
"MAIL_LOGIN",
"MX_LOGIN",
"MAIL_PASS",
"MX_PASS"
]
| [] | ["MAIL_LOGIN", "MX_LOGIN", "MAIL_PASS", "MX_PASS"] | go | 4 | 0 | |
cmd/serviceDispatch/main_test.go | package main
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"time"
"github.com/go-playground/validator"
"github.com/julienschmidt/httprouter"
"github.com/peterpla/lead-expert/pkg/config"
"github.com/peterpla/lead-expert/pkg/database"
"github.com/peterpla/lead-expert/pkg/queue"
)
func TestServiceDispatch(t *testing.T) {
cfg := config.GetConfigPointer()
servicePrefix := "service-dispatch-dot-" // <---- change to match service!!
port := cfg.TaskServiceDispatchPort // <---- change to match service!!
repo = database.NewFirestoreRequestRepository(cfg.ProjectID, cfg.DatabaseRequests)
validate = validator.New()
type test struct {
name string
endpoint string
body string
respBody string
status int
}
jsonBody := fmt.Sprintf("{ \"customer_id\": %7d, \"media_uri\": %q, \"accepted_at\": %q }",
1234567, "gs://elated-practice-224603.appspot.com/audio_uploads/audio-01.mp3", time.Now().UTC().Format(time.RFC3339Nano))
tests := []test{
// valid
{name: "valid POST /task_handler",
endpoint: "/task_handler",
body: jsonBody,
status: http.StatusOK},
}
qi = queue.QueueInfo{}
q = queue.NewNullQueue(&qi) // use null queue, requests thrown away on exit
// q = queue.NewGCTQueue(&qi) // use Google Cloud Tasks
qs = queue.NewService(q)
prefix := fmt.Sprintf("http://localhost:%s", port)
if cfg.IsGAE {
prefix = fmt.Sprintf("https://%s%s.appspot.com", servicePrefix, os.Getenv("PROJECT_ID"))
}
for _, tc := range tests {
url := prefix + tc.endpoint
// log.Printf("Test %s: %s", tc.name, url)
router := httprouter.New()
router.POST("/task_handler", taskHandler(q))
// build the POST request with custom header
theRequest, err := http.NewRequest("POST", url, strings.NewReader(tc.body))
if err != nil {
t.Fatal(err)
}
theRequest.Header.Set("X-Appengine-Taskname", "localTask")
theRequest.Header.Set("X-Appengine-Queuename", "localQueue")
// response recorder
rr := httptest.NewRecorder()
// send the request
router.ServeHTTP(rr, theRequest)
if tc.status != rr.Code {
t.Errorf("%s: %q expected status code %v, got %v", tc.name, tc.endpoint, tc.status, rr.Code)
}
if tc.respBody != "" {
var b []byte
if b, err = ioutil.ReadAll(rr.Body); err != nil {
t.Fatalf("%s: ReadAll error: %v", tc.name, err)
}
t.Errorf("%s: expected blank body, got %q", tc.name, string(b))
}
}
}
| [
"\"PROJECT_ID\""
]
| []
| [
"PROJECT_ID"
]
| [] | ["PROJECT_ID"] | go | 1 | 0 | |
pwkit/environments/casa/util.py | # -*- mode: python; coding: utf-8 -*-
# Copyright 2015-2017 Peter Williams <[email protected]> and collaborators.
# Licensed under the MIT License.
"""This module provides low-level tools and utilities for interacting with the
``casac`` module provided by CASA.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = str('''INVERSE_C_MS INVERSE_C_MNS pol_names pol_to_miriad msselect_keys
datadir logger forkandlog sanitize_unicode tools''').split()
import six
from ... import text_type
# Some constants that can be useful.
INVERSE_C_MS = 3.3356409519815204e-09 # inverse of the speed of light in m/s, i.e. seconds per meter
INVERSE_C_MNS = 3.3356409519815204 # inverse of the speed of light in m/ns, i.e. nanoseconds per meter
pol_names = {
0: '?',
1: 'I', 2: 'Q', 3: 'U', 4: 'V',
5: 'RR', 6: 'RL', 7: 'LR', 8: 'LL',
9: 'XX', 10: 'XY', 11: 'YX', 12: 'YY',
13: 'RX', 14: 'RY', 15: 'LX', 16: 'LY',
17: 'XR', 18: 'XL', 19: 'YR', 20: 'YL',
21: 'PP', 22: 'PQ', 23: 'QP', 24: 'QQ',
25: 'RCirc', 26: 'Lcirc', 27: 'Lin', 28: 'Ptot', 29: 'Plin',
30: 'PFtot', 31: 'PFlin', 32: 'Pang',
}
pol_to_miriad = {
# see mirtask.util for the MIRIAD magic numbers.
1: 1, 2: 2, 3: 3, 4: 4, # IQUV
5: -1, 6: -3, 7: -4, 8: -2, # R/L
9: -5, 10: -7, 11: -8, 12: -6, # X/Y
# rest are inexpressible
}
pol_is_intensity = {
0: False,
1: True, 2: False, 3: False, 4: False, # IQUV
5: True, 6: False, 7: False, 8: True, # RR RL LR LL
9: True, 10: False, 11: False, 12: True, # XX XY YX YY
13: False, 14: False, 15: False, 16: False, # RX RY LX LY
17: False, 18: False, 19: False, 20: False, # XR XL YR YL
21: True, 22: False, 23: False, 24: True, # PP PQ QP QQ
25: False, 26: False, 27: False, 28: False, 29: False,
30: False, 31: False, 32: False,
}
# "polarization" is technically valid as an MS selection, but it pretty much
# doesn't do what you'd want since records generally contain multiple pols.
# ms.selectpolarization() should be used instead. Maybe ditto for spw?
msselect_keys = frozenset('array baseline field observation '
'scan scanintent spw taql time uvdist'.split())
def sanitize_unicode(item):
"""Safely pass string values to the CASA tools.
item
A value to be passed to a CASA tool.
In Python 2, the bindings to CASA tasks expect to receive all string values
as binary data (:class:`str`) and not Unicode. But :mod:`pwkit` often uses
the ``from __future__ import unicode_literals`` statement to prepare for
Python 3 compatibility, and other Python modules are getting better about
using Unicode consistently, so more and more module code ends up using
Unicode strings in cases where they might get exposed to CASA. Doing so
will lead to errors.
This helper function converts Unicode into UTF-8 encoded bytes for
arguments that you might pass to a CASA tool. It will leave non-strings
unchanged and recursively transform collections, so you can safely use it
just about anywhere.
I usually import this as just ``b`` and write ``tool.method(b(arg))``, in
analogy with the ``b''`` byte string syntax. This leads to code such as::
from pwkit.environments.casa.util import tools, sanitize_unicode as b
tb = tools.table()
path = u'data.ms'
tb.open(path) # => raises exception
tb.open(b(path)) # => works
"""
if isinstance(item, text_type):
return item.encode('utf8')
if isinstance(item, dict):
return dict((sanitize_unicode(k), sanitize_unicode(v)) for k, v in six.iteritems(item))
if isinstance(item,(list, tuple)):
return item.__class__(sanitize_unicode(x) for x in item)
from ...io import Path
if isinstance(item, Path):
return str(item)
return item
# Finding the data directory
def datadir(*subdirs):
"""Get a path within the CASA data directory.
subdirs
Extra elements to append to the returned path.
This function locates the directory where CASA resource data files (tables
of time offsets, calibrator models, etc.) are stored. If called with no
arguments, it simply returns that path. If arguments are provided, they are
appended to the returned path using :func:`os.path.join`, making it easy to
construct the names of specific data files. For instance::
from pwkit.environments.casa import util
cal_image_path = util.datadir('nrao', 'VLA', 'CalModels', '3C286_C.im')
tb = util.tools.image()
tb.open(cal_image_path)
"""
import os.path
data = None
if 'CASAPATH' in os.environ:
data = os.path.join(os.environ['CASAPATH'].split()[0], 'data')
if data is None:
# The Conda CASA directory layout:
try:
import casadef
except ImportError:
pass
else:
data = os.path.join(os.path.dirname(casadef.task_directory), 'data')
if not os.path.isdir(data):
# Sigh, hack for CASA 4.7 + Conda; should be straightened out:
dn = os.path.dirname
data = os.path.join(dn(dn(dn(casadef.task_directory))), 'lib', 'casa', 'data')
if not os.path.isdir(data):
data = None
if data is None:
import casac
prevp = None
p = os.path.dirname(casac.__file__)
while len(p) and p != prevp:
data = os.path.join(p, 'data')
if os.path.isdir(data):
break
prevp = p
p = os.path.dirname(p)
if not os.path.isdir(data):
raise RuntimeError('cannot identify CASA data directory')
return os.path.join(data, *subdirs)
# Trying to use the logging facility in a sane way.
#
# As soon as you create a logsink, it creates a file called casapy.log.
# So we do some junk to not leave turds all around the filesystem.
def _rmtree_error(func, path, excinfo):
from ...cli import warn
warn('couldn\'t delete temporary file %s: %s (%s)', path, excinfo[0], func)
def logger(filter='WARN'):
"""Set up CASA to write log messages to standard output.
filter
The log level filter: less urgent messages will not be shown. Valid values
are strings: "DEBUG1", "INFO5", ... "INFO1", "INFO", "WARN", "SEVERE".
This function creates and returns a CASA “log sink” object that is
configured to write to standard output. The default CASA implementation
would *always* create a file named ``casapy.log`` in the current
directory; this function safely prevents such a file from being left
around. This is particularly important if you don’t have write permissions
to the current directory.
"""
import os, shutil, tempfile
cwd = os.getcwd()
tempdir = None
try:
tempdir = tempfile.mkdtemp(prefix='casautil')
try:
os.chdir(tempdir)
sink = tools.logsink()
sink.setlogfile(sanitize_unicode(os.devnull))
try:
os.unlink('casapy.log')
except OSError as e:
if e.errno != 2:
raise
# otherwise, it's an ENOENT, in which case, no worries.
finally:
os.chdir(cwd)
finally:
if tempdir is not None:
shutil.rmtree(tempdir, onerror=_rmtree_error)
sink.showconsole(True)
sink.setglobal(True)
sink.filter(sanitize_unicode(filter.upper()))
return sink
def forkandlog(function, filter='INFO5', debug=False):
"""Fork a child process and read its CASA log output.
function
A function to run in the child process
filter
The CASA log level filter to apply in the child process: less urgent
messages will not be shown. Valid values are strings: "DEBUG1", "INFO5",
... "INFO1", "INFO", "WARN", "SEVERE".
debug
If true, the standard output and error of the child process are *not*
redirected to /dev/null.
Some CASA tools produce important results that are *only* provided via log
messages. This is a problem for automation, since there’s no way for
Python code to intercept those log messages and extract the results of
interest. This function provides a framework for working around this
limitation: by forking a child process and sending its log output to a
pipe, the parent process can capture the log messages.
This function is a generator. It yields lines from the child process’ CASA
log output.
Because the child process is a fork of the parent, it inherits a complete
clone of the parent’s state at the time of forking. That means that the
*function* argument you pass it can do just about anything you’d do in a
regular program.
The child process’ standard output and error streams are redirected to
``/dev/null`` unless the *debug* argument is true. Note that the CASA log
output is redirected to a pipe that is neither of these streams. So, if
the function raises an unhandled Python exception, the Python traceback
will not pollute the CASA log output. But, by the same token, the calling
program will not be able to detect that the exception occurred except by
its impact on the expected log output.
"""
import sys, os
readfd, writefd = os.pipe()
pid = os.fork()
if pid == 0:
# Child process. We never leave this branch.
#
# Log messages of priority >WARN are sent to stderr regardless of the
# status of log.showconsole(). The idea is for this subprocess to be
# something super lightweight and constrained, so it seems best to
# nullify stderr, and stdout, to not pollute the output of the calling
# process.
#
# I thought of using the default logger() setup and dup2'ing stderr to
# the pipe fd, but then if anything else gets printed to stderr (e.g.
# Python exception info), it'll get sent along the pipe too. The
# caller would have to be much more complex to be able to detect and
# handle such output.
os.close(readfd)
if not debug:
f = open(os.devnull, 'w')
os.dup2(f.fileno(), 1)
os.dup2(f.fileno(), 2)
sink = logger(filter=filter)
sink.setlogfile(b'/dev/fd/%d' % writefd)
function(sink)
sys.exit(0)
# Original process.
os.close(writefd)
with os.fdopen(readfd) as readhandle:
for line in readhandle:
yield line
info = os.waitpid(pid, 0)
if info[1]:
# Because we're a generator, this is the only way for us to signal if
# the process died. We could be rewritten as a context manager.
e = RuntimeError('logging child process PID %d exited '
'with error code %d' % tuple(info))
e.pid, e.exitcode = info
raise e
# Tool factories.
class _Tools(object):
"""This class is structured so that it supports useful tab-completion
interactively, but also so that new tools can be constructed if the
underlying library provides them.
"""
_builtinNames = '''agentflagger atmosphere calanalysis calibrater calplot
componentlist coordsys deconvolver fitter flagger
functional image imagepol imager logsink measures
msmetadata ms msplot mstransformer plotms regionmanager
simulator spectralline quanta table tableplot utils
vlafiller vpmanager'''.split()
def __getattribute__(self, n):
"""Returns factories, not instances."""
# We need to make this __getattribute__, not __getattr__, only because
# we set the builtin names in the class __dict__ to enable tab-completion.
import casac
if hasattr(casac, 'casac'): # casapy >= 4.0?
t = getattr(casac.casac, n, None)
if t is None:
raise AttributeError('tool "%s" not present' % n)
return t
else:
try:
return casac.homefinder.find_home_by_name(n + 'Home').create
except Exception:
# raised exception is class 'homefinder.error'; it appears unavailable
# on the Python layer
raise AttributeError('tool "%s" not present' % n)
for n in _Tools._builtinNames:
setattr(_Tools, n, None) # ease autocompletion
tools = _Tools()
| [] | [] | ["CASAPATH"] | [] | ["CASAPATH"] | python | 1 | 0 |
main.go | package main
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"runtime"
"runtime/debug"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/fabiolb/fabio/admin"
"github.com/fabiolb/fabio/cert"
"github.com/fabiolb/fabio/config"
"github.com/fabiolb/fabio/exit"
"github.com/fabiolb/fabio/iam"
"github.com/fabiolb/fabio/logger"
"github.com/fabiolb/fabio/metrics"
"github.com/fabiolb/fabio/proxy"
"github.com/fabiolb/fabio/proxy/tcp"
"github.com/fabiolb/fabio/registry"
"github.com/fabiolb/fabio/registry/consul"
"github.com/fabiolb/fabio/registry/file"
"github.com/fabiolb/fabio/registry/static"
"github.com/fabiolb/fabio/route"
"github.com/pkg/profile"
dmp "github.com/sergi/go-diff/diffmatchpatch"
)
// version contains the version number
//
// It is set by build/release.sh for tagged releases
// so that 'go get' just works.
//
// It is also set by the linker when fabio
// is built via the Makefile or the build/docker.sh
// script to ensure the correct version number
var version = "1.5.1"
var shuttingDown int32
func main() {
cfg, err := config.Load(os.Args, os.Environ())
if err != nil {
exit.Fatalf("[FATAL] %s. %s", version, err)
}
if cfg == nil {
fmt.Println(version)
return
}
log.Printf("[INFO] Runtime config\n" + toJSON(cfg))
log.Printf("[INFO] Version %s starting", version)
log.Printf("[INFO] Go runtime is %s", runtime.Version())
// setup profiling if enabled
var prof interface {
Stop()
}
if cfg.ProfileMode != "" {
var mode func(*profile.Profile)
switch cfg.ProfileMode {
case "":
// do nothing
case "cpu":
mode = profile.CPUProfile
case "mem":
mode = profile.MemProfile
case "mutex":
mode = profile.MutexProfile
case "block":
mode = profile.BlockProfile
default:
log.Fatalf("[FATAL] Invalid profile mode %q", cfg.ProfileMode)
}
prof = profile.Start(mode, profile.ProfilePath(cfg.ProfilePath), profile.NoShutdownHook)
log.Printf("[INFO] Profile mode %q", cfg.ProfileMode)
log.Printf("[INFO] Profile path %q", cfg.ProfilePath)
}
exit.Listen(func(s os.Signal) {
atomic.StoreInt32(&shuttingDown, 1)
proxy.Shutdown(cfg.Proxy.ShutdownWait)
if prof != nil {
prof.Stop()
}
if registry.Default == nil {
return
}
registry.Default.Deregister()
})
// init metrics early since that creates the global metric registries
// that are used by other parts of the code.
initMetrics(cfg)
initRuntime(cfg)
initBackend(cfg)
startAdmin(cfg)
first := make(chan bool)
go watchBackend(cfg, first)
log.Print("[INFO] Waiting for first routing table")
<-first
// create proxies after metrics since they use the metrics registry.
startServers(cfg)
exit.Wait()
log.Print("[INFO] Down")
}
func newHTTPProxy(cfg *config.Config) http.Handler {
var w io.Writer
switch cfg.Log.AccessTarget {
case "":
log.Printf("[INFO] Access logging disabled")
case "stdout":
log.Printf("[INFO] Writing access log to stdout")
w = os.Stdout
default:
exit.Fatal("[FATAL] Invalid access log target ", cfg.Log.AccessTarget)
}
format := cfg.Log.AccessFormat
switch format {
case "common":
format = logger.CommonFormat
case "combined":
format = logger.CombinedFormat
}
l, err := logger.New(w, format)
if err != nil {
exit.Fatal("[FATAL] Invalid log format: ", err)
}
var aaa iam.IAM
if cfg.Auth.Enabled {
if aaa, err = iam.New(cfg.Auth); err != nil {
exit.Fatal("[FATAL] Failed to initialize auth: ", err)
}
}
pick := route.Picker[cfg.Proxy.Strategy]
match := route.Matcher[cfg.Proxy.Matcher]
notFound := metrics.DefaultRegistry.GetCounter("notfound")
log.Printf("[INFO] Using routing strategy %q", cfg.Proxy.Strategy)
log.Printf("[INFO] Using route matching %q", cfg.Proxy.Matcher)
newTransport := func(tlscfg *tls.Config) *http.Transport {
return &http.Transport{
ResponseHeaderTimeout: cfg.Proxy.ResponseHeaderTimeout,
MaxIdleConnsPerHost: cfg.Proxy.MaxConn,
Dial: (&net.Dialer{
Timeout: cfg.Proxy.DialTimeout,
KeepAlive: cfg.Proxy.KeepAliveTimeout,
}).Dial,
TLSClientConfig: tlscfg,
}
}
return &proxy.HTTPProxy{
Config: cfg.Proxy,
Transport: newTransport(nil),
InsecureTransport: newTransport(&tls.Config{InsecureSkipVerify: true}),
Lookup: func(r *http.Request) *route.Target {
t := route.GetTable().Lookup(r, r.Header.Get("trace"), pick, match)
if t == nil {
notFound.Inc(1)
log.Print("[WARN] No route for ", r.Host, r.URL)
}
return t
},
Requests: metrics.DefaultRegistry.GetTimer("requests"),
Noroute: metrics.DefaultRegistry.GetCounter("notfound"),
Logger: l,
IAM: aaa,
}
}
func lookupHostFn(cfg *config.Config) func(string) string {
pick := route.Picker[cfg.Proxy.Strategy]
notFound := metrics.DefaultRegistry.GetCounter("notfound")
return func(host string) string {
t := route.GetTable().LookupHost(host, pick)
if t == nil {
notFound.Inc(1)
log.Print("[WARN] No route for ", host)
return ""
}
return t.URL.Host
}
}
func makeTLSConfig(l config.Listen) (*tls.Config, error) {
if l.CertSource.Name == "" {
return nil, nil
}
src, err := cert.NewSource(l.CertSource)
if err != nil {
return nil, fmt.Errorf("Failed to create cert source %s. %s", l.CertSource.Name, err)
}
tlscfg, err := cert.TLSConfig(src, l.StrictMatch, l.TLSMinVersion, l.TLSMaxVersion, l.TLSCiphers)
if err != nil {
return nil, fmt.Errorf("[FATAL] Failed to create TLS config for cert source %s. %s", l.CertSource.Name, err)
}
return tlscfg, nil
}
func startAdmin(cfg *config.Config) {
log.Printf("[INFO] Admin server access mode %q", cfg.UI.Access)
log.Printf("[INFO] Admin server listening on %q", cfg.UI.Listen.Addr)
go func() {
l := cfg.UI.Listen
tlscfg, err := makeTLSConfig(l)
if err != nil {
exit.Fatal("[FATAL] ", err)
}
srv := &admin.Server{
Access: cfg.UI.Access,
Color: cfg.UI.Color,
Title: cfg.UI.Title,
Version: version,
Commands: route.Commands,
Cfg: cfg,
}
if err := srv.ListenAndServe(l, tlscfg); err != nil {
exit.Fatal("[FATAL] ui: ", err)
}
}()
}
func startServers(cfg *config.Config) {
for _, l := range cfg.Listen {
l := l // capture loop var for go routines below
tlscfg, err := makeTLSConfig(l)
if err != nil {
exit.Fatal("[FATAL] ", err)
}
log.Printf("[INFO] %s proxy listening on %s", strings.ToUpper(l.Proto), l.Addr)
if tlscfg != nil && tlscfg.ClientAuth == tls.RequireAndVerifyClientCert {
log.Printf("[INFO] Client certificate authentication enabled on %s", l.Addr)
}
switch l.Proto {
case "http", "https":
go func() {
h := newHTTPProxy(cfg)
if err := proxy.ListenAndServeHTTP(l, h, tlscfg); err != nil {
exit.Fatal("[FATAL] ", err)
}
}()
case "tcp":
go func() {
h := &tcp.Proxy{cfg.Proxy.DialTimeout, lookupHostFn(cfg)}
if err := proxy.ListenAndServeTCP(l, h, tlscfg); err != nil {
exit.Fatal("[FATAL] ", err)
}
}()
case "tcp+sni":
go func() {
h := &tcp.SNIProxy{cfg.Proxy.DialTimeout, lookupHostFn(cfg)}
if err := proxy.ListenAndServeTCP(l, h, tlscfg); err != nil {
exit.Fatal("[FATAL] ", err)
}
}()
default:
exit.Fatal("[FATAL] Invalid protocol ", l.Proto)
}
}
}
func initMetrics(cfg *config.Config) {
if cfg.Metrics.Target == "" {
log.Printf("[INFO] Metrics disabled")
return
}
var err error
if metrics.DefaultRegistry, err = metrics.NewRegistry(cfg.Metrics); err != nil {
exit.Fatal("[FATAL] ", err)
}
if route.ServiceRegistry, err = metrics.NewRegistry(cfg.Metrics); err != nil {
exit.Fatal("[FATAL] ", err)
}
}
func initRuntime(cfg *config.Config) {
if os.Getenv("GOGC") == "" {
log.Print("[INFO] Setting GOGC=", cfg.Runtime.GOGC)
debug.SetGCPercent(cfg.Runtime.GOGC)
} else {
log.Print("[INFO] Using GOGC=", os.Getenv("GOGC"), " from env")
}
if os.Getenv("GOMAXPROCS") == "" {
log.Print("[INFO] Setting GOMAXPROCS=", cfg.Runtime.GOMAXPROCS)
runtime.GOMAXPROCS(cfg.Runtime.GOMAXPROCS)
} else {
log.Print("[INFO] Using GOMAXPROCS=", os.Getenv("GOMAXPROCS"), " from env")
}
}
func initBackend(cfg *config.Config) {
var deadline = time.Now().Add(cfg.Registry.Timeout)
var err error
for {
switch cfg.Registry.Backend {
case "file":
registry.Default, err = file.NewBackend(cfg.Registry.File.Path)
case "static":
registry.Default, err = static.NewBackend(cfg.Registry.Static.Routes)
case "consul":
registry.Default, err = consul.NewBackend(&cfg.Registry.Consul)
default:
exit.Fatal("[FATAL] Unknown registry backend ", cfg.Registry.Backend)
}
if err == nil {
if err = registry.Default.Register(); err == nil {
return
}
}
log.Print("[WARN] Error initializing backend. ", err)
if time.Now().After(deadline) {
exit.Fatal("[FATAL] Timeout registering backend.")
}
time.Sleep(cfg.Registry.Retry)
if atomic.LoadInt32(&shuttingDown) > 0 {
exit.Exit(1)
}
}
}
func watchBackend(cfg *config.Config, first chan bool) {
var (
last string
svccfg string
mancfg string
once sync.Once
)
svc := registry.Default.WatchServices()
man := registry.Default.WatchManual()
for {
select {
case svccfg = <-svc:
case mancfg = <-man:
}
// manual config overrides service config
// order matters
next := svccfg + "\n" + mancfg
if next == last {
continue
}
t, err := route.NewTable(next)
if err != nil {
log.Printf("[WARN] %s", err)
continue
}
route.SetTable(t)
logRoutes(t, last, next, cfg.Log.RoutesFormat)
last = next
once.Do(func() { close(first) })
}
}
func logRoutes(t route.Table, last, next, format string) {
fmtDiff := func(diffs []dmp.Diff) string {
var b bytes.Buffer
for _, d := range diffs {
t := strings.TrimSpace(d.Text)
if t == "" {
continue
}
switch d.Type {
case dmp.DiffDelete:
b.WriteString("- ")
b.WriteString(strings.Replace(t, "\n", "\n- ", -1))
case dmp.DiffInsert:
b.WriteString("+ ")
b.WriteString(strings.Replace(t, "\n", "\n+ ", -1))
}
}
return b.String()
}
const defFormat = "delta"
switch format {
case "detail":
log.Printf("[INFO] Updated config to\n%s", t.Dump())
case "delta":
if delta := fmtDiff(dmp.New().DiffMain(last, next, true)); delta != "" {
log.Printf("[INFO] Config updates\n%s", delta)
}
case "all":
log.Printf("[INFO] Updated config to\n%s", next)
default:
log.Printf("[WARN] Invalid route format %q. Defaulting to %q", format, defFormat)
logRoutes(t, last, next, defFormat)
}
}
func toJSON(v interface{}) string {
data, err := json.MarshalIndent(v, "", " ")
if err != nil {
panic("json: " + err.Error())
}
return string(data)
}
| ["\"GOGC\"", "\"GOGC\"", "\"GOMAXPROCS\"", "\"GOMAXPROCS\""] | [] | ["GOGC", "GOMAXPROCS"] | [] | ["GOGC", "GOMAXPROCS"] | go | 2 | 0 |
autosub/__init__.py | """
Defines autosub's main functionality.
"""
# !/usr/bin/env python
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import audioop
import math
import multiprocessing
import os
import subprocess
import sys
import tempfile
import wave
import requests
import execjs
import ssl
import json
ssl._create_default_https_context = ssl._create_unverified_context
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from googleapiclient.discovery import build
from progressbar import ProgressBar, Percentage, Bar, ETA
from autosub.constants import (
LANGUAGE_CODES, GOOGLE_SPEECH_API_KEY, GOOGLE_SPEECH_API_URL,
)
from autosub.formatters import FORMATTERS
DEFAULT_SUBTITLE_FORMAT = 'srt'
DEFAULT_CONCURRENCY = 10
DEFAULT_SRC_LANGUAGE = 'en'
DEFAULT_DST_LANGUAGE = 'en'
def percentile(arr, percent):
"""
Calculate the given percentile of arr.
"""
arr = sorted(arr)
index = (len(arr) - 1) * percent
floor = math.floor(index)
ceil = math.ceil(index)
if floor == ceil:
return arr[int(index)]
low_value = arr[int(floor)] * (ceil - index)
high_value = arr[int(ceil)] * (index - floor)
return low_value + high_value
class FLACConverter(object): # pylint: disable=too-few-public-methods
"""
Class for converting a region of an input audio or video file into a FLAC audio file
"""
def __init__(self, source_path, include_before=0.25, include_after=0.25):
self.source_path = source_path
self.include_before = include_before
self.include_after = include_after
def __call__(self, region):
try:
start, end = region
start = max(0, start - self.include_before)
end += self.include_after
temp = tempfile.NamedTemporaryFile(suffix='.flac', delete=False)
command = ["ffmpeg", "-ss", str(start), "-t", str(end - start),
"-y", "-i", self.source_path,
"-loglevel", "error", temp.name]
use_shell = True if os.name == "nt" else False
subprocess.check_output(command, stdin=open(os.devnull), shell=use_shell)
read_data = temp.read()
temp.close()
os.unlink(temp.name)
return read_data
except KeyboardInterrupt:
return None
class SpeechRecognizer(object): # pylint: disable=too-few-public-methods
"""
Class for performing speech-to-text for an input FLAC file.
"""
def __init__(self, language="en", rate=44100, retries=3, api_key=GOOGLE_SPEECH_API_KEY):
self.language = language
self.rate = rate
self.api_key = api_key
self.retries = retries
def __call__(self, data):
gTrans = GoogleTrans()
try:
for _ in range(self.retries):
line = gTrans.query(data)
return line
# url = GOOGLE_SPEECH_API_URL.format(lang=self.language, key=self.api_key)
# headers = {"Content-Type": "audio/x-flac; rate=%d" % self.rate}
#
# try:
# resp = requests.post(url, data=data, headers=headers)
# except requests.exceptions.ConnectionError:
# continue
#
# for line in resp.content.decode('utf-8').split("\n"):
# try:
# line = json.loads(line)
# line = line['result'][0]['alternative'][0]['transcript']
# return line[:1].upper() + line[1:]
# except IndexError:
# # no result
# continue
# except JSONDecodeError:
# continue
except KeyboardInterrupt:
return None
class Translator(object): # pylint: disable=too-few-public-methods
"""
Class for translating a sentence from one language to another.
"""
def __init__(self, language, api_key, src, dst):
self.language = language
self.api_key = api_key
self.service = build('translate', 'v2',
developerKey=self.api_key)
self.src = src
self.dst = dst
def __call__(self, sentence):
try:
if not sentence:
return None
gTrans = GoogleTrans()
result = gTrans.query(sentence)
return result
# result = self.service.translations().list( # pylint: disable=no-member
# source=self.src,
# target=self.dst,
# q=[sentence]
# ).execute()
#
# if 'translations' in result and result['translations'] and \
# 'translatedText' in result['translations'][0]:
# return result['translations'][0]['translatedText']
#
# return None
except KeyboardInterrupt:
return None
def which(program):
"""
Return the path for a given executable.
"""
def is_exe(file_path):
"""
Checks whether a file is executable.
"""
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def extract_audio(filename, channels=1, rate=16000):
"""
Extract audio from an input file to a temporary WAV file.
"""
temp = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)
if not os.path.isfile(filename):
print("The given file does not exist: {}".format(filename))
raise Exception("Invalid filepath: {}".format(filename))
if not which("ffmpeg"):
print("ffmpeg: Executable not found on machine.")
raise Exception("Dependency not found: ffmpeg")
command = ["ffmpeg", "-y", "-i", filename,
"-ac", str(channels), "-ar", str(rate),
"-loglevel", "error", temp.name]
use_shell = True if os.name == "nt" else False
subprocess.check_output(command, stdin=open(os.devnull), shell=use_shell)
return temp.name, rate
def find_speech_regions(filename, frame_width=4096, min_region_size=0.5,
max_region_size=6): # pylint: disable=too-many-locals
"""
Perform voice activity detection on a given audio file.
"""
reader = wave.open(filename)
sample_width = reader.getsampwidth()
rate = reader.getframerate()
n_channels = reader.getnchannels()
chunk_duration = float(frame_width) / rate
n_chunks = int(math.ceil(reader.getnframes() * 1.0 / frame_width))
energies = []
for _ in range(n_chunks):
chunk = reader.readframes(frame_width)
energies.append(audioop.rms(chunk, sample_width * n_channels))
threshold = percentile(energies, 0.2)
elapsed_time = 0
regions = []
region_start = None
for energy in energies:
is_silence = energy <= threshold
max_exceeded = region_start and elapsed_time - region_start >= max_region_size
if (max_exceeded or is_silence) and region_start:
if elapsed_time - region_start >= min_region_size:
regions.append((region_start, elapsed_time))
region_start = None
elif (not region_start) and (not is_silence):
region_start = elapsed_time
elapsed_time += chunk_duration
return regions
def generate_subtitles( # pylint: disable=too-many-locals,too-many-arguments
source_path,
output=None,
concurrency=DEFAULT_CONCURRENCY,
src_language=DEFAULT_SRC_LANGUAGE,
dst_language=DEFAULT_DST_LANGUAGE,
subtitle_file_format=DEFAULT_SUBTITLE_FORMAT,
api_key=None,
):
"""
Given an input audio/video file, generate subtitles in the specified language and format.
"""
audio_filename, audio_rate = extract_audio(source_path)
regions = find_speech_regions(audio_filename)
pool = multiprocessing.Pool(concurrency)
converter = FLACConverter(source_path=audio_filename)
recognizer = SpeechRecognizer(language=src_language, rate=audio_rate,
api_key=GOOGLE_SPEECH_API_KEY)
transcripts = []
if regions:
try:
widgets = ["Converting speech regions to FLAC files: ", Percentage(), ' ', Bar(), ' ',
ETA()]
pbar = ProgressBar(widgets=widgets, maxval=len(regions)).start()
extracted_regions = []
for i, extracted_region in enumerate(pool.imap(converter, regions)):
extracted_regions.append(extracted_region)
pbar.update(i)
pbar.finish()
widgets = ["Performing speech recognition: ", Percentage(), ' ', Bar(), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=len(regions)).start()
for i, transcript in enumerate(pool.imap(recognizer, extracted_regions)):
transcripts.append(transcript)
pbar.update(i)
pbar.finish()
if src_language.split("-")[0] != dst_language.split("-")[0]:
if api_key:
google_translate_api_key = api_key
translator = Translator(dst_language, google_translate_api_key,
dst=dst_language,
src=src_language)
prompt = "Translating from {0} to {1}: ".format(src_language, dst_language)
widgets = [prompt, Percentage(), ' ', Bar(), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=len(regions)).start()
translated_transcripts = []
for i, transcript in enumerate(pool.imap(translator, transcripts)):
translated_transcripts.append(transcript)
pbar.update(i)
pbar.finish()
transcripts = translated_transcripts
else:
print(
"Error: Subtitle translation requires specified Google Translate API key. "
"See --help for further information."
)
return 1
except KeyboardInterrupt:
pbar.finish()
pool.terminate()
pool.join()
print("Cancelling transcription")
raise
timed_subtitles = [(r, t) for r, t in zip(regions, transcripts) if t]
formatter = FORMATTERS.get(subtitle_file_format)
formatted_subtitles = formatter(timed_subtitles)
dest = output
if not dest:
base = os.path.splitext(source_path)[0]
dest = "{base}.{format}".format(base=base, format=subtitle_file_format)
with open(dest, 'wb') as output_file:
output_file.write(formatted_subtitles.encode("utf-8"))
os.remove(audio_filename)
return dest
def validate(args):
"""
Check that the CLI arguments passed to autosub are valid.
"""
if args.format not in FORMATTERS:
print(
"Subtitle format not supported. "
"Run with --list-formats to see all supported formats."
)
return False
if args.src_language not in LANGUAGE_CODES.keys():
print(
"Source language not supported. "
"Run with --list-languages to see all supported languages."
)
return False
if args.dst_language not in LANGUAGE_CODES.keys():
print(
"Destination language not supported. "
"Run with --list-languages to see all supported languages."
)
return False
if not args.source_path:
print("Error: You need to specify a source path.")
return False
return True
def main():
"""
Run autosub as a command-line program.
"""
parser = argparse.ArgumentParser()
parser.add_argument('source_path', help="Path to the video or audio file to subtitle",
nargs='?')
parser.add_argument('-C', '--concurrency', help="Number of concurrent API requests to make",
type=int, default=DEFAULT_CONCURRENCY)
parser.add_argument('-o', '--output',
help="Output path for subtitles (by default, subtitles are saved in \
the same directory and name as the source path)")
parser.add_argument('-F', '--format', help="Destination subtitle format",
default=DEFAULT_SUBTITLE_FORMAT)
parser.add_argument('-S', '--src-language', help="Language spoken in source file",
default=DEFAULT_SRC_LANGUAGE)
parser.add_argument('-D', '--dst-language', help="Desired language for the subtitles",
default=DEFAULT_DST_LANGUAGE)
parser.add_argument('-K', '--api-key',
help="The Google Translate API key to be used. \
(Required for subtitle translation)")
parser.add_argument('--list-formats', help="List all available subtitle formats",
action='store_true')
parser.add_argument('--list-languages', help="List all available source/destination languages",
action='store_true')
args = parser.parse_args()
if args.list_formats:
print("List of formats:")
for subtitle_format in FORMATTERS:
print("{format}".format(format=subtitle_format))
return 0
if args.list_languages:
print("List of all languages:")
for code, language in sorted(LANGUAGE_CODES.items()):
print("{code}\t{language}".format(code=code, language=language))
return 0
if not validate(args):
return 1
try:
subtitle_file_path = generate_subtitles(
source_path=args.source_path,
concurrency=args.concurrency,
src_language=args.src_language,
dst_language=args.dst_language,
api_key=args.api_key,
subtitle_file_format=args.format,
output=args.output,
)
print("Subtitles file created at {}".format(subtitle_file_path))
except KeyboardInterrupt:
return 1
return 0
class Py4Js:
def __init__(self):
self.ctx = execjs.compile("""
function TL(a) {
var k = "";
var b = 406644;
var b1 = 3293161072;
var jd = ".";
var $b = "+-a^+6";
var Zb = "+-3^+b+-f";
for (var e = [], f = 0, g = 0; g < a.length; g++) {
var m = a.charCodeAt(g);
128 > m ? e[f++] = m : (2048 > m ? e[f++] = m >> 6 | 192 : (55296 == (m & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? (m = 65536 + ((m & 1023) << 10) + (a.charCodeAt(++g) & 1023),
e[f++] = m >> 18 | 240,
e[f++] = m >> 12 & 63 | 128) : e[f++] = m >> 12 | 224,
e[f++] = m >> 6 & 63 | 128),
e[f++] = m & 63 | 128)
}
a = b;
for (f = 0; f < e.length; f++) a += e[f],
a = RL(a, $b);
a = RL(a, Zb);
a ^= b1 || 0;
0 > a && (a = (a & 2147483647) + 2147483648);
a %= 1E6;
return a.toString() + jd + (a ^ b)
};
function RL(a, b) {
var t = "a";
var Yb = "+";
for (var c = 0; c < b.length - 2; c += 3) {
var d = b.charAt(c + 2),
d = d >= t ? d.charCodeAt(0) - 87 : Number(d),
d = b.charAt(c + 1) == Yb ? a >>> d: a << d;
a = b.charAt(c) == Yb ? a + d & 4294967295 : a ^ d
}
return a
}
""")
def getTk(self, text):
return self.ctx.call("TL", text)
class GoogleTrans:
def translate(self, tk, content):
if len(content) > 4891:
print("翻译的长度超过限制!!!")
return
param = {'tk': tk, 'q': content}
result = requests.get(
"""http://translate.google.cn/translate_a/single?client=t&tl=zh-CN&hl=zh-CN&dt=at&dt=bd&dt=ex&dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&ie=UTF-8&oe=UTF-8&clearbtn=1&otf=1&pc=1&srcrom=0&ssel=0&tsel=0&kc=2&sl=auto""",
params=param)
try:
json.loads(result.text)
except ValueError:
return None
res = result.json()
return res[0][0][0]
def query(self, content):
js = Py4Js()
tk = js.getTk(content)
return self.translate(tk, content)
def is_json(self, myjson):
try:
json_object = json.loads(myjson)
except ValueError as e:
return False
return True
if __name__ == '__main__':
sys.exit(main())
| [] | [] | ["PATH"] | [] | ["PATH"] | python | 1 | 0 |
integrationtest/integrationtest_test.go | package integrationtest_test
import (
"context"
"fmt"
"os"
"testing"
"github.com/jackc/pgxsql/integrationtest"
"github.com/jackc/pgx"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var conn *pgx.Conn
func TestMain(m *testing.M) {
databaseURI := os.Getenv("TEST_DATABASE_URI")
if databaseURI == "" {
fmt.Fprintln(os.Stderr, "TEST_DATABASE_URI environment variable is required but not set")
os.Exit(1)
}
connConfig, err := pgx.ParseURI(databaseURI)
if err != nil {
fmt.Fprintln(os.Stderr, "unable to parse TEST_DATABASE_URI:", err)
os.Exit(1)
}
conn, err = pgx.Connect(connConfig)
if err != nil {
fmt.Fprintln(os.Stderr, "unable to connect to PostgreSQL server:", err)
os.Exit(1)
}
os.Exit(m.Run())
}
func TestDeleteRow(t *testing.T) {
tx, err := conn.Begin()
require.NoError(t, err)
defer tx.Rollback()
commandTag, err := integrationtest.DeletePerson(context.Background(), tx, 1)
assert.NoError(t, err)
assert.Equal(t, "DELETE 1", string(commandTag))
rowFound := false
err = conn.QueryRow("select true from person where id=$1", 1).Scan(&rowFound)
assert.Equal(t, pgx.ErrNoRows, err)
assert.False(t, rowFound)
}
| ["\"TEST_DATABASE_URI\""] | [] | ["TEST_DATABASE_URI"] | [] | ["TEST_DATABASE_URI"] | go | 1 | 0 |
store/redis/redis.go | package redis
import (
"fmt"
"os"
redis "github.com/go-redis/redis"
)
const localhost = "127.0.0.1:6379"
// New returns a checkpoint that uses Redis for underlying storage
func New(appName string, opts ...Option) (*Checkpoint, error) {
if appName == "" {
return nil, fmt.Errorf("must provide app name")
}
c := &Checkpoint{
appName: appName,
}
// override defaults
for _, opt := range opts {
opt(c)
}
// default client if none provided
if c.client == nil {
addr := os.Getenv("REDIS_URL")
if addr == "" {
addr = localhost
}
client := redis.NewClient(&redis.Options{Addr: addr})
c.client = client
}
// verify we can ping server
_, err := c.client.Ping().Result()
if err != nil {
return nil, err
}
return c, nil
}
// Checkpoint stores and retrieves the last processed sequence number (checkpoint) for a stream shard
type Checkpoint struct {
appName string
client *redis.Client
}
// GetCheckpoint fetches the checkpoint for a particular Shard.
func (c *Checkpoint) GetCheckpoint(streamName, shardID string) (string, error) {
val, _ := c.client.Get(c.key(streamName, shardID)).Result()
return val, nil
}
// SetCheckpoint stores a checkpoint for a shard (e.g. sequence number of last record processed by application).
// Upon failover, record processing is resumed from this point.
func (c *Checkpoint) SetCheckpoint(streamName, shardID, sequenceNumber string) error {
if sequenceNumber == "" {
return fmt.Errorf("sequence number should not be empty")
}
err := c.client.Set(c.key(streamName, shardID), sequenceNumber, 0).Err()
if err != nil {
return err
}
return nil
}
// key generates a unique Redis key for storage of Checkpoint.
func (c *Checkpoint) key(streamName, shardID string) string {
return fmt.Sprintf("%v:checkpoint:%v:%v", c.appName, streamName, shardID)
}
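// A minimal usage sketch from a consuming package (stream, shard and sequence
// number values are hypothetical):
//
//	cp, err := redis.New("my-app")
//	if err != nil {
//		// handle error
//	}
//	_ = cp.SetCheckpoint("my-stream", "shardId-000000000000", "49578481031144599192")
//	seq, _ := cp.GetCheckpoint("my-stream", "shardId-000000000000")
//	_ = seq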
| ["\"REDIS_URL\""] | [] | ["REDIS_URL"] | [] | ["REDIS_URL"] | go | 1 | 0 |
cmd/run.go | package cmd
import (
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/chanzuckerberg/blessclient/pkg/bless"
"github.com/chanzuckerberg/blessclient/pkg/config"
cziSSH "github.com/chanzuckerberg/blessclient/pkg/ssh"
cziAWS "github.com/chanzuckerberg/go-misc/aws"
oidc "github.com/chanzuckerberg/go-misc/oidc_cli"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
const (
flagForce = "force"
)
func init() {
runCmd.Flags().BoolP(flagForce, "f", false, "Force certificate refresh")
rootCmd.AddCommand(runCmd)
}
var runCmd = &cobra.Command{
Use: "run",
Short: "run requests a certificate",
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
force, err := cmd.Flags().GetBool(flagForce)
if err != nil {
return errors.Wrap(err, "Missing force flag")
}
config, err := config.FromFile(config.DefaultConfigFile)
if err != nil {
return err
}
a, err := cziSSH.GetSSHAgent(os.Getenv("SSH_AUTH_SOCK"))
if err != nil {
return err
}
defer a.Close()
manager := cziSSH.NewAgentKeyManager(a)
hasCert, err := manager.HasValidCertificate()
if err != nil {
return err
}
if !force && hasCert {
logrus.Debug("fresh cert, nothing to do")
return nil
}
pub, priv, err := manager.GetKey()
if err != nil {
return err
}
sess, err := session.NewSession()
if err != nil {
return errors.Wrap(err, "could not initialize AWS session")
}
stsSvc := sts.New(sess)
credsProvider, err := oidc.NewAwsOIDCCredsProvider(
cmd.Context(),
stsSvc,
&oidc.AwsOIDCCredsProviderConfig{
AWSRoleARN: config.ClientConfig.RoleARN,
OIDCClientID: config.ClientConfig.OIDCClientID,
OIDCIssuerURL: config.ClientConfig.OIDCIssuerURL,
},
)
if err != nil {
return err
}
token, err := credsProvider.FetchOIDCToken(cmd.Context())
if err != nil {
return err
}
awsConf := aws.NewConfig().WithCredentials(credsProvider.Credentials).WithRegion("us-west-2")
awsClient := cziAWS.New(sess).WithLambda(awsConf)
client := bless.NewOIDC(awsClient, &config.LambdaConfig)
cert, err := client.RequestCert(
cmd.Context(),
awsClient,
&bless.SigningRequest{
PublicKeyToSign: bless.NewPublicKeyToSign(pub),
Identity: bless.Identity{
OktaAccessToken: &bless.OktaAccessTokenInput{
AccessToken: token.AccessToken,
},
},
},
)
if err != nil {
return err
}
err = manager.WriteKey(priv, cert)
if err != nil {
return err
}
hasCert, err = manager.HasValidCertificate()
if err != nil {
return err
}
if !hasCert {
return errors.Errorf("wrote error to key manager, but could not fetch it back")
}
return nil
},
}
| ["\"SSH_AUTH_SOCK\""] | [] | ["SSH_AUTH_SOCK"] | [] | ["SSH_AUTH_SOCK"] | go | 1 | 0 |
client/api/api.go | // Package api is an API Gateway
package api
import (
"fmt"
"net/http"
"os"
"github.com/go-acme/lego/v3/providers/dns/cloudflare"
"github.com/gorilla/mux"
"github.com/micro/cli/v2"
"github.com/micro/go-micro/v2"
ahandler "github.com/micro/go-micro/v2/api/handler"
aapi "github.com/micro/go-micro/v2/api/handler/api"
"github.com/micro/go-micro/v2/api/handler/event"
ahttp "github.com/micro/go-micro/v2/api/handler/http"
arpc "github.com/micro/go-micro/v2/api/handler/rpc"
"github.com/micro/go-micro/v2/api/handler/web"
"github.com/micro/go-micro/v2/api/resolver"
"github.com/micro/go-micro/v2/api/resolver/grpc"
"github.com/micro/go-micro/v2/api/resolver/host"
"github.com/micro/go-micro/v2/api/resolver/path"
"github.com/micro/go-micro/v2/api/router"
regRouter "github.com/micro/go-micro/v2/api/router/registry"
"github.com/micro/go-micro/v2/api/server"
"github.com/micro/go-micro/v2/api/server/acme"
"github.com/micro/go-micro/v2/api/server/acme/autocert"
"github.com/micro/go-micro/v2/api/server/acme/certmagic"
httpapi "github.com/micro/go-micro/v2/api/server/http"
log "github.com/micro/go-micro/v2/logger"
"github.com/micro/go-micro/v2/sync/memory"
"github.com/micro/micro/v2/client/api/auth"
"github.com/micro/micro/v2/internal/handler"
"github.com/micro/micro/v2/internal/helper"
rrmicro "github.com/micro/micro/v2/internal/resolver/api"
"github.com/micro/micro/v2/internal/stats"
)
var (
Name = "go.micro.api"
Address = ":8080"
Handler = "meta"
Resolver = "micro"
RPCPath = "/rpc"
APIPath = "/"
ProxyPath = "/{service:[a-zA-Z0-9]+}"
Namespace = "go.micro"
HeaderPrefix = "X-Micro-"
EnableRPC = false
ACMEProvider = "autocert"
ACMEChallengeProvider = "cloudflare"
ACMECA = acme.LetsEncryptProductionCA
)
func Run(ctx *cli.Context, srvOpts ...micro.Option) {
log.Init(log.WithFields(map[string]interface{}{"service": "api"}))
if len(ctx.String("server_name")) > 0 {
Name = ctx.String("server_name")
}
if len(ctx.String("address")) > 0 {
Address = ctx.String("address")
}
if len(ctx.String("handler")) > 0 {
Handler = ctx.String("handler")
}
if len(ctx.String("resolver")) > 0 {
Resolver = ctx.String("resolver")
}
if len(ctx.String("enable_rpc")) > 0 {
EnableRPC = ctx.Bool("enable_rpc")
}
if len(ctx.String("acme_provider")) > 0 {
ACMEProvider = ctx.String("acme_provider")
}
if len(ctx.String("namespace")) > 0 {
Namespace = ctx.String("namespace")
}
// append name to opts
srvOpts = append(srvOpts, micro.Name(Name))
// initialise service
service := micro.NewService(srvOpts...)
// Init API
var opts []server.Option
if ctx.Bool("enable_acme") {
hosts := helper.ACMEHosts(ctx)
opts = append(opts, server.EnableACME(true))
opts = append(opts, server.ACMEHosts(hosts...))
switch ACMEProvider {
case "autocert":
opts = append(opts, server.ACMEProvider(autocert.NewProvider()))
case "certmagic":
if ACMEChallengeProvider != "cloudflare" {
log.Fatal("The only implemented DNS challenge provider is cloudflare")
}
apiToken := os.Getenv("CF_API_TOKEN")
if len(apiToken) == 0 {
log.Fatal("env variables CF_API_TOKEN and CF_ACCOUNT_ID must be set")
}
storage := certmagic.NewStorage(
memory.NewSync(),
service.Options().Store,
)
config := cloudflare.NewDefaultConfig()
config.AuthToken = apiToken
config.ZoneToken = apiToken
challengeProvider, err := cloudflare.NewDNSProviderConfig(config)
if err != nil {
log.Fatal(err.Error())
}
opts = append(opts,
server.ACMEProvider(
certmagic.NewProvider(
acme.AcceptToS(true),
acme.CA(ACMECA),
acme.Cache(storage),
acme.ChallengeProvider(challengeProvider),
acme.OnDemand(false),
),
),
)
default:
log.Fatalf("%s is not a valid ACME provider\n", ACMEProvider)
}
} else if ctx.Bool("enable_tls") {
config, err := helper.TLSConfig(ctx)
if err != nil {
fmt.Println(err.Error())
return
}
opts = append(opts, server.EnableTLS(true))
opts = append(opts, server.TLSConfig(config))
}
if ctx.Bool("enable_cors") {
opts = append(opts, server.EnableCORS(true))
}
// create the router
var h http.Handler
r := mux.NewRouter()
h = r
if ctx.Bool("enable_stats") {
st := stats.New()
r.HandleFunc("/stats", st.StatsHandler)
h = st.ServeHTTP(r)
st.Start()
defer st.Stop()
}
// return version and list of services
r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
if r.Method == "OPTIONS" {
return
}
response := fmt.Sprintf(`{"version": "%s"}`, ctx.App.Version)
w.Write([]byte(response))
})
// strip favicon.ico
r.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {})
// resolver options
ropts := []resolver.Option{
resolver.WithServicePrefix(Namespace),
resolver.WithHandler(Handler),
}
// default resolver
rr := rrmicro.NewResolver(ropts...)
switch Resolver {
case "host":
rr = host.NewResolver(ropts...)
case "path":
rr = path.NewResolver(ropts...)
case "grpc":
rr = grpc.NewResolver(ropts...)
}
// register rpc handler
if EnableRPC {
log.Infof("Registering RPC Handler at %s", RPCPath)
r.Handle(RPCPath, handler.NewRPCHandler(rr))
}
switch Handler {
case "rpc":
log.Infof("Registering API RPC Handler at %s", APIPath)
rt := regRouter.NewRouter(
router.WithHandler(arpc.Handler),
router.WithResolver(rr),
router.WithRegistry(service.Options().Registry),
)
rp := arpc.NewHandler(
ahandler.WithNamespace(Namespace),
ahandler.WithRouter(rt),
ahandler.WithClient(service.Client()),
)
r.PathPrefix(APIPath).Handler(rp)
case "api":
log.Infof("Registering API Request Handler at %s", APIPath)
rt := regRouter.NewRouter(
router.WithHandler(aapi.Handler),
router.WithResolver(rr),
router.WithRegistry(service.Options().Registry),
)
ap := aapi.NewHandler(
ahandler.WithNamespace(Namespace),
ahandler.WithRouter(rt),
ahandler.WithClient(service.Client()),
)
r.PathPrefix(APIPath).Handler(ap)
case "event":
log.Infof("Registering API Event Handler at %s", APIPath)
rt := regRouter.NewRouter(
router.WithHandler(event.Handler),
router.WithResolver(rr),
router.WithRegistry(service.Options().Registry),
)
ev := event.NewHandler(
ahandler.WithNamespace(Namespace),
ahandler.WithRouter(rt),
ahandler.WithClient(service.Client()),
)
r.PathPrefix(APIPath).Handler(ev)
case "http", "proxy":
log.Infof("Registering API HTTP Handler at %s", ProxyPath)
rt := regRouter.NewRouter(
router.WithHandler(ahttp.Handler),
router.WithResolver(rr),
router.WithRegistry(service.Options().Registry),
)
ht := ahttp.NewHandler(
ahandler.WithNamespace(Namespace),
ahandler.WithRouter(rt),
ahandler.WithClient(service.Client()),
)
r.PathPrefix(ProxyPath).Handler(ht)
case "web":
log.Infof("Registering API Web Handler at %s", APIPath)
rt := regRouter.NewRouter(
router.WithHandler(web.Handler),
router.WithResolver(rr),
router.WithRegistry(service.Options().Registry),
)
w := web.NewHandler(
ahandler.WithNamespace(Namespace),
ahandler.WithRouter(rt),
ahandler.WithClient(service.Client()),
)
r.PathPrefix(APIPath).Handler(w)
default:
log.Infof("Registering API Default Handler at %s", APIPath)
rt := regRouter.NewRouter(
router.WithResolver(rr),
router.WithRegistry(service.Options().Registry),
)
r.PathPrefix(APIPath).Handler(handler.Meta(service, rt, Namespace))
}
// create the auth wrapper and the server
authWrapper := auth.Wrapper(rr, Namespace)
api := httpapi.NewServer(Address, server.WrapHandler(authWrapper))
api.Init(opts...)
api.Handle("/", h)
// Start API
if err := api.Start(); err != nil {
log.Fatal(err)
}
// Run server
if err := service.Run(); err != nil {
log.Fatal(err)
}
// Stop API
if err := api.Stop(); err != nil {
log.Fatal(err)
}
}
func Commands(options ...micro.Option) []*cli.Command {
command := &cli.Command{
Name: "api",
Usage: "Run the api gateway",
Action: func(ctx *cli.Context) error {
Run(ctx, options...)
return nil
},
Flags: []cli.Flag{
&cli.StringFlag{
Name: "address",
Usage: "Set the api address e.g 0.0.0.0:8080",
EnvVars: []string{"MICRO_API_ADDRESS"},
},
&cli.StringFlag{
Name: "handler",
Usage: "Specify the request handler to be used for mapping HTTP requests to services; {api, event, http, rpc}",
EnvVars: []string{"MICRO_API_HANDLER"},
},
&cli.StringFlag{
Name: "namespace",
Usage: "Set the namespace used by the API e.g. com.example",
EnvVars: []string{"MICRO_API_NAMESPACE"},
},
&cli.StringFlag{
Name: "type",
Usage: "Set the service type used by the API e.g. api",
EnvVars: []string{"MICRO_API_TYPE"},
},
&cli.StringFlag{
Name: "resolver",
Usage: "Set the hostname resolver used by the API {host, path, grpc}",
EnvVars: []string{"MICRO_API_RESOLVER"},
},
&cli.BoolFlag{
Name: "enable_rpc",
Usage: "Enable call the backend directly via /rpc",
EnvVars: []string{"MICRO_API_ENABLE_RPC"},
},
&cli.BoolFlag{
Name: "enable_cors",
Usage: "Enable CORS, allowing the API to be called by frontend applications",
EnvVars: []string{"MICRO_API_ENABLE_CORS"},
Value: true,
},
},
}
return []*cli.Command{command}
}
| ["\"CF_API_TOKEN\""] | [] | ["CF_API_TOKEN"] | [] | ["CF_API_TOKEN"] | go | 1 | 0 |
pkg/output/elasticsearch/elasticsearch.go | package elasticsearch
import (
"crypto/tls"
"encoding/base64"
"encoding/json"
"errors"
"github.com/trevorlinton/remote_syslog2/syslog"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
)
// For more information on elastic search bulk API, see:
// https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
// Syslog creates a new syslog output to elasticsearch
type Syslog struct {
auth int
akkeris bool
node string
index string
url url.URL
esurl url.URL
endpoint string
client *http.Client
packets chan syslog.Packet
errors chan<- error
stop chan struct{}
}
const (
AuthNone int = iota
AuthApiKey
AuthBearer
AuthBasic
)
type elasticSearchHeaderCreate struct {
Source string `json:"_source"`
Id string `json:"_id"`
Index string `json:"_index"`
}
type elasticSearchHeader struct {
Create elasticSearchHeaderCreate `json:"create"`
}
type elasticSearchBody struct {
Timestamp string `json:"@timestamp"`
Hostname string `json:"hostname"`
Tag string `json:"tag"`
Message string `json:"message"`
Severity int `json:"severity"`
Facility int `json:"facility"`
}
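// Each entry appended to the bulk payload in loop() below is one marshalled
// elasticSearchHeader line followed by one elasticSearchBody line (NDJSON).
// Roughly, with all values illustrative only:
//
//	{"create":{"_source":"logtrain","_id":"1612345678901234567","_index":"my-host"}}
//	{"@timestamp":"2021-02-03T04:05:06Z","hostname":"my-host","tag":"web","message":"hello","severity":6,"facility":1}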
var syslogSchemas = []string{"elasticsearch://", "es://", "elasticsearch+https://", "elasticsearch+http://", "es+https://", "es+http://"}
// Test the schema to see if its an elasticsearch schema
func Test(endpoint string) bool {
for _, schema := range syslogSchemas {
if strings.HasPrefix(strings.ToLower(endpoint), schema) == true {
return true
}
}
return false
}
func toURL(endpoint string) string {
if strings.Contains(endpoint, "+https://") == true {
return strings.Replace(strings.Replace(endpoint, "elasticsearch+https://", "https://", 1), "es+https://", "https://", 1)
}
if strings.Contains(endpoint, "+http://") == true {
return strings.Replace(strings.Replace(endpoint, "elasticsearch+http://", "http://", 1), "es+http://", "http://", 1)
}
return strings.Replace(strings.Replace(endpoint, "elasticsearch://", "https://", 1), "es://", "https://", 1)
}
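// For example (endpoint is illustrative only):
//
//	toURL("es+https://user:pass@search.example.com:9200/?index=logs")
//	// => "https://user:pass@search.example.com:9200/?index=logs"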
// Create a new elasticsearch endpoint
func Create(endpoint string, errorsCh chan<- error) (*Syslog, error) {
if Test(endpoint) == false {
return nil, errors.New("Invalid endpoint")
}
u, err := url.Parse(toURL(endpoint))
if err != nil {
return nil, err
}
esurl, err := url.Parse(u.String())
if err != nil {
return nil, err
}
if strings.HasPrefix(esurl.Path, "/_bulk") == false {
if strings.HasSuffix(esurl.Path, "/") == true {
esurl.Path = esurl.Path + "_bulk"
} else {
esurl.Path = esurl.Path + "/_bulk"
}
}
auth := AuthNone
if _, ok := esurl.User.Password(); ok {
if strings.ToLower(u.Query().Get("auth")) == "bearer" {
auth = AuthBearer
} else if strings.ToLower(u.Query().Get("auth")) == "apikey" {
auth = AuthApiKey
} else {
auth = AuthBasic
}
}
client := http.Client{}
if u.Query().Get("insecure") == "true" {
client.Transport = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
}
q := esurl.Query()
q.Del("auth")
q.Del("index")
q.Del("insecure")
esurl.RawQuery = q.Encode()
esurl.User = nil
node := os.Getenv("NODE") // TODO: pass this into create
if node == "" {
node = "logtrain"
}
return &Syslog{
auth: auth,
node: node,
index: u.Query().Get("index"),
endpoint: endpoint,
url: *u,
esurl: *esurl,
client: &client,
packets: make(chan syslog.Packet, 10),
errors: errorsCh,
stop: make(chan struct{}, 1),
akkeris: os.Getenv("AKKERIS") == "true", // TODO: pass this in to Create for all outputs.
}, nil
}
// Dial connects to an elasticsearch
func (log *Syslog) Dial() error {
go log.loop()
return nil
}
// Close closes the connection to elasticsearch
func (log *Syslog) Close() error {
log.stop <- struct{}{}
close(log.packets)
return nil
}
// Pools returns whether the elasticsearch end point pools connections
func (log *Syslog) Pools() bool {
return true
}
// Packets returns a channel to send syslog packets on
func (log *Syslog) Packets() chan syslog.Packet {
return log.packets
}
func (log *Syslog) loop() {
timer := time.NewTicker(time.Second)
var payload string = ""
for {
select {
case p, ok := <-log.packets:
if !ok {
return
}
var index = log.index
if index == "" {
index = p.Hostname
}
header := elasticSearchHeader{
Create: elasticSearchHeaderCreate{
Source: "logtrain",
Id: strconv.Itoa(int(time.Now().UnixNano())),
Index: index,
},
}
body := elasticSearchBody{
Timestamp: p.Time.Format(syslog.Rfc5424time),
Hostname: p.Hostname,
Tag: p.Tag,
Message: p.Message,
Severity: int(p.Severity),
Facility: int(p.Facility),
}
if h, err := json.Marshal(header); err == nil {
if b, err := json.Marshal(body); err == nil {
payload += string(h) + "\n" + string(b) + "\n"
}
}
case <-timer.C:
if payload != "" {
req, err := http.NewRequest(http.MethodPost, log.esurl.String(), strings.NewReader(string(payload)))
if err != nil {
log.errors <- err
} else {
req.Header.Set("content-type", "application/json")
if pwd, ok := log.url.User.Password(); ok {
if log.auth == AuthBearer {
req.Header.Set("Authorization", "Bearer "+pwd)
} else if log.auth == AuthApiKey {
req.Header.Set("Authorization", "ApiKey "+base64.StdEncoding.EncodeToString([]byte(log.url.User.Username()+":"+string(pwd))))
} else {
req.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(log.url.User.Username()+":"+string(pwd))))
}
}
resp, err := log.client.Do(req)
if err != nil {
log.errors <- err
} else {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
body = []byte{}
}
resp.Body.Close()
if resp.StatusCode >= http.StatusMultipleChoices || resp.StatusCode < http.StatusOK {
log.errors <- errors.New("invalid response from endpoint: " + resp.Status + " " + string(body) + "sent: [[ " + payload + " ]]")
}
}
}
payload = ""
}
case <-log.stop:
return
}
}
}
| ["\"NODE\"", "\"AKKERIS\""] | [] | ["AKKERIS", "NODE"] | [] | ["AKKERIS", "NODE"] | go | 2 | 0 |
v2/env.go | package services
import (
"os"
)
// IsLocal returns true if we are running inside a local debug environment instead
// of a production Kubernetes container. It depends on Version() working correctly.
func IsLocal() bool {
return Version() == ""
}
// Version returns the environment variable VERSION. In development it should be empty.
// In production it should be set accordingly; it may be for example the container hash.
func Version() string {
return os.Getenv("VERSION")
}
| ["\"VERSION\""] | [] | ["VERSION"] | [] | ["VERSION"] | go | 1 | 0 |
rqd/rqd/cuerqd.py | #!/usr/bin/env python
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Displays information from or sends a command to an RQD host."""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import argparse
from builtins import str
from builtins import object
import logging as log
import os
import random
import grpc
import rqd.compiled_proto.rqd_pb2
import rqd.compiled_proto.rqd_pb2_grpc
import rqd.rqconstants
class RqdHost(object):
"""An RQD host.
This class supplies a wrapper for querying or sending commands to an RQD host."""
def __init__(self, rqdHost, rqdPort=rqd.rqconstants.RQD_GRPC_PORT):
self.rqdHost = rqdHost
self.rqdPort = rqdPort
channel = grpc.insecure_channel('%s:%s' % (self.rqdHost, self.rqdPort))
self.stub = rqd.compiled_proto.rqd_pb2_grpc.RqdInterfaceStub(channel)
self.frameStub = rqd.compiled_proto.rqd_pb2_grpc.RunningFrameStub(channel)
def status(self):
"""Fetches and returns the host status report."""
return self.stub.ReportStatus(rqd.compiled_proto.rqd_pb2.RqdStaticReportStatusRequest())
def getRunningFrame(self, frameId):
"""Returns the host's currently running frame."""
return self.stub.GetRunFrame(
rqd.compiled_proto.rqd_pb2.RqdStaticGetRunFrameRequest(frame_id=frameId))
def nimbyOff(self):
"""Disables Nimby on the host."""
log.info("%s: Turning off Nimby", self.rqdHost)
log.info("rqd nimbyoff by %s", os.environ.get("USER"))
self.stub.NimbyOff(rqd.compiled_proto.rqd_pb2.RqdStaticNimbyOffRequest())
def nimbyOn(self):
"""Enables Nimby on the host."""
log.info("%s: Turning on Nimby", self.rqdHost)
log.info("rqd nimbyon by %s", os.environ.get("USER"))
self.stub.NimbyOn(rqd.compiled_proto.rqd_pb2.RqdStaticNimbyOnRequest())
def lockAll(self):
"""Locks all of the host's cores."""
print(self.rqdHost, "Locking all cores")
self.stub.LockAll(rqd.compiled_proto.rqd_pb2.RqdStaticLockAllRequest())
def unlockAll(self):
"""Unlocks all of the host's cores."""
print(self.rqdHost, "Unlocking all cores")
self.stub.UnlockAll(rqd.compiled_proto.rqd_pb2.RqdStaticUnlockAllRequest())
def lock(self, cores):
"""Locks the given number of cores."""
cores = int(cores)
print(self.rqdHost, "Locking %d cores" % cores)
self.stub.Lock(rqd.compiled_proto.rqd_pb2.RqdStaticLockRequest(cores=cores))
def unlock(self, cores):
"""Unlocks the given number of cores."""
cores = int(cores)
print(self.rqdHost, "Unlocking %d cores" % cores)
self.stub.Unlock(rqd.compiled_proto.rqd_pb2.RqdStaticUnlockRequest(cores=cores))
def shutdownRqdIdle(self):
"""Shuts down the host when idle."""
print(self.rqdHost, "Sending shutdownRqdIdle command")
self.stub.ShutdownRqdIdle(rqd.compiled_proto.rqd_pb2.RqdStaticShutdownIdleRequest())
def shutdownRqdNow(self):
"""Shuts down the host now."""
print(self.rqdHost, "Sending shutdownRqdNow command")
try:
self.stub.ShutdownRqdNow(rqd.compiled_proto.rqd_pb2.RqdStaticShutdownNowRequest())
# pylint: disable=broad-except
except Exception:
# Shutting down the service from inside means this request will receive
# a connection error response
pass
def restartRqdIdle(self):
"""Restarts RQD on the host when idle."""
print(self.rqdHost, "Sending restartRqdIdle command")
self.stub.RestartRqdIdle(rqd.compiled_proto.rqd_pb2.RqdStaticRestartIdleRequest())
def restartRqdNow(self):
"""Restarts RQD on the host now."""
print(self.rqdHost, "Sending restartRqdNow command")
self.stub.RestartRqdNow(rqd.compiled_proto.rqd_pb2.RqdStaticRestartNowRequest())
def rebootIdle(self):
"""Reboots the host when idle."""
print(self.rqdHost, "Sending rebootIdle command")
self.stub.RebootIdle(rqd.compiled_proto.rqd_pb2.RqdStaticRebootIdleRequest())
def rebootNow(self):
"""Reboots the host now."""
print(self.rqdHost, "Sending rebootNow command")
self.stub.RebootNow(rqd.compiled_proto.rqd_pb2.RqdStaticRebootNowRequest())
def launchFrame(self, frame):
"""Launches a frame on the host."""
self.stub.LaunchFrame(
rqd.compiled_proto.rqd_pb2.RqdStaticLaunchFrameRequest(run_frame=frame))
def killFrame(self, frameId, message):
"""Kills a frame on the host."""
runFrame = self.getRunningFrame(frameId)
self.frameStub.Kill(run_frame=runFrame, message=message)
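# Illustrative sketch (not part of the original script): using RqdHost
# programmatically. The hostname "render01" is hypothetical; any reachable
# RQD host on the default gRPC port would do.
#
#     host = RqdHost("render01")
#     print(host.status())   # host status report
#     host.lock(2)           # lock two cores
#     host.unlockAll()       # later, release everything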
def main():
"""Entrypoint for the commandline interface."""
parser = argparse.ArgumentParser()
parser.add_argument(
'host', nargs='?', default='localhost', help='RQD hostname (defaults to localhost)')
parser.add_argument(
'-s', action='store_true', help='Print RQD status')
parser.add_argument(
'-v', action='store_true', help='Print RQD version')
parser.add_argument(
'--lp', metavar='coreID', nargs='+', help='Lock the specified cores')
parser.add_argument(
'--ulp', metavar='coreID', nargs='+', help='Unlock the specified cores')
parser.add_argument(
'--lh', action='store_true', help='Lock all cores for the specified host')
parser.add_argument(
'--ulh', action='store_true', help='Unlock all cores for the specified host')
parser.add_argument(
'--nimbyon', action='store_true',
help="Turn on 'Not in my back yard' (NIMBY) to stop processing on the specified host")
parser.add_argument(
'--nimbyoff', action='store_true',
help="Turn off 'Not in my back yard' (NIMBY) to start processing on the specified host")
parser.add_argument(
'--exit', action='store_true',
help='Lock host, wait until machine is idle, and then shutdown RQD. Any unlock '
'command cancels this request.')
parser.add_argument(
'--exit_now', action='store_true', help='KILL ALL running frames and shutdown RQD')
parser.add_argument(
'--restart', action='store_true',
help='Lock host, wait until machine is idle, and then restart RQD. Any unlock '
'command cancels this request')
parser.add_argument(
'--restart_now', action='store_true', help='KILL ALL running frames and restart RQD')
parser.add_argument(
'--reboot', action='store_true',
help='Lock host, wait until machine is idle, and then REBOOT machine. Any unlock '
'command cancels this request.')
parser.add_argument(
'--reboot_now', action='store_true', help='KILL ALL running frames and REBOOT machine')
parser.add_argument(
'--kill', metavar='frameID', nargs='+',
help='Attempts to kill the given frame via its ICE proxy')
parser.add_argument(
'--getproxy', metavar='frameID', nargs='+', help='Returns the proxy for the given frameid')
parser.add_argument(
'--test_edu_frame', action='store_true',
help='Launch an edu frame test on an idle core (or first core if none are available)')
parser.add_argument(
'--test_script_frame', action='store_true',
help='Launch a script frame test on an idle core (or first core if none are available)')
parser.add_argument(
'--test_script_frame_mac', action='store_true',
help='Launch a script frame test for macOS on an idle core (or first core if '
'none are available)')
args = parser.parse_args()
rqdHost = RqdHost(args.host)
if args.s:
print(rqdHost.status())
if args.v:
tagPrefix = 'rqdv-'
for tag in rqdHost.status().host.tags:
if tag.startswith(tagPrefix):
print("version =", tag[len(tagPrefix):])
if args.nimbyoff:
rqdHost.nimbyOff()
if args.nimbyon:
rqdHost.nimbyOn()
if args.lp is not None:
for arg in args.lp:
rqdHost.lock(arg)
if args.ulp is not None:
for arg in args.ulp:
rqdHost.unlock(arg)
if args.lh:
rqdHost.lockAll()
if args.ulh:
rqdHost.unlockAll()
if args.exit_now:
rqdHost.shutdownRqdNow()
elif args.exit:
rqdHost.shutdownRqdIdle()
if args.restart_now:
rqdHost.restartRqdNow()
elif args.restart:
rqdHost.restartRqdIdle()
if args.reboot_now:
rqdHost.rebootNow()
elif args.reboot:
rqdHost.rebootIdle()
if args.kill is not None:
for arg in args.kill:
rqdHost.killFrame(arg, "Killed by %s using cuerqd.py" % os.environ.get("USER"))
if args.getproxy is not None:
for arg in args.getproxy:
frameProxy = rqdHost.getRunningFrame(arg)
print(frameProxy)
if args.test_edu_frame:
print("Launching edu test frame (logs to /mcp)")
frameNum = "0001"
runFrame = rqd.compiled_proto.rqd_pb2.RunFrame()
runFrame.job_id = "SD6F3S72DJ26236KFS"
runFrame.job_name = "edu-trn_jwelborn-jwelborn_teapot_bty"
runFrame.frame_id = "FD1S3I154O646UGSNN%s" % frameNum
runFrame.frame_name = "%s-teapot_bty_3D" % frameNum
runFrame.command = (
"/usr/bin/env VNP_APPLICATION_TIME=1197683283873 /usr/bin/env VNP_VCR_"
"SESSION=3411896 /usr/bin/env PROFILE=default "
"/shots/edu/home/perl/etc/qwrap.cuerun /shots/edu/trn_jwelborn/cue/jwelborn "
"olrun /shots/edu/trn_jwelborn/cue/cue_archive/edu-trn_jwelborn-jwelborn_teapot_bty"
"/v4/teapot_bty.outline %d -batch -event teapot_bty_3D" % int(frameNum))
runFrame.user_name = "jwelborn"
runFrame.log_dir = "/mcp" # This would be on the shottree
runFrame.show = "edu"
runFrame.shot = "trn_jwelborn"
runFrame.uid = 10164
runFrame.num_cores = 100
rqdHost.launchFrame(runFrame)
if args.test_script_frame:
print("Launching script test frame (logs to /mcp)")
runFrame = rqd.compiled_proto.rqd_pb2.RunFrame()
runFrame.resource_id = "8888888877777755555"
runFrame.job_id = "SD6F3S72DJ26236KFS"
runFrame.job_name = "swtest-home-jwelborn_rqd_test"
runFrame.frame_id = "FD1S3I154O646UGSNN" + str(random.randint(0, 99999))
runFrame.frame_name = "0001-preprocess"
# Script output is not buffered due to python -u option
runFrame.command = "/net/people/jwelborn/test_python_u -t 5 -e 0"
runFrame.user_name = "jwelborn"
runFrame.log_dir = "/mcp" # This would be on the shottree
runFrame.show = "swtest"
runFrame.shot = "home"
runFrame.uid = 10164
runFrame.num_cores = 50
rqdHost.launchFrame(runFrame)
if args.test_script_frame_mac:
print("Launching script test frame (logs to /tmp)")
runFrame = rqd.compiled_proto.rqd_pb2.RunFrame()
runFrame.resource_id = "2222222277777755555"
runFrame.job_id = "SD6F3S72DJ26236KFS"
runFrame.job_name = "swtest-home-jwelborn_rqd_test"
runFrame.frame_id = "FD1S3I154O646UGSNN" + str(random.randint(0, 99999))
runFrame.frame_name = "0001-preprocess"
# Script output is not buffered due to python -u option
runFrame.command = "/net/people/jwelborn/test_python_u_mac -t 5 -e 0"
runFrame.user_name = "jwelborn"
runFrame.log_dir = "/tmp" # This would be on the shottree
runFrame.show = "swtest"
runFrame.shot = "home"
runFrame.uid = 10164
runFrame.num_cores = 1
rqdHost.launchFrame(runFrame)
if __name__ == "__main__":
main()
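# Illustrative command lines (hostname is hypothetical; flags as defined above):
#
#     cuerqd.py render01 -s              # print the host status report
#     cuerqd.py render01 -v              # print the RQD version tag
#     cuerqd.py render01 --nimbyoff      # let the host start processing again
#     cuerqd.py render01 --lp 2          # lock cores (the value is passed to Lock as a count)
#     cuerqd.py render01 --restart       # restart RQD once the host goes idle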
| [] | [] | ["USER"] | [] | ["USER"] | python | 1 | 0 | |
modin/core/dataframe/pandas/partitioning/partition_manager.py | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Module holding base PartitionManager class - the thing that tracks partitions across the distribution.
The manager also allows manipulating the data - running functions at each partition, shuffle over the distribution, etc.
"""
from abc import ABC
from functools import wraps
import numpy as np
import pandas
import warnings
from modin.error_message import ErrorMessage
from modin.core.storage_formats.pandas.utils import compute_chunksize
from modin.core.dataframe.pandas.utils import concatenate
from modin.config import NPartitions, ProgressBar, BenchmarkMode
import os
def wait_computations_if_benchmark_mode(func):
"""
Make sure a `func` finished its computations in benchmark mode.
Parameters
----------
func : callable
A function that should be performed in synchronous mode.
Returns
-------
callable
Wrapped function that executes eagerly (if benchmark mode) or original `func`.
Notes
-----
`func` should return NumPy array with partitions.
"""
if BenchmarkMode.get():
@wraps(func)
def wait(*args, **kwargs):
"""Wait for computation results."""
result = func(*args, **kwargs)
if isinstance(result, tuple):
partitions = result[0]
else:
partitions = result
# need to go through all the values of the map iterator
# since `wait` does not return anything, we need to explicitly add
# the return `True` value from the lambda
all(map(lambda partition: partition.wait() or True, partitions.flatten()))
return result
return wait
return func
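# Illustrative sketch (mirrors how the decorator is used further below): it is
# stacked under @classmethod on partition-producing methods, e.g.
#
#     @classmethod
#     @wait_computations_if_benchmark_mode
#     def map_partitions(cls, partitions, map_func):
#         ...  # returns a NumPy array of partitions
#
# With BenchmarkMode enabled, every partition in the returned array is waited
# on before the result is handed back, making the call effectively synchronous.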
class PandasDataframePartitionManager(ABC):
"""
Base class for managing the dataframe data layout and operators across the distribution of partitions.
Partition class is the class to use for storing each partition.
Each partition must extend the `PandasDataframePartition` class.
"""
_partition_class = None
# Column partitions class is the class to use to create the column partitions.
_column_partitions_class = None
# Row partitions class is the class to use to create the row partitions.
_row_partition_class = None
@classmethod
def preprocess_func(cls, map_func):
"""
Preprocess a function to be applied to `PandasDataframePartition` objects.
Parameters
----------
map_func : callable
The function to be preprocessed.
Returns
-------
callable
The preprocessed version of the `map_func` provided.
Notes
-----
Preprocessing does not require any specific format, only that the
`PandasDataframePartition.apply` method will recognize it (for the subclass
being used).
If your `PandasDataframePartition` objects assume that a function provided
is serialized or wrapped or in some other format, this is the place
to add that logic. It is possible that this can also just return
`map_func` if the `apply` method of the `PandasDataframePartition` object
you are using does not require any modification to a given function.
"""
return cls._partition_class.preprocess_func(map_func)
# END Abstract Methods
@classmethod
def column_partitions(cls, partitions, full_axis=True):
"""
Get the list of `BaseDataframeAxisPartition` objects representing column-wise partitions.
Parameters
----------
partitions : list-like
List of (smaller) partitions to be combined to column-wise partitions.
full_axis : bool, default: True
Whether or not this partition contains the entire column axis.
Returns
-------
list
A list of `BaseDataframeAxisPartition` objects.
Notes
-----
Each value in this list will be a `BaseDataframeAxisPartition` object.
`BaseDataframeAxisPartition` is located in `axis_partition.py`.
"""
if not isinstance(partitions, list):
partitions = [partitions]
return [
cls._column_partitions_class(col, full_axis=full_axis)
for frame in partitions
for col in frame.T
]
@classmethod
def row_partitions(cls, partitions):
"""
List of `BaseDataframeAxisPartition` objects representing row-wise partitions.
Parameters
----------
partitions : list-like
List of (smaller) partitions to be combined to row-wise partitions.
Returns
-------
list
A list of `BaseDataframeAxisPartition` objects.
Notes
-----
Each value in this list will be a `BaseDataframeAxisPartition` object.
`BaseDataframeAxisPartition` is located in `axis_partition.py`.
"""
if not isinstance(partitions, list):
partitions = [partitions]
return [cls._row_partition_class(row) for frame in partitions for row in frame]
@classmethod
def axis_partition(cls, partitions, axis, full_axis: bool = True):
"""
Logically partition along given axis (columns or rows).
Parameters
----------
partitions : list-like
List of partitions to be combined.
axis : {0, 1}
0 for column partitions, 1 for row partitions.
full_axis : bool, default: True
Whether or not this partition contains the entire column axis.
Returns
-------
list
A list of `BaseDataframeAxisPartition` objects.
"""
make_column_partitions = axis == 0
if not full_axis and not make_column_partitions:
raise NotImplementedError(
(
"Row partitions must contain the entire axis. We don't "
+ "support virtual partitioning for row partitions yet."
)
)
return (
cls.column_partitions(partitions)
if make_column_partitions
else cls.row_partitions(partitions)
)
@classmethod
def groupby_reduce(
cls, axis, partitions, by, map_func, reduce_func, apply_indices=None
):
"""
Groupby data using the `map_func` provided along the `axis` over the `partitions` then reduce using `reduce_func`.
Parameters
----------
axis : {0, 1}
Axis to groupby over.
partitions : NumPy 2D array
Partitions of the ModinFrame to groupby.
by : NumPy 2D array
Partitions of 'by' to broadcast.
map_func : callable
Map function.
reduce_func : callable,
Reduce function.
apply_indices : list of ints, default: None
Indices of `axis ^ 1` to apply function over.
Returns
-------
NumPy array
Partitions with applied groupby.
"""
if apply_indices is not None:
partitions = (
partitions[apply_indices] if axis else partitions[:, apply_indices]
)
if by is not None:
mapped_partitions = cls.broadcast_apply(
axis, map_func, left=partitions, right=by, other_name="other"
)
else:
mapped_partitions = cls.map_partitions(partitions, map_func)
return cls.map_axis_partitions(
axis, mapped_partitions, reduce_func, enumerate_partitions=True
)
@classmethod
@wait_computations_if_benchmark_mode
def broadcast_apply_select_indices(
cls,
axis,
apply_func,
left,
right,
left_indices,
right_indices,
keep_remaining=False,
):
"""
Broadcast the `right` partitions to `left` and apply `apply_func` to selected indices.
Parameters
----------
axis : {0, 1}
Axis to apply and broadcast over.
apply_func : callable
Function to apply.
left : NumPy 2D array
Left partitions.
right : NumPy 2D array
Right partitions.
left_indices : list-like
Indices to apply function to.
right_indices : dictionary of indices of right partitions
Indices of the `right` partitions to bring to each specified left partition.
For example, dict {key: {key1: [0, 1], key2: [5]}} means that for left[key]
you want to broadcast the [right[key1], right[key2]] partitions, with internal
indices for `right` of [[0, 1], [5]].
keep_remaining : bool, default: False
Whether or not to keep the other partitions.
Some operations may want to drop the remaining partitions and
keep only the results.
Returns
-------
NumPy array
An array of partition objects.
Notes
-----
Your internal function must take these kwargs:
[`internal_indices`, `other`, `internal_other_indices`] to work correctly!
"""
if not axis:
partitions_for_apply = left.T
right = right.T
else:
partitions_for_apply = left
[obj.drain_call_queue() for row in right for obj in row]
def get_partitions(index):
"""Grab required partitions and indices from `right` and `right_indices`."""
must_grab = right_indices[index]
partitions_list = np.array([right[i] for i in must_grab.keys()])
indices_list = list(must_grab.values())
return {"other": partitions_list, "internal_other_indices": indices_list}
new_partitions = np.array(
[
partitions_for_apply[i]
if i not in left_indices
else cls._apply_func_to_list_of_partitions_broadcast(
apply_func,
partitions_for_apply[i],
internal_indices=left_indices[i],
**get_partitions(i),
)
for i in range(len(partitions_for_apply))
if i in left_indices or keep_remaining
]
)
if not axis:
new_partitions = new_partitions.T
return new_partitions
@classmethod
@wait_computations_if_benchmark_mode
def broadcast_apply(cls, axis, apply_func, left, right, other_name="r"):
"""
Broadcast the `right` partitions to `left` and apply `apply_func` function.
Parameters
----------
axis : {0, 1}
Axis to apply and broadcast over.
apply_func : callable
Function to apply.
left : NumPy 2D array
Left partitions.
right : NumPy 2D array
Right partitions.
other_name : str, default: "r"
Name of key-value argument for `apply_func` that
is used to pass `right` to `apply_func`.
Returns
-------
NumPy array
An array of partition objects.
Notes
-----
This will often be overridden by implementations. It materializes the
entire partitions of the right and applies them to the left through `apply`.
"""
[obj.drain_call_queue() for row in right for obj in row]
new_right = np.empty(shape=right.shape[axis], dtype=object)
if axis:
right = right.T
for i in range(len(right)):
new_right[i] = pandas.concat(
[right[i][j].get() for j in range(len(right[i]))], axis=axis ^ 1
)
right = new_right.T if axis else new_right
new_partitions = np.array(
[
[
part.apply(
apply_func,
**{other_name: right[col_idx] if axis else right[row_idx]},
)
for col_idx, part in enumerate(left[row_idx])
]
for row_idx in range(len(left))
]
)
return new_partitions
@classmethod
@wait_computations_if_benchmark_mode
def broadcast_axis_partitions(
cls,
axis,
apply_func,
left,
right,
keep_partitioning=False,
apply_indices=None,
enumerate_partitions=False,
lengths=None,
**kwargs,
):
"""
Broadcast the `right` partitions to `left` and apply `apply_func` along full `axis`.
Parameters
----------
axis : {0, 1}
Axis to apply and broadcast over.
apply_func : callable
Function to apply.
left : NumPy 2D array
Left partitions.
right : NumPy 2D array
Right partitions.
keep_partitioning : boolean, default: False
The flag to keep partition boundaries for Modin Frame.
Setting it to True disables shuffling data from one partition to another.
apply_indices : list of ints, default: None
Indices of `axis ^ 1` to apply function over.
enumerate_partitions : bool, default: False
Whether or not to pass partition index into `apply_func`.
Note that `apply_func` must be able to accept `partition_idx` kwarg.
lengths : list of ints, default: None
The list of lengths to shuffle the object.
**kwargs : dict
Additional options that could be used by different engines.
Returns
-------
NumPy array
An array of partition objects.
"""
# Since we are already splitting the DataFrame back up after an
# operation, we will just use this time to compute the number of
# partitions as best we can right now.
if keep_partitioning:
num_splits = len(left) if axis == 0 else len(left.T)
elif lengths:
num_splits = len(lengths)
else:
num_splits = NPartitions.get()
preprocessed_map_func = cls.preprocess_func(apply_func)
left_partitions = cls.axis_partition(left, axis)
right_partitions = None if right is None else cls.axis_partition(right, axis)
# For mapping across the entire axis, we don't maintain partitioning because we
# may want to line the partitioning up with another BlockPartitions object. Since
# we don't need to maintain the partitioning, this gives us the opportunity to
# load-balance the data as well.
kw = {
"num_splits": num_splits,
"other_axis_partition": right_partitions,
}
if lengths:
kw["_lengths"] = lengths
kw["manual_partition"] = True
if apply_indices is None:
apply_indices = np.arange(len(left_partitions))
result_blocks = np.array(
[
left_partitions[i].apply(
preprocessed_map_func,
**kw,
**({"partition_idx": idx} if enumerate_partitions else {}),
**kwargs,
)
for idx, i in enumerate(apply_indices)
]
)
# If we are mapping over columns, they are returned to us the same as
# rows, so we need to transpose the returned 2D NumPy array to return
# the structure to the correct order.
return result_blocks.T if not axis else result_blocks
@classmethod
@wait_computations_if_benchmark_mode
def map_partitions(cls, partitions, map_func):
"""
Apply `map_func` to every partition in `partitions`.
Parameters
----------
partitions : NumPy 2D array
Partitions housing the data of Modin Frame.
map_func : callable
Function to apply.
Returns
-------
NumPy array
An array of partitions
"""
preprocessed_map_func = cls.preprocess_func(map_func)
return np.array(
[
[part.apply(preprocessed_map_func) for part in row_of_parts]
for row_of_parts in partitions
]
)
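# Toy illustration (`P` and `SomeManager` are hypothetical stand-ins for a
# concrete partition class and a concrete manager, not the real Modin types):
#
#     parts = np.array([[P(1), P(2)], [P(3), P(4)]])            # 2x2 grid
#     doubled = SomeManager.map_partitions(parts, lambda df: df * 2)
#     # `doubled` keeps the 2x2 shape; each cell is a new partition whose
#     # result is the original data multiplied by 2.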
@classmethod
@wait_computations_if_benchmark_mode
def lazy_map_partitions(cls, partitions, map_func):
"""
Apply `map_func` to every partition in `partitions` *lazily*.
Parameters
----------
partitions : NumPy 2D array
Partitions of Modin Frame.
map_func : callable
Function to apply.
Returns
-------
NumPy array
An array of partitions
"""
preprocessed_map_func = cls.preprocess_func(map_func)
return np.array(
[
[part.add_to_apply_calls(preprocessed_map_func) for part in row]
for row in partitions
]
)
@classmethod
def map_axis_partitions(
cls,
axis,
partitions,
map_func,
keep_partitioning=False,
lengths=None,
enumerate_partitions=False,
**kwargs,
):
"""
Apply `map_func` to every partition in `partitions` along given `axis`.
Parameters
----------
axis : {0, 1}
Axis to perform the map across (0 - index, 1 - columns).
partitions : NumPy 2D array
Partitions of Modin Frame.
map_func : callable
Function to apply.
keep_partitioning : bool, default: False
Whether to keep partitioning for Modin Frame.
Setting it to True stops data shuffling between partitions.
lengths : list of ints, default: None
List of lengths to shuffle the object.
enumerate_partitions : bool, default: False
Whether or not to pass partition index into `map_func`.
Note that `map_func` must be able to accept `partition_idx` kwarg.
**kwargs : dict
Additional options that could be used by different engines.
Returns
-------
NumPy array
An array of new partitions for Modin Frame.
Notes
-----
This method should be used in the case when `map_func` relies on
some global information about the axis.
"""
return cls.broadcast_axis_partitions(
axis=axis,
left=partitions,
apply_func=map_func,
keep_partitioning=keep_partitioning,
right=None,
lengths=lengths,
enumerate_partitions=enumerate_partitions,
**kwargs,
)
@classmethod
def concat(cls, axis, left_parts, right_parts):
"""
Concatenate the blocks of partitions with another set of blocks.
Parameters
----------
axis : int
The axis to concatenate to.
left_parts : np.ndarray
NumPy array of partitions to concatenate with.
right_parts : np.ndarray or list
NumPy array of partitions to be concatenated.
Returns
-------
np.ndarray
A new NumPy array with concatenated partitions.
Notes
-----
Assumes that the blocks are already the same shape on the
dimension being concatenated. A ValueError will be thrown if this
condition is not met.
"""
# TODO: Possible change is `isinstance(right_parts, list)`
if type(right_parts) is list:
# `np.array` with partitions of empty ModinFrame has a shape (0,)
# but `np.concatenate` can concatenate arrays only if their shapes along the
# specified axis are equal, so we filter out empty frames to avoid a concat error
right_parts = [o for o in right_parts if o.size != 0]
to_concat = (
[left_parts] + right_parts if left_parts.size != 0 else right_parts
)
return (
np.concatenate(to_concat, axis=axis) if len(to_concat) else left_parts
)
else:
return np.append(left_parts, right_parts, axis=axis)
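# Shape sketch (plain object arrays standing in for partition grids): with
# axis=0, a (2, 3) `left_parts` concatenated with a (1, 3) block from
# `right_parts` yields a (3, 3) result; a (0,)-shaped entry (the partitions of
# an empty frame) is filtered out first so np.concatenate does not fail on
# mismatched shapes.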
@classmethod
def to_pandas(cls, partitions):
"""
Convert NumPy array of PandasDataframePartition to pandas DataFrame.
Parameters
----------
partitions : np.ndarray
NumPy array of PandasDataframePartition.
Returns
-------
pandas.DataFrame
A pandas DataFrame
"""
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
if all(
isinstance(part, pandas.Series) for row in retrieved_objects for part in row
):
axis = 0
elif all(
isinstance(part, pandas.DataFrame)
for row in retrieved_objects
for part in row
):
axis = 1
else:
ErrorMessage.catch_bugs_and_request_email(True)
df_rows = [
pandas.concat([part for part in row], axis=axis)
for row in retrieved_objects
if not all(part.empty for part in row)
]
if len(df_rows) == 0:
return pandas.DataFrame()
else:
return concatenate(df_rows)
@classmethod
def to_numpy(cls, partitions, **kwargs):
"""
Convert NumPy array of PandasDataframePartition to NumPy array of data stored within `partitions`.
Parameters
----------
partitions : np.ndarray
NumPy array of PandasDataframePartition.
**kwargs : dict
Keyword arguments for PandasDataframePartition.to_numpy function.
Returns
-------
np.ndarray
A NumPy array.
"""
return np.block(
[[block.to_numpy(**kwargs) for block in row] for row in partitions]
)
@classmethod
@wait_computations_if_benchmark_mode
def from_pandas(cls, df, return_dims=False):
"""
Return the partitions from pandas.DataFrame.
Parameters
----------
df : pandas.DataFrame
A pandas.DataFrame.
return_dims : bool, default: False
If it's True, return as (np.ndarray, row_lengths, col_widths),
else np.ndarray.
Returns
-------
np.ndarray or (np.ndarray, row_lengths, col_widths)
A NumPy array with partitions (with dimensions or not).
"""
def update_bar(pbar, f):
if ProgressBar.get():
pbar.update(1)
return f
num_splits = NPartitions.get()
put_func = cls._partition_class.put
row_chunksize = compute_chunksize(df.shape[0], num_splits)
col_chunksize = compute_chunksize(df.shape[1], num_splits)
bar_format = (
"{l_bar}{bar}{r_bar}"
if os.environ.get("DEBUG_PROGRESS_BAR", "False") == "True"
else "{desc}: {percentage:3.0f}%{bar} Elapsed time: {elapsed}, estimated remaining time: {remaining}"
)
if ProgressBar.get():
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
from tqdm.autonotebook import tqdm as tqdm_notebook
except ImportError:
raise ImportError("Please pip install tqdm to use the progress bar")
rows = max(1, round(len(df) / row_chunksize))
cols = max(1, round(len(df.columns) / col_chunksize))
update_count = rows * cols
pbar = tqdm_notebook(
total=round(update_count),
desc="Distributing Dataframe",
bar_format=bar_format,
)
else:
pbar = None
parts = [
[
update_bar(
pbar,
put_func(
df.iloc[i : i + row_chunksize, j : j + col_chunksize].copy()
),
)
for j in range(0, len(df.columns), col_chunksize)
]
for i in range(0, len(df), row_chunksize)
]
if ProgressBar.get():
pbar.close()
if not return_dims:
return np.array(parts)
else:
row_lengths = [
row_chunksize
if i + row_chunksize < len(df)
else len(df) % row_chunksize or row_chunksize
for i in range(0, len(df), row_chunksize)
]
col_widths = [
col_chunksize
if i + col_chunksize < len(df.columns)
else len(df.columns) % col_chunksize or col_chunksize
for i in range(0, len(df.columns), col_chunksize)
]
return np.array(parts), row_lengths, col_widths
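# Worked example of the dimension bookkeeping above: for a 10-row frame with
# row_chunksize=3, the row slices start at 0, 3, 6 and 9, so
# row_lengths == [3, 3, 3, 1] (the last entry is 10 % 3, per the fallback
# expression above). Column widths are computed the same way along axis 1.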
@classmethod
def from_arrow(cls, at, return_dims=False):
"""
Return the partitions from Apache Arrow (PyArrow).
Parameters
----------
at : pyarrow.table
Arrow Table.
return_dims : bool, default: False
If it's True, return as (np.ndarray, row_lengths, col_widths),
else np.ndarray.
Returns
-------
np.ndarray or (np.ndarray, row_lengths, col_widths)
A NumPy array with partitions (with dimensions or not).
"""
return cls.from_pandas(at.to_pandas(), return_dims=return_dims)
@classmethod
def get_indices(cls, axis, partitions, index_func=None):
"""
Get the internal indices stored in the partitions.
Parameters
----------
axis : {0, 1}
Axis to extract the labels over.
partitions : np.ndarray
NumPy array with PandasDataframePartition's.
index_func : callable, default: None
The function to be used to extract the indices.
Returns
-------
pandas.Index
A pandas Index object.
Notes
-----
These are the global indices of the object. This is mostly useful
when you have deleted rows/columns internally, but do not know
which ones were deleted.
"""
ErrorMessage.catch_bugs_and_request_email(not callable(index_func))
func = cls.preprocess_func(index_func)
if axis == 0:
new_idx = (
[idx.apply(func).get() for idx in partitions.T[0]]
if len(partitions.T)
else []
)
else:
new_idx = (
[idx.apply(func).get() for idx in partitions[0]]
if len(partitions)
else []
)
# TODO FIX INFORMATION LEAK!!!!1!!1!!
return new_idx[0].append(new_idx[1:]) if len(new_idx) else new_idx
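# Note on the final line: `pandas.Index.append` accepts a list of indices, so
# e.g. pandas.Index([0, 1]).append([pandas.Index([2]), pandas.Index([3, 4])])
# produces an index containing [0, 1, 2, 3, 4], stitching the per-partition
# labels back into one global index.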
@classmethod
def _apply_func_to_list_of_partitions_broadcast(
cls, func, partitions, other, **kwargs
):
"""
Apply a function to a list of remote partitions.
`other` partitions will be broadcasted to `partitions`
and `func` will be applied.
Parameters
----------
func : callable
The func to apply.
partitions : np.ndarray
The partitions to which the `func` will apply.
other : np.ndarray
The partitions to be broadcasted to `partitions`.
**kwargs : dict
Keyword arguments for PandasDataframePartition.apply function.
Returns
-------
list
A list of PandasDataframePartition objects.
"""
preprocessed_func = cls.preprocess_func(func)
return [
obj.apply(preprocessed_func, other=[o.get() for o in broadcasted], **kwargs)
for obj, broadcasted in zip(partitions, other.T)
]
@classmethod
def _apply_func_to_list_of_partitions(cls, func, partitions, **kwargs):
"""
Apply a function to a list of remote partitions.
Parameters
----------
func : callable
The func to apply.
partitions : np.ndarray
The partitions to which the `func` will apply.
**kwargs : dict
Keyword arguments for PandasDataframePartition.apply function.
Returns
-------
list
A list of PandasDataframePartition objects.
Notes
-----
This preprocesses the `func` first before applying it to the partitions.
"""
preprocessed_func = cls.preprocess_func(func)
return [obj.apply(preprocessed_func, **kwargs) for obj in partitions]
@classmethod
@wait_computations_if_benchmark_mode
def apply_func_to_select_indices(
cls, axis, partitions, func, indices, keep_remaining=False
):
"""
Apply a function to select indices.
Parameters
----------
axis : {0, 1}
Axis to apply the `func` over.
partitions : np.ndarray
The partitions to which the `func` will apply.
func : callable
The function to apply to these indices of partitions.
indices : dict
The indices to apply the function to.
keep_remaining : bool, default: False
Whether or not to keep the other partitions. Some operations
may want to drop the remaining partitions and keep
only the results.
Returns
-------
np.ndarray
A NumPy array with partitions.
Notes
-----
Your internal function must take a kwarg `internal_indices` for
this to work correctly. This prevents information leakage of the
internal index to the external representation.
"""
if partitions.size == 0:
return np.array([[]])
# Handling dictionaries has to be done differently, but we still want
# to figure out the partitions that need to be applied to, so we will
# store the dictionary in a separate variable and assign `indices` to
# the keys to handle it the same as we normally would.
if isinstance(func, dict):
dict_func = func
else:
dict_func = None
if not axis:
partitions_for_apply = partitions.T
else:
partitions_for_apply = partitions
# We may have a command to perform different functions on different
# columns at the same time. We attempt to handle this as efficiently as
# possible here. Functions that use this in the dictionary format must
# accept a keyword argument `func_dict`.
if dict_func is not None:
if not keep_remaining:
result = np.array(
[
cls._apply_func_to_list_of_partitions(
func,
partitions_for_apply[o_idx],
func_dict={
i_idx: dict_func[i_idx]
for i_idx in list_to_apply
if i_idx >= 0
},
)
for o_idx, list_to_apply in indices.items()
]
)
else:
result = np.array(
[
partitions_for_apply[i]
if i not in indices
else cls._apply_func_to_list_of_partitions(
func,
partitions_for_apply[i],
func_dict={
idx: dict_func[idx] for idx in indices[i] if idx >= 0
},
)
for i in range(len(partitions_for_apply))
]
)
else:
if not keep_remaining:
# We are passing internal indices in here. In order for func to
# actually be able to use this information, it must be able to take in
# the internal indices. This might mean an iloc in the case of Pandas
# or some other way to index into the internal representation.
result = np.array(
[
cls._apply_func_to_list_of_partitions(
func,
partitions_for_apply[idx],
internal_indices=list_to_apply,
)
for idx, list_to_apply in indices.items()
]
)
else:
# The difference here is that we modify a subset and return the
# remaining (non-updated) blocks in their original position.
result = np.array(
[
partitions_for_apply[i]
if i not in indices
else cls._apply_func_to_list_of_partitions(
func, partitions_for_apply[i], internal_indices=indices[i]
)
for i in range(len(partitions_for_apply))
]
)
return result.T if not axis else result
@classmethod
@wait_computations_if_benchmark_mode
def apply_func_to_select_indices_along_full_axis(
cls, axis, partitions, func, indices, keep_remaining=False
):
"""
Apply a function to a select subset of full columns/rows.
Parameters
----------
axis : {0, 1}
The axis to apply the function over.
partitions : np.ndarray
The partitions to which the `func` will apply.
func : callable
The function to apply.
indices : list-like
The global indices to apply the func to.
keep_remaining : bool, default: False
Whether or not to keep the other partitions.
Some operations may want to drop the remaining partitions and
keep only the results.
Returns
-------
np.ndarray
A NumPy array with partitions.
Notes
-----
This should be used when you need to apply a function that relies
on some global information for the entire column/row, but only need
to apply a function to a subset.
For your func to operate directly on the indices provided,
it must use `internal_indices` as a keyword argument.
"""
if partitions.size == 0:
return np.array([[]])
# Handling dictionaries has to be done differently, but we still want
# to figure out the partitions that need to be applied to, so we will
# store the dictionary in a separate variable and assign `indices` to
# the keys to handle it the same as we normally would.
if isinstance(func, dict):
dict_func = func
else:
dict_func = None
preprocessed_func = cls.preprocess_func(func)
# Since we might be keeping the remaining blocks that are not modified,
# we have to also keep the block_partitions object in the correct
# direction (transpose for columns).
if not keep_remaining:
selected_partitions = partitions.T if not axis else partitions
selected_partitions = np.array([selected_partitions[i] for i in indices])
selected_partitions = (
selected_partitions.T if not axis else selected_partitions
)
else:
selected_partitions = partitions
if not axis:
partitions_for_apply = cls.column_partitions(selected_partitions)
partitions_for_remaining = partitions.T
else:
partitions_for_apply = cls.row_partitions(selected_partitions)
partitions_for_remaining = partitions
# We may have a command to perform different functions on different
# columns at the same time. We attempt to handle this as efficiently as
# possible here. Functions that use this in the dictionary format must
# accept a keyword argument `func_dict`.
if dict_func is not None:
if not keep_remaining:
result = np.array(
[
part.apply(
preprocessed_func,
func_dict={idx: dict_func[idx] for idx in indices[i]},
)
for i, part in zip(indices, partitions_for_apply)
]
)
else:
result = np.array(
[
partitions_for_remaining[i]
if i not in indices
else cls._apply_func_to_list_of_partitions(
preprocessed_func,
partitions_for_apply[i],
func_dict={idx: dict_func[idx] for idx in indices[i]},
)
for i in range(len(partitions_for_apply))
]
)
else:
if not keep_remaining:
# See notes in `apply_func_to_select_indices`
result = np.array(
[
part.apply(preprocessed_func, internal_indices=indices[i])
for i, part in zip(indices, partitions_for_apply)
]
)
else:
# See notes in `apply_func_to_select_indices`
result = np.array(
[
partitions_for_remaining[i]
if i not in indices
else partitions_for_apply[i].apply(
preprocessed_func, internal_indices=indices[i]
)
for i in range(len(partitions_for_remaining))
]
)
return result.T if not axis else result
@classmethod
@wait_computations_if_benchmark_mode
def apply_func_to_indices_both_axis(
cls,
partitions,
func,
row_partitions_list,
col_partitions_list,
item_to_distribute=None,
row_lengths=None,
col_widths=None,
):
"""
Apply a function along both axes.
Parameters
----------
partitions : np.ndarray
The partitions to which the `func` will apply.
func : callable
The function to apply.
row_partitions_list : iterable of tuples
Iterable of tuples, containing 2 values:
1. Integer row partition index.
2. Internal row indexer of this partition.
col_partitions_list : iterable of tuples
Iterable of tuples, containing 2 values:
1. Integer column partition index.
2. Internal column indexer of this partition.
item_to_distribute : item, default: None
The item to split up so it can be applied over both axes.
row_lengths : list of ints, optional
Lengths of partitions for every row. If not specified this information
is extracted from partitions itself.
col_widths : list of ints, optional
Widths of partitions for every column. If not specified this information
is extracted from partitions itself.
Returns
-------
np.ndarray
A NumPy array with partitions.
Notes
-----
For your func to operate directly on the indices provided,
it must use `row_internal_indices`, `col_internal_indices` as keyword
arguments.
"""
partition_copy = partitions.copy()
row_position_counter = 0
if row_lengths is None:
row_lengths = [None] * len(row_partitions_list)
if col_widths is None:
col_widths = [None] * len(col_partitions_list)
def compute_part_size(indexer, remote_part, part_idx, axis):
"""Compute indexer length along the specified axis for the passed partition."""
if isinstance(indexer, slice):
shapes_container = row_lengths if axis == 0 else col_widths
part_size = shapes_container[part_idx]
if part_size is None:
part_size = (
remote_part.length() if axis == 0 else remote_part.width()
)
shapes_container[part_idx] = part_size
indexer = range(*indexer.indices(part_size))
return len(indexer)
for row_idx, row_values in enumerate(row_partitions_list):
row_blk_idx, row_internal_idx = row_values
col_position_counter = 0
for col_idx, col_values in enumerate(col_partitions_list):
col_blk_idx, col_internal_idx = col_values
remote_part = partition_copy[row_blk_idx, col_blk_idx]
row_offset = compute_part_size(
row_internal_idx, remote_part, row_idx, axis=0
)
col_offset = compute_part_size(
col_internal_idx, remote_part, col_idx, axis=1
)
# We want to eventually make item_to_distribute an np.ndarray,
# but that doesn't work for setting a subset of a categorical
# column, as per https://github.com/modin-project/modin/issues/3736.
# In that case, `item` is not an ndarray but instead some
# categorical variable, which we don't need to distribute
# at all. Note that np.ndarray is not hashable, so it can't
# be a categorical variable.
# TODO(https://github.com/pandas-dev/pandas/issues/44703): Delete
# this special case once the pandas bug is fixed.
if item_to_distribute is not None:
if isinstance(item_to_distribute, np.ndarray):
item = item_to_distribute[
row_position_counter : row_position_counter + row_offset,
col_position_counter : col_position_counter + col_offset,
]
else:
item = item_to_distribute
item = {"item": item}
else:
item = {}
block_result = remote_part.add_to_apply_calls(
func,
row_internal_indices=row_internal_idx,
col_internal_indices=col_internal_idx,
**item,
)
partition_copy[row_blk_idx, col_blk_idx] = block_result
col_position_counter += col_offset
row_position_counter += row_offset
return partition_copy
@classmethod
@wait_computations_if_benchmark_mode
def binary_operation(cls, axis, left, func, right):
"""
Apply a function that requires two PandasDataframe objects.
Parameters
----------
axis : {0, 1}
The axis to apply the function over (0 - rows, 1 - columns).
left : np.ndarray
The partitions of left PandasDataframe.
func : callable
The function to apply.
right : np.ndarray
The partitions of right PandasDataframe.
Returns
-------
np.ndarray
A NumPy array with new partitions.
"""
if axis:
left_partitions = cls.row_partitions(left)
right_partitions = cls.row_partitions(right)
else:
left_partitions = cls.column_partitions(left)
right_partitions = cls.column_partitions(right)
func = cls.preprocess_func(func)
result = np.array(
[
left_partitions[i].apply(
func,
num_splits=NPartitions.get(),
other_axis_partition=right_partitions[i],
)
for i in range(len(left_partitions))
]
)
return result if axis else result.T
@classmethod
@wait_computations_if_benchmark_mode
def finalize(cls, partitions):
"""
Perform all deferred calls on partitions.
Parameters
----------
partitions : np.ndarray
Partitions of Modin Dataframe on which all deferred calls should be performed.
"""
[part.drain_call_queue() for row in partitions for part in row]
@classmethod
def rebalance_partitions(cls, partitions):
"""
Return the provided array of partitions without rebalancing it.
Parameters
----------
partitions : np.ndarray
The 2-d array of partitions to rebalance.
Returns
-------
np.ndarray
The same 2-d array.
"""
return partitions
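# Illustrative sketch (class names are hypothetical): a concrete manager only
# needs to wire in its engine-specific partition classes; everything above is
# inherited.
#
#     class MyEnginePartitionManager(PandasDataframePartitionManager):
#         _partition_class = MyEnginePartition
#         _column_partitions_class = MyEngineColumnAxisPartition
#         _row_partition_class = MyEngineRowAxisPartition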
| [] | [] | ["DEBUG_PROGRESS_BAR"] | [] | ["DEBUG_PROGRESS_BAR"] | python | 1 | 0 | |
instance.go | package tweed
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
)
const (
// don't forget to update the api/responses.go copies as well!
QuietState = "quiet"
ProvisioningState = "provisioning"
DeprovisioningState = "deprovisioning"
GoneState = "gone"
BindingState = "binding"
UnbindingState = "unbinding"
)
type Instance struct {
ID string
Plan *Plan
State string
Root string
Prefix string
VaultPrefix string
UserParameters map[string]interface{}
Bindings map[string]map[string]interface{}
Tasks []*task
}
type instancemf struct {
Tweed struct {
Prefix string `json:"prefix"`
Instance string `json:"instance"`
Service string `json:"service"`
ServiceID string `json:"service_id"`
Plan string `json:"plan"`
PlanID string `json:"plan_id"`
Vault string `json:"vault"`
Ops map[string]interface{} `json:"ops"`
User map[string]interface{} `json:"user"`
} `json:"tweed"`
}
func (i *Instance) path(rel string) string {
return fmt.Sprintf("%s/%s", i.Root, rel)
}
func (i *Instance) my(rel string) string {
if rel != "" {
rel = "/" + strings.TrimPrefix(rel, "/")
}
return i.path("data/instances/" + i.ID + rel)
}
func (i *Instance) env(env []string) []string {
env = append(env, "HOME="+i.path(""))
env = append(env, "PATH="+os.Getenv("PATH"))
env = append(env, "LANG="+os.Getenv("LANG"))
env = append(env, "INFRASTRUCTURE="+i.path("etc/infrastructures/"+i.Plan.Tweed.Infrastructure))
env = append(env, "STENCIL="+i.path("etc/stencils/"+i.Plan.Tweed.Stencil))
env = append(env, "WORKSPACE="+i.my(""))
env = append(env, "VAULT="+i.VaultPrefix+"/"+i.ID)
env = append(env, "INPUTS=instance.mf")
return env
}
func ParseInstance(cat Catalog, root string, b []byte) (Instance, error) {
var in instancemf
err := json.Unmarshal(b, &in)
if err != nil {
return Instance{}, err
}
p, err := cat.FindPlan(in.Tweed.ServiceID, in.Tweed.PlanID)
if err != nil {
return Instance{}, err
}
inst := Instance{
ID: in.Tweed.Instance,
Root: root,
Plan: p,
UserParameters: in.Tweed.User,
State: QuietState,
}
b, err = ioutil.ReadFile(inst.my("lifecycle/data/state"))
if err == nil {
inst.State = strings.TrimSpace(string(b))
}
return inst, nil
}
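// Illustrative instance.mf document (values are hypothetical) in the shape
// ParseInstance expects, matching the instancemf struct above:
//
//	{
//	  "tweed": {
//	    "prefix":     "tweed",
//	    "instance":   "i-abc123",
//	    "service":    "redis",
//	    "service_id": "svc-1",
//	    "plan":       "small",
//	    "plan_id":    "plan-1",
//	    "vault":      "secret/tweed/i-abc123",
//	    "ops":        {"memory": "256m"},
//	    "user":       {}
//	  }
//	}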
func (i *Instance) lookupBindings(id string) error {
if i.Bindings == nil {
i.Bindings = make(map[string]map[string]interface{})
}
b, err := run1(Exec{
Run: i.path("bin/bindings"),
Env: i.env([]string{"BINDING=" + id}),
})
if err != nil {
return err
}
var all map[string]map[string]interface{}
err = json.Unmarshal(b, &all)
if err != nil {
return err
}
for _, bindings := range all {
for id, raw := range bindings {
s, ok := raw.(string)
if !ok {
return fmt.Errorf("binding %s/%s is not a string")
}
var v map[string]interface{}
if err := json.Unmarshal([]byte(s), &v); err != nil {
return err
}
i.Bindings[id] = v
}
}
return nil
}
func (i *Instance) LookupBindings() error {
return i.lookupBindings("")
}
func (i *Instance) LookupBinding(id string) error {
return i.lookupBindings(id)
}
func (i *Instance) Log() string {
b, _ := ioutil.ReadFile(i.my("log"))
return string(b)
}
func (i *Instance) do(cmd, begin, middle, end string) (*task, error) {
if begin != "" && i.State != begin {
return nil, fmt.Errorf("service instance '%s' is currently %s", i.ID, i.State)
}
i.State = middle
t := background(Exec{
Run: i.path(cmd),
Env: i.env(nil),
}, func() {
fmt.Printf("updating state to '%s'\n", end)
i.State = end
})
i.Tasks = append(i.Tasks, t)
return t, nil
}
func (i *Instance) Provision() (*task, error) {
if err := i.Viable(); err != nil {
return nil, err
}
var out instancemf
out.Tweed.Prefix = i.Prefix
out.Tweed.Instance = i.ID
out.Tweed.Service = i.Plan.Service.Name
out.Tweed.ServiceID = i.Plan.Service.ID
out.Tweed.Plan = i.Plan.Name
out.Tweed.PlanID = i.Plan.ID
out.Tweed.Vault = `(( concat "` + i.VaultPrefix + `/" tweed.instance ))`
out.Tweed.Ops = i.Plan.Tweed.Config
out.Tweed.User = i.UserParameters
input, err := json.Marshal(out)
if err != nil {
return nil, err
}
root := i.my("")
if err := os.MkdirAll(root, 0755); err != nil {
return nil, err
}
if err := ioutil.WriteFile(root+"/instance.mf", input, 0666); err != nil {
return nil, err
}
return i.do("bin/provision", "", ProvisioningState, QuietState)
}
func (i *Instance) Bind(id string) (*task, error) {
if i.State != QuietState {
return nil, fmt.Errorf("service instance '%s' is currently %s", i.ID, i.State)
}
if err := i.Viable(); err != nil {
return nil, err
}
i.State = BindingState
t := background(Exec{
Run: i.path("bin/bind"),
Env: i.env([]string{
"BINDING=" + id,
"OVERRIDES=" + i.CredentialOverrides(),
}),
}, func() {
i.State = QuietState
if err := i.LookupBinding(id); err != nil {
fmt.Fprintf(os.Stderr, "failed to look up newly-created binding %s/%s: %s", i.ID, id, err)
}
})
i.Tasks = append(i.Tasks, t)
return t, nil
}
func (i *Instance) Unbind(id string) (*task, error) {
if i.State != QuietState {
return nil, fmt.Errorf("service instance '%s' is currently %s", i.ID, i.State)
}
if err := i.Viable(); err != nil {
return nil, err
}
i.State = UnbindingState
t := background(Exec{
Run: i.path("bin/unbind"),
Env: i.env([]string{"BINDING=" + id}),
}, func() {
i.State = QuietState
delete(i.Bindings, id)
})
i.Tasks = append(i.Tasks, t)
return t, nil
}
func (i *Instance) Deprovision() (*task, error) {
if err := i.Viable(); err != nil {
return nil, err
}
return i.do("bin/deprovision", QuietState, DeprovisioningState, GoneState)
}
func (i *Instance) Purge() error {
if i.State != GoneState {
return fmt.Errorf("service instance '%s' is currently %s", i.ID, i.State)
}
return os.RemoveAll(i.my(""))
}
func (i *Instance) Viable() error {
out, err := run1(Exec{
Run: i.path("bin/viable"),
Env: i.env(nil),
})
if err != nil {
return fmt.Errorf("stencil viability check failed: %s", string(out))
}
return nil
}
func (i *Instance) CredentialOverrides() string {
if i.Plan.Tweed.Credentials == nil {
return `{}`
}
out := map[string]interface{}{
"credentials": i.Plan.Tweed.Credentials,
}
if b, err := json.Marshal(&out); err != nil {
return `{}`
} else {
return string(b)
}
}
func (i *Instance) Files() ([]File, error) {
out, err := run1(Exec{
Run: i.path("bin/files"),
Env: i.env(nil),
})
if err != nil {
return nil, err
}
var f struct {
Files []File `json:"files"`
}
if err := json.Unmarshal(out, &f); err != nil {
return nil, err
}
return f.Files, nil
}
func (i Instance) IsBusy() bool {
return i.State == ProvisioningState || i.State == DeprovisioningState || i.State == BindingState || i.State == UnbindingState
}
func (i Instance) IsQuiet() bool {
return i.State == QuietState
}
func (i Instance) IsGone() bool {
return i.State == GoneState
}
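// Illustrative lifecycle (IDs and paths are hypothetical, `plan` is assumed to
// come from a catalog lookup, and error handling is elided): provision an
// instance, bind it, then tear it down.
//
//	inst := Instance{ID: "i-abc123", Root: "/tweed", Plan: plan, VaultPrefix: "secret/tweed"}
//	task, _ := inst.Provision()   // state: provisioning -> quiet
//	_, _ = inst.Bind("b-1")       // state: binding -> quiet
//	_, _ = inst.Unbind("b-1")     // state: unbinding -> quiet
//	_, _ = inst.Deprovision()     // state: deprovisioning -> gone
//	_ = inst.Purge()              // removes the on-disk workspace
//	_ = task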
| ["\"PATH\"", "\"LANG\""] | [] | ["LANG", "PATH"] | [] | ["LANG", "PATH"] | go | 2 | 0 | |
tests/test_job.py | # -*- encoding: utf-8 -*-
# Copyright 2009-2013 Yelp and Contributors
# Copyright 2015 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit testing of MRJob."""
import os
import sys
import time
from io import BytesIO
from subprocess import Popen
from subprocess import PIPE
from mrjob.conf import combine_envs
from mrjob.job import MRJob
from mrjob.job import UsageError
from mrjob.job import _im_func
from mrjob.parse import parse_mr_job_stderr
from mrjob.protocol import BytesValueProtocol
from mrjob.protocol import JSONProtocol
from mrjob.protocol import JSONValueProtocol
from mrjob.protocol import PickleProtocol
from mrjob.protocol import RawValueProtocol
from mrjob.protocol import ReprProtocol
from mrjob.protocol import ReprValueProtocol
from mrjob.protocol import StandardJSONProtocol
from mrjob.py2 import StringIO
from mrjob.step import _IDENTITY_MAPPER
from mrjob.step import _IDENTITY_REDUCER
from mrjob.step import JarStep
from mrjob.step import MRStep
from mrjob.util import log_to_stream
from tests.mr_hadoop_format_job import MRHadoopFormatJob
from tests.mr_tower_of_powers import MRTowerOfPowers
from tests.mr_two_step_job import MRTwoStepJob
from tests.py2 import TestCase
from tests.quiet import logger_disabled
from tests.quiet import no_handlers_for_logger
from tests.sandbox import EmptyMrjobConfTestCase
from tests.sandbox import mrjob_conf_patcher
from tests.sandbox import SandboxedTestCase
# These can't be invoked as a separate script, but they don't need to be
class MRBoringJob(MRJob):
"""It's a boring job, but somebody had to do it."""
def mapper(self, key, value):
yield(key, value)
def reducer(self, key, values):
yield(key, list(values))
class MRInitJob(MRJob):
def __init__(self, *args, **kwargs):
super(MRInitJob, self).__init__(*args, **kwargs)
self.sum_amount = 0
self.multiplier = 0
self.combiner_multiplier = 1
def mapper_init(self):
self.sum_amount += 10
def mapper(self, key, value):
yield(None, self.sum_amount)
def reducer_init(self):
self.multiplier += 10
def reducer(self, key, values):
yield(None, sum(values) * self.multiplier)
def combiner_init(self):
self.combiner_multiplier = 2
def combiner(self, key, values):
yield(None, sum(values) * self.combiner_multiplier)
### Test cases ###
class MRInitTestCase(EmptyMrjobConfTestCase):
def test_mapper(self):
j = MRInitJob()
j.mapper_init()
self.assertEqual(next(j.mapper(None, None)), (None, j.sum_amount))
def test_init_funcs(self):
num_inputs = 2
stdin = BytesIO(b"x\n" * num_inputs)
mr_job = MRInitJob(['-r', 'inline', '-'])
mr_job.sandbox(stdin=stdin)
results = []
with mr_job.make_runner() as runner:
runner.run()
for line in runner.stream_output():
key, value = mr_job.parse_output_line(line)
results.append(value)
# these numbers should match if mapper_init, reducer_init, and
# combiner_init were called as expected
self.assertEqual(results[0], num_inputs * 10 * 10 * 2)
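# Arithmetic behind the expected value above: 2 input lines each make the
# mapper emit 10 (sum_amount after mapper_init), the combiner multiplies their
# sum by 2, and the reducer multiplies the combined total by 10, giving
# 2 * 10 * 2 * 10 == 400 == num_inputs * 10 * 10 * 2.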
class NoTzsetTestCase(TestCase):
def setUp(self):
self.remove_time_tzset()
def tearDown(self):
"""Test systems without time.tzset() (e.g. Windows). See Issue #46."""
self.restore_time_tzset()
def remove_time_tzset(self):
if hasattr(time, 'tzset'):
self._real_time_tzset = time.tzset
del time.tzset
def restore_time_tzset(self):
if hasattr(self, '_real_time_tzset'):
time.tzset = self._real_time_tzset
def test_init_does_not_require_tzset(self):
MRJob()
class CountersAndStatusTestCase(TestCase):
def test_counters_and_status(self):
mr_job = MRJob().sandbox()
mr_job.increment_counter('Foo', 'Bar')
mr_job.set_status('Initializing qux gradients...')
mr_job.increment_counter('Foo', 'Bar')
mr_job.increment_counter('Foo', 'Baz', 20)
mr_job.set_status('Sorting metasyntactic variables...')
parsed_stderr = parse_mr_job_stderr(mr_job.stderr.getvalue())
self.assertEqual(parsed_stderr,
{'counters': {'Foo': {'Bar': 2, 'Baz': 20}},
'statuses': ['Initializing qux gradients...',
'Sorting metasyntactic variables...'],
'other': []})
def test_unicode_set_status(self):
mr_job = MRJob().sandbox()
# shouldn't raise an exception
mr_job.set_status(u'💩')
def test_unicode_counter(self):
mr_job = MRJob().sandbox()
# shouldn't raise an exception
mr_job.increment_counter(u'💩', 'x', 1)
def test_negative_and_zero_counters(self):
mr_job = MRJob().sandbox()
mr_job.increment_counter('Foo', 'Bar', -1)
mr_job.increment_counter('Foo', 'Baz')
mr_job.increment_counter('Foo', 'Baz', -1)
mr_job.increment_counter('Qux', 'Quux', 0)
parsed_stderr = parse_mr_job_stderr(mr_job.stderr.getvalue())
self.assertEqual(parsed_stderr['counters'],
{'Foo': {'Bar': -1, 'Baz': 0}, 'Qux': {'Quux': 0}})
def test_bad_counter_amounts(self):
mr_job = MRJob().sandbox()
self.assertRaises(TypeError,
mr_job.increment_counter, 'Foo', 'Bar', 'two')
self.assertRaises(TypeError,
mr_job.increment_counter, 'Foo', 'Bar', None)
def test_commas_in_counters(self):
# commas should be replaced with semicolons
mr_job = MRJob().sandbox()
mr_job.increment_counter('Bad items', 'a, b, c')
mr_job.increment_counter('girl, interrupted', 'movie')
parsed_stderr = parse_mr_job_stderr(mr_job.stderr.getvalue())
self.assertEqual(parsed_stderr['counters'],
{'Bad items': {'a; b; c': 1},
'girl; interrupted': {'movie': 1}})
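# For reference (Hadoop Streaming counter protocol, which is what
# parse_mr_job_stderr parses): the raw stderr lines behind these assertions
# look roughly like
#     reporter:counter:Foo,Bar,1
#     reporter:status:Initializing qux gradients...
# and repeated counter lines are folded into the totals checked here.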
class ProtocolsTestCase(TestCase):
# not putting these in their own files because we're not going to invoke
# it as a script anyway.
class MRBoringJob2(MRBoringJob):
INPUT_PROTOCOL = StandardJSONProtocol
INTERNAL_PROTOCOL = PickleProtocol
OUTPUT_PROTOCOL = ReprProtocol
class MRBoringJob3(MRBoringJob):
def internal_protocol(self):
return ReprProtocol()
class MRBoringJob4(MRBoringJob):
INTERNAL_PROTOCOL = ReprProtocol
class MRTrivialJob(MRJob):
OUTPUT_PROTOCOL = RawValueProtocol
def mapper(self, key, value):
yield key, value
def assertMethodsEqual(self, fs, gs):
# we're going to use this to match bound against unbound methods
self.assertEqual([_im_func(f) for f in fs],
[_im_func(g) for g in gs])
def test_default_protocols(self):
mr_job = MRBoringJob()
self.assertMethodsEqual(
mr_job.pick_protocols(0, 'mapper'),
(RawValueProtocol.read, JSONProtocol.write))
self.assertMethodsEqual(
mr_job.pick_protocols(0, 'reducer'),
(StandardJSONProtocol.read, JSONProtocol.write))
def test_explicit_default_protocols(self):
mr_job2 = self.MRBoringJob2().sandbox()
self.assertMethodsEqual(mr_job2.pick_protocols(0, 'mapper'),
(JSONProtocol.read, PickleProtocol.write))
self.assertMethodsEqual(mr_job2.pick_protocols(0, 'reducer'),
(PickleProtocol.read, ReprProtocol.write))
mr_job3 = self.MRBoringJob3()
self.assertMethodsEqual(mr_job3.pick_protocols(0, 'mapper'),
(RawValueProtocol.read, ReprProtocol.write))
# output protocol should default to JSON
self.assertMethodsEqual(mr_job3.pick_protocols(0, 'reducer'),
(ReprProtocol.read, JSONProtocol.write))
mr_job4 = self.MRBoringJob4()
self.assertMethodsEqual(mr_job4.pick_protocols(0, 'mapper'),
(RawValueProtocol.read, ReprProtocol.write))
# output protocol should default to JSON
self.assertMethodsEqual(mr_job4.pick_protocols(0, 'reducer'),
(ReprProtocol.read, JSONProtocol.write))
def test_mapper_raw_value_to_json(self):
RAW_INPUT = BytesIO(b'foo\nbar\nbaz\n')
mr_job = MRBoringJob(['--mapper'])
mr_job.sandbox(stdin=RAW_INPUT)
mr_job.run_mapper()
self.assertEqual(mr_job.stdout.getvalue(),
b'null\t"foo"\n' +
b'null\t"bar"\n' +
b'null\t"baz"\n')
def test_reducer_json_to_json(self):
JSON_INPUT = BytesIO(b'"foo"\t"bar"\n' +
b'"foo"\t"baz"\n' +
b'"bar"\t"qux"\n')
mr_job = MRBoringJob(args=['--reducer'])
mr_job.sandbox(stdin=JSON_INPUT)
mr_job.run_reducer()
# ujson doesn't add whitespace to JSON
self.assertEqual(mr_job.stdout.getvalue().replace(b' ', b''),
(b'"foo"\t["bar","baz"]\n' +
b'"bar"\t["qux"]\n'))
def test_output_protocol_with_no_final_reducer(self):
# if there's no reducer, the last mapper should use the
# output protocol (in this case, repr)
RAW_INPUT = BytesIO(b'foo\nbar\nbaz\n')
mr_job = self.MRTrivialJob(['--mapper'])
mr_job.sandbox(stdin=RAW_INPUT)
mr_job.run_mapper()
self.assertEqual(mr_job.stdout.getvalue(),
RAW_INPUT.getvalue())
class StrictProtocolsTestCase(EmptyMrjobConfTestCase):
class MRBoringReprAndJSONJob(MRBoringJob):
# allowing reading in bytes that can't be JSON-encoded
INPUT_PROTOCOL = ReprValueProtocol
INTERNAL_PROTOCOL = StandardJSONProtocol
OUTPUT_PROTOCOL = StandardJSONProtocol
class MRBoringJSONJob(MRJob):
INPUT_PROTOCOL = StandardJSONProtocol
INTERNAL_PROTOCOL = StandardJSONProtocol
OUTPUT_PROTOCOL = StandardJSONProtocol
def reducer(self, key, values):
yield(key, list(values))
BAD_JSON_INPUT = (b'BAD\tJSON\n' +
b'"foo"\t"bar"\n' +
b'"too"\t"many"\t"tabs"\n' +
b'"notabs"\n')
UNENCODABLE_REPR_INPUT = (b"'foo'\n" +
b'set()\n' +
b"'bar'\n")
STRICT_MRJOB_CONF = {'runners': {'inline': {'strict_protocols': True}}}
def assertJobHandlesUndecodableInput(self, job_args):
job = self.MRBoringJSONJob(job_args)
job.sandbox(stdin=BytesIO(self.BAD_JSON_INPUT))
with job.make_runner() as r:
r.run()
# good data should still get through
self.assertEqual(b''.join(r.stream_output()), b'"foo"\t["bar"]\n')
# exception type varies between JSON implementations,
# so just make sure there were three exceptions of some sort
counters = r.counters()[0]
self.assertEqual(sorted(counters), ['Undecodable input'])
self.assertEqual(
sum(counters['Undecodable input'].values()), 3)
def assertJobRaisesExceptionOnUndecodableInput(self, job_args):
job = self.MRBoringJSONJob(job_args)
job.sandbox(stdin=BytesIO(self.BAD_JSON_INPUT))
with job.make_runner() as r:
self.assertRaises(Exception, r.run)
def assertJobHandlesUnencodableOutput(self, job_args):
job = self.MRBoringReprAndJSONJob(job_args)
job.sandbox(stdin=BytesIO(self.UNENCODABLE_REPR_INPUT))
with job.make_runner() as r:
r.run()
# good data should still get through
self.assertEqual(b''.join(r.stream_output()),
b'null\t["bar", "foo"]\n')
counters = r.counters()[0]
# there should be one Unencodable output error. Exception
# type may vary by json implementation
self.assertEqual(list(counters), ['Unencodable output'])
self.assertEqual(list(counters['Unencodable output'].values()), [1])
def assertJobRaisesExceptionOnUnencodableOutput(self, job_args):
job = self.MRBoringReprAndJSONJob(job_args)
job.sandbox(stdin=BytesIO(self.UNENCODABLE_REPR_INPUT))
with job.make_runner() as r:
self.assertRaises(Exception, r.run)
def test_undecodable_input(self):
self.assertJobRaisesExceptionOnUndecodableInput(job_args=[])
def test_undecodable_input_strict(self):
self.assertJobRaisesExceptionOnUndecodableInput(
job_args=['--strict-protocols'])
def test_undecodable_input_strict_in_mrjob_conf(self):
with mrjob_conf_patcher(self.STRICT_MRJOB_CONF):
self.assertJobRaisesExceptionOnUndecodableInput(
job_args=['--strict-protocols'])
def test_undecodable_input_no_strict_protocols(self):
with mrjob_conf_patcher(self.STRICT_MRJOB_CONF):
self.assertJobHandlesUndecodableInput(
job_args=['--no-strict-protocols'])
def test_unencodable_output(self):
self.assertJobRaisesExceptionOnUnencodableOutput(job_args=[])
def test_unencodable_output_strict(self):
self.assertJobRaisesExceptionOnUnencodableOutput(
job_args=['--strict-protocols'])
def test_unencodable_output_strict_in_mrjob_conf(self):
with mrjob_conf_patcher(self.STRICT_MRJOB_CONF):
self.assertJobRaisesExceptionOnUnencodableOutput(
job_args=['--strict-protocols'])
def test_unencodable_output_no_strict_protocols(self):
with mrjob_conf_patcher(self.STRICT_MRJOB_CONF):
self.assertJobHandlesUnencodableOutput(
job_args=['--no-strict-protocols'])
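# Tests that each substep is assigned the right read/write protocol instances
# based on its position in the step list.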
class PickProtocolsTestCase(TestCase):
def _yield_none(self, *args, **kwargs):
yield None
def _make_job(self, steps):
class CustomJob(MRJob):
INPUT_PROTOCOL = PickleProtocol
INTERNAL_PROTOCOL = JSONProtocol
OUTPUT_PROTOCOL = JSONValueProtocol
def steps(self):
return steps
args = ['--no-conf']
return CustomJob(args)
def _assert_script_protocols(self, steps, expected_protocols):
"""Given a list of (read_protocol_class, write_protocol_class) tuples
for *each substep*, assert that the given _steps_desc() output for each
substep matches the protocols in order
"""
j = self._make_job(steps)
for i, step in enumerate(steps):
expected_step = expected_protocols[i]
step_desc = step.description(i)
if step_desc['type'] == 'jar':
# step_type for a non-script step is undefined
self.assertIsNone(expected_step)
else:
for substep_key in ('mapper', 'combiner', 'reducer'):
if substep_key in step_desc:
self.assertIn(substep_key, expected_step)
expected_substep = expected_step[substep_key]
try:
actual_read, actual_write = (
j._pick_protocol_instances(i, substep_key))
except ValueError:
self.assertIsNone(expected_substep)
else:
expected_read, expected_write = expected_substep
self.assertIsInstance(actual_read, expected_read)
self.assertIsInstance(actual_write, expected_write)
else:
self.assertNotIn(substep_key, expected_step)
def test_single_mapper(self):
self._assert_script_protocols(
[MRStep(mapper=self._yield_none)],
[dict(mapper=(PickleProtocol, JSONValueProtocol))])
def test_single_reducer(self):
# MRStep transparently adds mapper
self._assert_script_protocols(
[MRStep(reducer=self._yield_none)],
[dict(mapper=(PickleProtocol, JSONProtocol),
reducer=(JSONProtocol, JSONValueProtocol))])
def test_mapper_combiner(self):
self._assert_script_protocols(
[MRStep(mapper=self._yield_none,
combiner=self._yield_none)],
[dict(mapper=(PickleProtocol, JSONValueProtocol),
combiner=(JSONValueProtocol, JSONValueProtocol))])
def test_mapper_combiner_reducer(self):
self._assert_script_protocols(
[MRStep(
mapper=self._yield_none,
combiner=self._yield_none,
reducer=self._yield_none)],
[dict(mapper=(PickleProtocol, JSONProtocol),
combiner=(JSONProtocol, JSONProtocol),
reducer=(JSONProtocol, JSONValueProtocol))])
def test_begin_jar_step(self):
self._assert_script_protocols(
[JarStep(jar='binks_jar.jar'),
MRStep(
mapper=self._yield_none,
combiner=self._yield_none,
reducer=self._yield_none)],
[None,
dict(mapper=(PickleProtocol, JSONProtocol),
combiner=(JSONProtocol, JSONProtocol),
reducer=(JSONProtocol, JSONValueProtocol))])
def test_end_jar_step(self):
self._assert_script_protocols(
[MRStep(
mapper=self._yield_none,
combiner=self._yield_none,
reducer=self._yield_none),
JarStep(jar='binks_jar.jar')],
[dict(mapper=(PickleProtocol, JSONProtocol),
combiner=(JSONProtocol, JSONProtocol),
reducer=(JSONProtocol, JSONValueProtocol)),
None])
def test_middle_jar_step(self):
self._assert_script_protocols(
[MRStep(
mapper=self._yield_none,
combiner=self._yield_none),
JarStep(jar='binks_jar.jar'),
MRStep(reducer=self._yield_none)],
[dict(mapper=(PickleProtocol, JSONProtocol),
combiner=(JSONProtocol, JSONProtocol)),
None,
dict(reducer=(JSONProtocol, JSONValueProtocol))])
def test_single_mapper_cmd(self):
self._assert_script_protocols(
[MRStep(mapper_cmd='cat')],
[dict(mapper=None)])
def test_single_mapper_cmd_with_script_combiner(self):
self._assert_script_protocols(
[MRStep(
mapper_cmd='cat',
combiner=self._yield_none)],
[dict(mapper=None,
combiner=(RawValueProtocol, RawValueProtocol))])
def test_single_mapper_cmd_with_script_reducer(self):
# reducer is the only script step, so it uses INPUT_PROTOCOL and
# OUTPUT_PROTOCOL
self._assert_script_protocols(
[MRStep(
mapper_cmd='cat',
reducer=self._yield_none)],
[dict(mapper=None,
reducer=(PickleProtocol, JSONValueProtocol))])
def test_multistep(self):
# reducer is the only script step, so it uses INPUT_PROTOCOL and
# OUTPUT_PROTOCOL
self._assert_script_protocols(
[MRStep(mapper_cmd='cat',
reducer=self._yield_none),
JarStep(jar='binks_jar.jar'),
MRStep(mapper=self._yield_none)],
[dict(mapper=None,
reducer=(PickleProtocol, JSONProtocol)),
None,
dict(mapper=(JSONProtocol, JSONValueProtocol))])
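# Tests covering how JOBCONF attributes, jobconf() overrides, and --jobconf
# command line options are combined.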
class JobConfTestCase(TestCase):
class MRJobConfJob(MRJob):
JOBCONF = {'mapred.foo': 'garply',
'mapred.bar.bar.baz': 'foo'}
class MRJobConfMethodJob(MRJob):
def jobconf(self):
return {'mapred.baz': 'bar'}
class MRBoolJobConfJob(MRJob):
JOBCONF = {'true_value': True,
'false_value': False}
class MRHadoopVersionJobConfJob1(MRJob):
JOBCONF = {'hadoop_version': 1.0}
class MRHadoopVersionJobConfJob2(MRJob):
JOBCONF = {'hadoop_version': 0.18}
class MRHadoopVersionJobConfJob3(MRJob):
JOBCONF = {'hadoop_version': 0.20}
def test_empty(self):
mr_job = MRJob()
self.assertEqual(mr_job.job_runner_kwargs()['jobconf'], {})
def test_cmd_line_options(self):
mr_job = MRJob([
'--jobconf', 'mapred.foo=bar',
'--jobconf', 'mapred.foo=baz',
'--jobconf', 'mapred.qux=quux',
])
self.assertEqual(mr_job.job_runner_kwargs()['jobconf'],
{'mapred.foo': 'baz', # second option takes priority
'mapred.qux': 'quux'})
def test_bool_options(self):
mr_job = self.MRBoolJobConfJob()
self.assertEqual(mr_job.jobconf()['true_value'], 'true')
self.assertEqual(mr_job.jobconf()['false_value'], 'false')
def assert_hadoop_version(self, JobClass, version_string):
mr_job = JobClass()
mock_log = StringIO()
with no_handlers_for_logger('mrjob.job'):
log_to_stream('mrjob.job', mock_log)
self.assertEqual(mr_job.jobconf()['hadoop_version'],
version_string)
self.assertIn('should be a string', mock_log.getvalue())
def test_float_options(self):
self.assert_hadoop_version(self.MRHadoopVersionJobConfJob1, '1.0')
def test_float_options_2(self):
self.assert_hadoop_version(self.MRHadoopVersionJobConfJob2, '0.18')
def test_float_options_3(self):
self.assert_hadoop_version(self.MRHadoopVersionJobConfJob3, '0.20')
def test_jobconf_method(self):
mr_job = self.MRJobConfJob()
self.assertEqual(mr_job.job_runner_kwargs()['jobconf'],
{'mapred.foo': 'garply',
'mapred.bar.bar.baz': 'foo'})
def test_jobconf_attr_and_cmd_line_options(self):
mr_job = self.MRJobConfJob([
'--jobconf', 'mapred.foo=bar',
'--jobconf', 'mapred.foo=baz',
'--jobconf', 'mapred.qux=quux',
])
self.assertEqual(mr_job.job_runner_kwargs()['jobconf'],
{'mapred.bar.bar.baz': 'foo',
'mapred.foo': 'baz', # command line takes priority
'mapred.qux': 'quux'})
def test_redefined_jobconf_method(self):
mr_job = self.MRJobConfMethodJob()
self.assertEqual(mr_job.job_runner_kwargs()['jobconf'],
{'mapred.baz': 'bar'})
def test_redefined_jobconf_method_overrides_cmd_line(self):
mr_job = self.MRJobConfMethodJob([
'--jobconf', 'mapred.foo=bar',
'--jobconf', 'mapred.baz=foo',
])
# --jobconf is ignored because that's the way we defined jobconf()
self.assertEqual(mr_job.job_runner_kwargs()['jobconf'],
{'mapred.baz': 'bar'})
class MRSortValuesJob(MRJob):
SORT_VALUES = True
# need to define a mapper or reducer
def mapper_init(self):
pass
class MRSortValuesAndMoreJob(MRSortValuesJob):
PARTITIONER = 'org.apache.hadoop.mapred.lib.HashPartitioner'
JOBCONF = {
'stream.num.map.output.key.fields': 3,
'mapred.output.key.comparator.class':
'org.apache.hadoop.mapred.lib.KeyFieldBasedComparator',
'mapred.text.key.comparator.options': '-k1 -k2nr',
}
class SortValuesTestCase(TestCase):
def test_sort_values_sets_partitioner(self):
mr_job = MRSortValuesJob()
self.assertEqual(
mr_job.partitioner(),
'org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner')
def test_sort_values_sets_jobconf(self):
mr_job = MRSortValuesJob()
self.assertEqual(
mr_job.jobconf(),
{'stream.num.map.output.key.fields': 2,
'mapred.text.key.partitioner.options': '-k1,1',
'mapred.output.key.comparator.class': None,
'mapred.text.key.comparator.options': None})
def test_can_override_sort_values_from_job(self):
mr_job = MRSortValuesAndMoreJob()
self.assertEqual(
mr_job.partitioner(),
'org.apache.hadoop.mapred.lib.HashPartitioner')
self.assertEqual(
mr_job.jobconf(),
{'stream.num.map.output.key.fields': 3,
'mapred.text.key.partitioner.options': '-k1,1',
'mapred.output.key.comparator.class':
'org.apache.hadoop.mapred.lib.KeyFieldBasedComparator',
'mapred.text.key.comparator.options': '-k1 -k2nr'})
def test_can_override_sort_values_from_cmd_line(self):
mr_job = MRSortValuesJob(
['--partitioner', 'org.pants.FancyPantsPartitioner',
'--jobconf', 'stream.num.map.output.key.fields=lots'])
self.assertEqual(
mr_job.partitioner(),
'org.pants.FancyPantsPartitioner')
self.assertEqual(
mr_job.jobconf(),
{'stream.num.map.output.key.fields': 'lots',
'mapred.text.key.partitioner.options': '-k1,1',
'mapred.output.key.comparator.class': None,
'mapred.text.key.comparator.options': None})
class SortValuesRunnerTestCase(SandboxedTestCase):
MRJOB_CONF_CONTENTS = {'runners': {'inline': {'jobconf': {
'mapred.text.key.partitioner.options': '-k1,1',
'mapred.output.key.comparator.class': 'egypt.god.Anubis',
'foo': 'bar',
}}}}
def test_cant_override_sort_values_from_mrjob_conf(self):
runner = MRSortValuesJob().make_runner()
self.assertEqual(
runner._hadoop_args_for_step(0),
# foo=bar is included, but the other options from mrjob.conf are
# blanked out so as not to mess up SORT_VALUES
['-D', 'foo=bar',
'-D', 'mapred.text.key.partitioner.options=-k1,1',
'-D', 'stream.num.map.output.key.fields=2',
'-partitioner',
'org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner'])
class HadoopFormatTestCase(TestCase):
# MRHadoopFormatJob is imported above
class MRHadoopFormatMethodJob(MRJob):
def hadoop_input_format(self):
return 'mapred.ReasonableInputFormat'
def hadoop_output_format(self):
# not a real Java class, thank god :)
return 'mapred.EbcdicDb2EnterpriseXmlOutputFormat'
def test_empty(self):
mr_job = MRJob()
self.assertEqual(mr_job.job_runner_kwargs()['hadoop_input_format'],
None)
self.assertEqual(mr_job.job_runner_kwargs()['hadoop_output_format'],
None)
def test_hadoop_format_attributes(self):
mr_job = MRHadoopFormatJob()
self.assertEqual(mr_job.job_runner_kwargs()['hadoop_input_format'],
'mapred.FooInputFormat')
self.assertEqual(mr_job.job_runner_kwargs()['hadoop_output_format'],
'mapred.BarOutputFormat')
def test_hadoop_format_methods(self):
mr_job = self.MRHadoopFormatMethodJob()
self.assertEqual(mr_job.job_runner_kwargs()['hadoop_input_format'],
'mapred.ReasonableInputFormat')
self.assertEqual(mr_job.job_runner_kwargs()['hadoop_output_format'],
'mapred.EbcdicDb2EnterpriseXmlOutputFormat')
class PartitionerTestCase(TestCase):
class MRPartitionerJob(MRJob):
PARTITIONER = 'org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner'
def test_empty(self):
mr_job = MRJob()
self.assertEqual(mr_job.job_runner_kwargs()['partitioner'], None)
def test_cmd_line_options(self):
mr_job = MRJob([
'--partitioner', 'java.lang.Object',
'--partitioner', 'org.apache.hadoop.mapreduce.Partitioner'
])
# second option takes priority
self.assertEqual(mr_job.job_runner_kwargs()['partitioner'],
'org.apache.hadoop.mapreduce.Partitioner')
def test_partitioner_attr(self):
mr_job = self.MRPartitionerJob()
self.assertEqual(
mr_job.job_runner_kwargs()['partitioner'],
'org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner')
def test_partitioner_attr_and_cmd_line_options(self):
mr_job = self.MRPartitionerJob([
'--partitioner', 'java.lang.Object',
'--partitioner', 'org.apache.hadoop.mapreduce.Partitioner'
])
# command line takes priority
self.assertEqual(mr_job.job_runner_kwargs()['partitioner'],
'org.apache.hadoop.mapreduce.Partitioner')
class IsMapperOrReducerTestCase(TestCase):
def test_is_mapper_or_reducer(self):
self.assertEqual(MRJob().is_mapper_or_reducer(), False)
self.assertEqual(MRJob(['--mapper']).is_mapper_or_reducer(), True)
self.assertEqual(MRJob(['--reducer']).is_mapper_or_reducer(), True)
self.assertEqual(MRJob(['--combiner']).is_mapper_or_reducer(), True)
self.assertEqual(MRJob(['--steps']).is_mapper_or_reducer(), False)
class StepNumTestCase(TestCase):
def test_two_step_job_end_to_end(self):
# represent input as a list so we can reuse it
# also, leave off newline (MRJobRunner should fix it)
mapper0_input_lines = [b'foo', b'bar']
def test_mapper0(mr_job, input_lines):
mr_job.sandbox(input_lines)
mr_job.run_mapper(0)
self.assertEqual(mr_job.stdout.getvalue(),
b'null\t"foo"\n' + b'"foo"\tnull\n' +
b'null\t"bar"\n' + b'"bar"\tnull\n')
mapper0 = MRTwoStepJob()
test_mapper0(mapper0, mapper0_input_lines)
# --step-num=0 shouldn't actually be necessary
mapper0_no_step_num = MRTwoStepJob(['--mapper'])
test_mapper0(mapper0_no_step_num, mapper0_input_lines)
# sort output of mapper0
mapper0_output_input_lines = BytesIO(mapper0.stdout.getvalue())
reducer0_input_lines = sorted(mapper0_output_input_lines,
key=lambda line: line.split(b'\t'))
def test_reducer0(mr_job, input_lines):
mr_job.sandbox(input_lines)
mr_job.run_reducer(0)
self.assertEqual(mr_job.stdout.getvalue(),
b'"bar"\t1\n' + b'"foo"\t1\n' + b'null\t2\n')
reducer0 = MRTwoStepJob()
test_reducer0(reducer0, reducer0_input_lines)
# --step-num=0 shouldn't actually be necessary
reducer0_no_step_num = MRTwoStepJob(['--reducer'])
test_reducer0(reducer0_no_step_num, reducer0_input_lines)
# mapper can use reducer0's output as-is
mapper1_input_lines = BytesIO(reducer0.stdout.getvalue())
def test_mapper1(mr_job, input_lines):
mr_job.sandbox(input_lines)
mr_job.run_mapper(1)
self.assertEqual(mr_job.stdout.getvalue(),
b'1\t"bar"\n' + b'1\t"foo"\n' + b'2\tnull\n')
mapper1 = MRTwoStepJob()
test_mapper1(mapper1, mapper1_input_lines)
def test_nonexistent_steps(self):
mr_job = MRTwoStepJob()
mr_job.sandbox()
self.assertRaises(ValueError, mr_job.run_reducer, 1)
self.assertRaises(ValueError, mr_job.run_mapper, 2)
self.assertRaises(ValueError, mr_job.run_reducer, -1)
class FileOptionsTestCase(SandboxedTestCase):
def test_end_to_end(self):
n_file_path = os.path.join(self.tmp_dir, 'n_file')
with open(n_file_path, 'w') as f:
f.write('3')
os.environ['LOCAL_N_FILE_PATH'] = n_file_path
stdin = [b'0\n', b'1\n', b'2\n']
# use local runner so that the file is actually sent somewhere
mr_job = MRTowerOfPowers(
['-v', '--cleanup=NONE', '--n-file', n_file_path,
'--runner=local'])
self.assertEqual(len(mr_job.steps()), 3)
mr_job.sandbox(stdin=stdin)
with logger_disabled('mrjob.local'):
with mr_job.make_runner() as runner:
# make sure our file gets placed in the working dir
self.assertIn(n_file_path, runner._working_dir_mgr.paths())
runner.run()
output = set()
for line in runner.stream_output():
_, value = mr_job.parse_output_line(line)
output.add(value)
self.assertEqual(set(output), set([0, 1, ((2 ** 3) ** 3) ** 3]))
class RunJobTestCase(SandboxedTestCase):
def run_job(self, args=()):
args = ([sys.executable, MRTwoStepJob.mr_job_script()] +
list(args) + ['--no-conf'])
# add . to PYTHONPATH (in case mrjob isn't actually installed)
env = combine_envs(os.environ,
{'PYTHONPATH': os.path.abspath('.')})
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
stdout, stderr = proc.communicate(input=b'foo\nbar\nbar\n')
return stdout, stderr, proc.returncode
def test_quiet(self):
stdout, stderr, returncode = self.run_job(['-q'])
self.assertEqual(sorted(BytesIO(stdout)),
[b'1\t"foo"\n', b'2\t"bar"\n', b'3\tnull\n'])
self.assertEqual(stderr, b'')
self.assertEqual(returncode, 0)
def test_verbose(self):
stdout, stderr, returncode = self.run_job()
self.assertEqual(sorted(BytesIO(stdout)),
[b'1\t"foo"\n', b'2\t"bar"\n', b'3\tnull\n'])
self.assertNotEqual(stderr, '')
self.assertEqual(returncode, 0)
normal_stderr = stderr
stdout, stderr, returncode = self.run_job(['-v'])
self.assertEqual(sorted(BytesIO(stdout)),
[b'1\t"foo"\n', b'2\t"bar"\n', b'3\tnull\n'])
self.assertNotEqual(stderr, b'')
self.assertEqual(returncode, 0)
self.assertGreater(len(stderr), len(normal_stderr))
def test_no_output(self):
self.assertEqual(os.listdir(self.tmp_dir), []) # sanity check
args = ['--no-output', '--output-dir', self.tmp_dir]
stdout, stderr, returncode = self.run_job(args)
self.assertEqual(stdout, b'')
self.assertNotEqual(stderr, b'')
self.assertEqual(returncode, 0)
# make sure the correct output is in the temp dir
self.assertNotEqual(os.listdir(self.tmp_dir), [])
output_lines = []
for dirpath, _, filenames in os.walk(self.tmp_dir):
for filename in filenames:
with open(os.path.join(dirpath, filename), 'rb') as output_f:
output_lines.extend(output_f)
self.assertEqual(sorted(output_lines),
[b'1\t"foo"\n', b'2\t"bar"\n', b'3\tnull\n'])
class BadMainTestCase(TestCase):
"""Ensure that the user cannot do anything but just call MRYourJob.run()
from __main__()"""
def test_bad_main_catch(self):
sys.argv.append('--mapper')
self.assertRaises(UsageError, MRBoringJob().make_runner)
sys.argv = sys.argv[:-1]
class ProtocolTypeTestCase(TestCase):
class StrangeJob(MRJob):
def INPUT_PROTOCOL(self):
return JSONProtocol()
def INTERNAL_PROTOCOL(self):
return JSONProtocol()
def OUTPUT_PROTOCOL(self):
return JSONProtocol()
def test_attrs_should_be_classes(self):
with no_handlers_for_logger('mrjob.job'):
stderr = StringIO()
log_to_stream('mrjob.job', stderr)
job = self.StrangeJob()
self.assertIsInstance(job.input_protocol(), JSONProtocol)
self.assertIsInstance(job.internal_protocol(), JSONProtocol)
self.assertIsInstance(job.output_protocol(), JSONProtocol)
logs = stderr.getvalue()
self.assertIn('INPUT_PROTOCOL should be a class', logs)
self.assertIn('INTERNAL_PROTOCOL should be a class', logs)
self.assertIn('OUTPUT_PROTOCOL should be a class', logs)
class StepsTestCase(TestCase):
class SteppyJob(MRJob):
def _yield_none(self, *args, **kwargs):
yield None
def steps(self):
return [
MRStep(mapper_init=self._yield_none, mapper_pre_filter='cat',
reducer_cmd='wc -l'),
JarStep(jar='s3://bookat/binks_jar.jar')]
class SingleSteppyCommandJob(MRJob):
def mapper_cmd(self):
return 'cat'
def combiner_cmd(self):
return 'cat'
def reducer_cmd(self):
return 'wc -l'
class SingleStepJobConfMethodJob(MRJob):
def mapper(self, key, value):
return None
def jobconf(self):
return {'mapred.baz': 'bar'}
def test_steps(self):
j = self.SteppyJob(['--no-conf'])
self.assertEqual(
j.steps()[0],
MRStep(
mapper_init=j._yield_none,
mapper_pre_filter='cat',
reducer_cmd='wc -l'))
self.assertEqual(
j.steps()[1], JarStep(jar='s3://bookat/binks_jar.jar'))
def test_cmd_steps(self):
j = self.SingleSteppyCommandJob(['--no-conf'])
self.assertEqual(
j._steps_desc(),
[{
'type': 'streaming',
'mapper': {'type': 'command', 'command': 'cat'},
'combiner': {'type': 'command', 'command': 'cat'},
'reducer': {'type': 'command', 'command': 'wc -l'}}])
def test_can_override_jobconf_method(self):
# regression test for #656
j = self.SingleStepJobConfMethodJob(['--no-conf'])
# overriding jobconf() should affect job_runner_kwargs()
# but not step definitions
self.assertEqual(j.job_runner_kwargs()['jobconf'],
{'mapred.baz': 'bar'})
self.assertEqual(
j.steps()[0],
MRStep(mapper=j.mapper))
class DeprecatedMRMethodTestCase(TestCase):
def test_mr(self):
kwargs = {
'mapper': _IDENTITY_MAPPER,
'reducer': _IDENTITY_REDUCER,
}
with logger_disabled('mrjob.job'):
self.assertEqual(MRJob.mr(**kwargs), MRStep(**kwargs))
| [] | [] | ["LOCAL_N_FILE_PATH"] | [] | ["LOCAL_N_FILE_PATH"] | python | 1 | 0 |
vultrauth/auth.go | package vultrauth
import (
"encoding/json"
"fmt"
"log"
"os"
)
// Configuration represents the Vultr API key configuration
type Configuration struct {
VultrAPIKey string
}
// Config is the Vultr configuration loaded from vultrconfig.json or environment variables
var Config Configuration
// LoadConfig loads the Vultr config from vultrconfig.json, falling back to environment variables
func LoadConfig() {
// Read from file first.
home := os.Getenv("HOME")
file, err := os.Open(home + "/.gocloud" + "/vultrconfig.json")
if err != nil {
fmt.Println(err)
return
}
// Defer closing the file so that we can decode it below
defer file.Close()
// We initialize Configuration struct
decoder := json.NewDecoder(file)
Config = Configuration{}
_ = decoder.Decode(&Config)
if Config.VultrAPIKey == "" {
// If vultrconfig.json doesn't exist, look for credentials as environment variables.
Config.VultrAPIKey = os.Getenv("VultrAPIKey")
if Config.VultrAPIKey == "" {
log.Fatalln("Cannot Get Vultr API Key")
}
}
}
| ["\"HOME\"", "\"VultrAPIKey\""] | [] | ["VultrAPIKey", "HOME"] | [] | ["VultrAPIKey", "HOME"] | go | 2 | 0 |
pkg/client/clientcmd/client_builder_test.go | /*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientcmd
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"strings"
"testing"
"github.com/spf13/pflag"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/clientauth"
)
func TestSetAllArgumentsOnly(t *testing.T) {
flags := pflag.NewFlagSet("test-flags", pflag.ContinueOnError)
clientBuilder := NewBuilder(nil)
clientBuilder.BindFlags(flags)
args := argValues{"https://localhost:8080", "v1beta1", "/auth-path", "cert-file", "key-file", "ca-file", "bearer-token", true, true}
flags.Parse(strings.Split(args.toArguments(), " "))
castBuilder, ok := clientBuilder.(*builder)
if !ok {
t.Errorf("Got unexpected cast result: %#v", castBuilder)
}
matchStringArg(args.server, castBuilder.apiserver, t)
matchStringArg(args.apiVersion, castBuilder.apiVersion, t)
matchStringArg(args.authPath, castBuilder.authPath, t)
matchStringArg(args.certFile, castBuilder.cmdAuthInfo.CertFile.Value, t)
matchStringArg(args.keyFile, castBuilder.cmdAuthInfo.KeyFile.Value, t)
matchStringArg(args.caFile, castBuilder.cmdAuthInfo.CAFile.Value, t)
matchStringArg(args.bearerToken, castBuilder.cmdAuthInfo.BearerToken.Value, t)
matchBoolArg(args.insecure, castBuilder.cmdAuthInfo.Insecure.Value, t)
matchBoolArg(args.matchApiVersion, castBuilder.matchApiVersion, t)
clientConfig, err := clientBuilder.Config()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
matchStringArg(args.server, clientConfig.Host, t)
matchStringArg(args.apiVersion, clientConfig.Version, t)
matchStringArg(args.certFile, clientConfig.CertFile, t)
matchStringArg(args.keyFile, clientConfig.KeyFile, t)
matchStringArg(args.caFile, clientConfig.CAFile, t)
matchStringArg(args.bearerToken, clientConfig.BearerToken, t)
matchBoolArg(args.insecure, clientConfig.Insecure, t)
}
func TestSetInsecureArgumentsOnly(t *testing.T) {
flags := pflag.NewFlagSet("test-flags", pflag.ContinueOnError)
clientBuilder := NewBuilder(nil)
clientBuilder.BindFlags(flags)
args := argValues{"http://localhost:8080", "v1beta1", "/auth-path", "cert-file", "key-file", "ca-file", "bearer-token", true, true}
flags.Parse(strings.Split(args.toArguments(), " "))
clientConfig, err := clientBuilder.Config()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
matchStringArg(args.server, clientConfig.Host, t)
matchStringArg(args.apiVersion, clientConfig.Version, t)
// all security related params should be empty in the resulting config even though we set them because we're using http transport
matchStringArg("", clientConfig.CertFile, t)
matchStringArg("", clientConfig.KeyFile, t)
matchStringArg("", clientConfig.CAFile, t)
matchStringArg("", clientConfig.BearerToken, t)
matchBoolArg(false, clientConfig.Insecure, t)
}
func TestReadAuthFile(t *testing.T) {
flags := pflag.NewFlagSet("test-flags", pflag.ContinueOnError)
clientBuilder := NewBuilder(nil)
clientBuilder.BindFlags(flags)
authFileContents := `{"user": "alfa-user", "password": "bravo-password", "cAFile": "charlie", "certFile": "delta", "keyFile": "echo", "bearerToken": "foxtrot"}`
authFile := writeTempAuthFile(authFileContents, t)
args := argValues{"https://localhost:8080", "v1beta1", authFile, "", "", "", "", true, true}
flags.Parse(strings.Split(args.toArguments(), " "))
castBuilder, ok := clientBuilder.(*builder)
if !ok {
t.Errorf("Got unexpected cast result: %#v", castBuilder)
}
matchStringArg(args.server, castBuilder.apiserver, t)
matchStringArg(args.apiVersion, castBuilder.apiVersion, t)
matchStringArg(args.authPath, castBuilder.authPath, t)
matchStringArg(args.certFile, castBuilder.cmdAuthInfo.CertFile.Value, t)
matchStringArg(args.keyFile, castBuilder.cmdAuthInfo.KeyFile.Value, t)
matchStringArg(args.caFile, castBuilder.cmdAuthInfo.CAFile.Value, t)
matchStringArg(args.bearerToken, castBuilder.cmdAuthInfo.BearerToken.Value, t)
matchBoolArg(args.insecure, castBuilder.cmdAuthInfo.Insecure.Value, t)
matchBoolArg(args.matchApiVersion, castBuilder.matchApiVersion, t)
clientConfig, err := clientBuilder.Config()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
matchStringArg(args.server, clientConfig.Host, t)
matchStringArg(args.apiVersion, clientConfig.Version, t)
matchStringArg("delta", clientConfig.CertFile, t)
matchStringArg("echo", clientConfig.KeyFile, t)
matchStringArg("charlie", clientConfig.CAFile, t)
matchStringArg("foxtrot", clientConfig.BearerToken, t)
matchStringArg("alfa-user", clientConfig.Username, t)
matchStringArg("bravo-password", clientConfig.Password, t)
matchBoolArg(args.insecure, clientConfig.Insecure, t)
}
func TestAuthFileOverridden(t *testing.T) {
flags := pflag.NewFlagSet("test-flags", pflag.ContinueOnError)
clientBuilder := NewBuilder(nil)
clientBuilder.BindFlags(flags)
authFileContents := `{"user": "alfa-user", "password": "bravo-password", "cAFile": "charlie", "certFile": "delta", "keyFile": "echo", "bearerToken": "foxtrot"}`
authFile := writeTempAuthFile(authFileContents, t)
args := argValues{"https://localhost:8080", "v1beta1", authFile, "cert-file", "key-file", "ca-file", "bearer-token", true, true}
flags.Parse(strings.Split(args.toArguments(), " "))
castBuilder, ok := clientBuilder.(*builder)
if !ok {
t.Errorf("Got unexpected cast result: %#v", castBuilder)
}
matchStringArg(args.server, castBuilder.apiserver, t)
matchStringArg(args.apiVersion, castBuilder.apiVersion, t)
matchStringArg(args.authPath, castBuilder.authPath, t)
matchStringArg(args.certFile, castBuilder.cmdAuthInfo.CertFile.Value, t)
matchStringArg(args.keyFile, castBuilder.cmdAuthInfo.KeyFile.Value, t)
matchStringArg(args.caFile, castBuilder.cmdAuthInfo.CAFile.Value, t)
matchStringArg(args.bearerToken, castBuilder.cmdAuthInfo.BearerToken.Value, t)
matchBoolArg(args.insecure, castBuilder.cmdAuthInfo.Insecure.Value, t)
matchBoolArg(args.matchApiVersion, castBuilder.matchApiVersion, t)
clientConfig, err := clientBuilder.Config()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
matchStringArg(args.server, clientConfig.Host, t)
matchStringArg(args.apiVersion, clientConfig.Version, t)
matchStringArg(args.certFile, clientConfig.CertFile, t)
matchStringArg(args.keyFile, clientConfig.KeyFile, t)
matchStringArg(args.caFile, clientConfig.CAFile, t)
matchStringArg(args.bearerToken, clientConfig.BearerToken, t)
matchStringArg("alfa-user", clientConfig.Username, t)
matchStringArg("bravo-password", clientConfig.Password, t)
matchBoolArg(args.insecure, clientConfig.Insecure, t)
}
func TestUseDefaultArgumentsOnly(t *testing.T) {
flags := pflag.NewFlagSet("test-flags", pflag.ContinueOnError)
clientBuilder := NewBuilder(nil)
clientBuilder.BindFlags(flags)
flags.Parse(strings.Split("", " "))
castBuilder, ok := clientBuilder.(*builder)
if !ok {
t.Errorf("Got unexpected cast result: %#v", castBuilder)
}
matchStringArg("", castBuilder.apiserver, t)
matchStringArg(latest.Version, castBuilder.apiVersion, t)
matchStringArg(os.Getenv("HOME")+"/.kubernetes_auth", castBuilder.authPath, t)
matchStringArg("", castBuilder.cmdAuthInfo.CertFile.Value, t)
matchStringArg("", castBuilder.cmdAuthInfo.KeyFile.Value, t)
matchStringArg("", castBuilder.cmdAuthInfo.CAFile.Value, t)
matchStringArg("", castBuilder.cmdAuthInfo.BearerToken.Value, t)
matchBoolArg(false, castBuilder.matchApiVersion, t)
}
func TestLoadClientAuthInfoOrPrompt(t *testing.T) {
loadAuthInfoTests := []struct {
authData string
authInfo *clientauth.Info
r io.Reader
}{
{
`{"user": "user", "password": "pass"}`,
&clientauth.Info{User: "user", Password: "pass"},
nil,
},
{
"", nil, nil,
},
{
"missing",
&clientauth.Info{User: "user", Password: "pass"},
bytes.NewBufferString("user\npass"),
},
}
for _, loadAuthInfoTest := range loadAuthInfoTests {
tt := loadAuthInfoTest
aifile, err := ioutil.TempFile("", "testAuthInfo")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if tt.authData != "missing" {
defer os.Remove(aifile.Name())
defer aifile.Close()
_, err = aifile.WriteString(tt.authData)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
} else {
aifile.Close()
os.Remove(aifile.Name())
}
prompter := NewPromptingAuthLoader(tt.r)
authInfo, err := prompter.LoadAuth(aifile.Name())
if len(tt.authData) == 0 && tt.authData != "missing" {
if err == nil {
t.Error("LoadAuth didn't fail on empty file")
}
continue
}
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !reflect.DeepEqual(authInfo, tt.authInfo) {
t.Errorf("Expected %#v, got %#v", tt.authInfo, authInfo)
}
}
}
func TestOverride(t *testing.T) {
b := NewBuilder(nil)
cfg, err := b.Config()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if cfg.Version != "" {
t.Errorf("unexpected default config version")
}
newCfg, err := b.Override(func(cfg *client.Config) {
if cfg.Version != "" {
t.Errorf("unexpected default config version")
}
cfg.Version = "test"
}).Config()
if newCfg.Version != "test" {
t.Errorf("unexpected override config version")
}
if cfg.Version != "" {
t.Errorf("original object should not change")
}
cfg, err = b.Config()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if cfg.Version != "" {
t.Errorf("override should not be persistent")
}
}
func matchStringArg(expected, got string, t *testing.T) {
if expected != got {
t.Errorf("Expected %v, got %v", expected, got)
}
}
func matchBoolArg(expected, got bool, t *testing.T) {
if expected != got {
t.Errorf("Expected %v, got %v", expected, got)
}
}
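// writeTempAuthFile writes the given auth file contents to a temp file and returns its path.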
func writeTempAuthFile(contents string, t *testing.T) string {
file, err := ioutil.TempFile("", "testAuthInfo")
if err != nil {
t.Errorf("Failed to write config file. Test cannot continue due to: %v", err)
return ""
}
_, err = file.WriteString(contents)
if err != nil {
t.Errorf("Unexpected error: %v", err)
return ""
}
file.Close()
return file.Name()
}
type argValues struct {
server string
apiVersion string
authPath string
certFile string
keyFile string
caFile string
bearerToken string
insecure bool
matchApiVersion bool
}
func (a *argValues) toArguments() string {
args := ""
if len(a.server) > 0 {
args += "--" + FlagApiServer + "=" + a.server + " "
}
if len(a.apiVersion) > 0 {
args += "--" + FlagApiVersion + "=" + a.apiVersion + " "
}
if len(a.authPath) > 0 {
args += "--" + FlagAuthPath + "=" + a.authPath + " "
}
if len(a.certFile) > 0 {
args += "--" + FlagCertFile + "=" + a.certFile + " "
}
if len(a.keyFile) > 0 {
args += "--" + FlagKeyFile + "=" + a.keyFile + " "
}
if len(a.caFile) > 0 {
args += "--" + FlagCAFile + "=" + a.caFile + " "
}
if len(a.bearerToken) > 0 {
args += "--" + FlagBearerToken + "=" + a.bearerToken + " "
}
args += "--" + FlagInsecure + "=" + fmt.Sprintf("%v", a.insecure) + " "
args += "--" + FlagMatchApiVersion + "=" + fmt.Sprintf("%v", a.matchApiVersion) + " "
return args
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 |
jupyterhub/jupyterhub_config.py | # JupyterHub configuration
#
## If you update this file, do not forget to delete the `jupyterhub_data` volume before restarting the jupyterhub service:
##
## docker volume rm jupyterhub_jupyterhub_data
##
## or, if you changed the COMPOSE_PROJECT_NAME to <name>:
##
## docker volume rm <name>_jupyterhub_data
##
import os
## Generic
c.JupyterHub.admin_access = True
c.Spawner.default_url = '/lab'
## Authenticator
from jhub_cas_authenticator.cas_auth import CASAuthenticator
c.JupyterHub.authenticator_class = CASAuthenticator
# The CAS URLs to redirect (un)authenticated users to.
#c.CASAuthenticator.cas_login_url = 'https://cas.uvsq.fr/login'
#c.CASLocalAuthenticator.cas_logout_url = 'https://cas.uvsq/logout'
c.CASAuthenticator.cas_login_url = 'https://auth.univ-paris-diderot.fr/cas/login'
c.CASAuthenticator.cas_logout_url = 'https://auth.univ-paris-diderot.fr/cas/logout'
# The CAS endpoint for validating service tickets.
c.CASAuthenticator.cas_servive_validate_url = 'https://auth.univ-paris-diderot.fr/cas/serviceValidate'
# The service URL the CAS server will redirect the browser back to on successful authentication.
c.CASAuthenticator.cas_service_url = 'https://%s/hub/login' % os.environ['HOST']
c.Authenticator.admin_users = { 'molin' }
## Docker spawner
c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.DockerSpawner.image = os.environ['DOCKER_JUPYTER_CONTAINER']
c.DockerSpawner.network_name = os.environ['DOCKER_NETWORK_NAME']
# See https://github.com/jupyterhub/dockerspawner/blob/master/examples/oauth/jupyterhub_config.py
c.JupyterHub.hub_ip = os.environ['HUB_IP']
# user data persistence
# see https://github.com/jupyterhub/dockerspawner#data-persistence-and-dockerspawner
notebook_dir = os.environ.get('DOCKER_NOTEBOOK_DIR') or '/home/jovyan'
c.DockerSpawner.notebook_dir = notebook_dir
c.DockerSpawner.volumes = { 'jupyterhub-user-{username}': notebook_dir }
# Other stuff
c.Spawner.cpu_limit = 1
c.Spawner.mem_limit = '10G'
## Services
c.JupyterHub.services = [
{
'name': 'cull_idle',
'admin': True,
'command': 'python /srv/jupyterhub/cull_idle_servers.py --timeout=3600'.split(),
},
]
| [] | [] | ["HOST", "DOCKER_NETWORK_NAME", "DOCKER_JUPYTER_CONTAINER", "HUB_IP", "DOCKER_NOTEBOOK_DIR"] | [] | ["HOST", "DOCKER_NETWORK_NAME", "DOCKER_JUPYTER_CONTAINER", "HUB_IP", "DOCKER_NOTEBOOK_DIR"] | python | 5 | 0 |
ch02/statistics/03_histogram.go | package main
import (
"fmt"
"log"
"os"
"path/filepath"
"github.com/go-gota/gota/dataframe"
"gonum.org/v1/plot"
"gonum.org/v1/plot/plotter"
"gonum.org/v1/plot/vg"
)
var (
fileName = "iris.csv"
filePath = filepath.Join(os.Getenv("MLGO"), "storage", "data", fileName)
suffix = "_hist"
)
func main() {
f, err := os.Open(filePath)
if err != nil {
log.Fatal(err)
}
defer f.Close()
df := dataframe.ReadCSV(f)
for _, colName := range df.Names() {
if colName != "species" {
v := make(plotter.Values, df.Nrow())
for i, floatVal := range df.Col(colName).Float() {
v[i] = floatVal
}
p, err := plot.New()
if err != nil {
log.Fatal(err)
}
p.Title.Text = fmt.Sprintf("Histogram of a %s", colName)
h, err := plotter.NewHist(v, 16)
if err != nil {
log.Fatal(err)
}
h.Normalize(1)
p.Add(h)
if err := p.Save(4*vg.Inch, 4*vg.Inch, plotPath(colName)); err != nil {
log.Fatal(err)
}
}
}
}
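// plotPath builds the output path for a plot image: result/<name>_hist.png next to the executable.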
func plotPath(name string) string {
saveName := name + suffix + ".png"
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
log.Fatal(err)
}
savePath := filepath.Join(dir, "result", saveName)
return savePath
}
| ["\"MLGO\""] | [] | ["MLGO"] | [] | ["MLGO"] | go | 1 | 0 |
meson_post_install.py | #!/usr/bin/env python3
import os
import subprocess
schemadir = os.path.join(os.environ["MESON_INSTALL_PREFIX"], "share", "glib-2.0", "schemas")
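# Only compile schemas for a real install; when DESTDIR is set (staged/package builds), the packaging step handles it.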
if not os.environ.get("DESTDIR"):
print("Compiling gsettings schemas...")
subprocess.call(["glib-compile-schemas", schemadir])
| [] | [] | ["MESON_INSTALL_PREFIX", "DESTDIR"] | [] | ["MESON_INSTALL_PREFIX", "DESTDIR"] | python | 2 | 0 |
packages/augur-ui/support/dnslink-cloudflare.py | #!/usr/bin/env python3
import argparse
import json
import os
import requests
import sys
try:
os.environ['CF_API_KEY']
except KeyError:
print('CF_API_KEY not set')
sys.exit(1)
try:
os.environ['CF_API_EMAIL']
except KeyError:
print('CF_API_EMAIL is not set')
sys.exit(1)
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--domain', help="sets domain")
parser.add_argument('-l', '--link', help="ipfs sha")
parser.add_argument('-r', '--record', help="Domain record name")
args = parser.parse_args()
cf_api_key = os.environ['CF_API_KEY']
cf_api_email = os.environ['CF_API_EMAIL']
domain = args.domain
ipfs_path = 'dnslink=/ipfs/{}'.format(args.link)
headers = {
'X-Auth-Email': '{}'.format(cf_api_email),
'X-Auth-Key': '{}'.format(cf_api_key),
'Content-Type': 'application/json'
}
# gets account id
def query_account_id():
url = 'https://api.cloudflare.com/client/v4/user'
r = requests.get(url, headers=headers)
userdata = r.json()
# account_id = userdata['result']['id']
return userdata['result']['id']
# gets zone id
def query_zone_id(domain):
url = 'https://api.cloudflare.com/client/v4/zones?match=all'
zone_request = requests.get(url, headers=headers)
for zone in zone_request.json()['result']:
if zone['name'] == domain:
return zone['id']
def query_dns_records(zone_id):
url = 'https://api.cloudflare.com/client/v4/zones/{}/dns_records?type=TXT'.format(zone_id)
dns_records = requests.get(url, headers=headers)
return dns_records.json()['result']
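# Update an existing dnslink TXT record in place with the new IPFS path.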
def dns_record_update(domain_zone_id, record_id, record_name):
print('updating record')
url = 'https://api.cloudflare.com/client/v4/zones/{}/dns_records/{}'.format(domain_zone_id,record_id)
data = {
'type': 'TXT',
'name': record_name,
'content': ipfs_path,
'ttl': 120
}
record_update = requests.put(url,
headers=headers,
data=json.dumps(data)
)
print(record_update.content)
if not record_update.status_code == requests.codes.ok:
print(record_update.content)
record_update.raise_for_status()
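# Create a new dnslink TXT record when no existing record matches.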
def dns_record_create(domain_zone_id, record_name, content):
print('creating record')
url = 'https://api.cloudflare.com/client/v4/zones/{}/dns_records'.format(domain_zone_id)
data = {
'type': 'TXT',
'name': '{}'.format(record_name),
'content': '{}'.format(content),
'ttl': 120
}
record_create = requests.post(url,
headers=headers,
data=json.dumps(data)
)
if not record_create.status_code == requests.codes.ok:
print(record_create.content)
record_create.raise_for_status()
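# Main flow: find the zone, then update the existing dnslink record or create one if it is missing.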
domain_zone_id = query_zone_id(args.domain)
dns_records = query_dns_records(domain_zone_id)
record_exists = False
for record in dns_records:
record_id = record['id']
record_name = record['name']
if record['name'].startswith(args.record):
dns_record_update(domain_zone_id, record_id, record_name)
record_exists = True
if record_exists is False:
record_name = args.record + '.' + args.domain
content = ipfs_path
print('creating record: {} with {}'.format(record_name,content))
dns_record_create(domain_zone_id, record_name, content)
| [] | [] | ["CF_API_KEY", "CF_API_EMAIL"] | [] | ["CF_API_KEY", "CF_API_EMAIL"] | python | 2 | 0 |
logs/plugin_bot.go | package logs
import (
"context"
"database/sql"
"fmt"
"os"
"time"
"emperror.dev/errors"
"github.com/jonas747/yagpdb/bot/paginatedmessages"
"github.com/jonas747/dcmd"
"github.com/jonas747/discordgo"
"github.com/jonas747/dstate"
"github.com/jonas747/yagpdb/bot"
"github.com/jonas747/yagpdb/bot/eventsystem"
"github.com/jonas747/yagpdb/commands"
"github.com/jonas747/yagpdb/common"
"github.com/jonas747/yagpdb/logs/models"
"github.com/volatiletech/null"
"github.com/volatiletech/sqlboiler/boil"
)
var _ bot.BotInitHandler = (*Plugin)(nil)
var _ commands.CommandProvider = (*Plugin)(nil)
func (p *Plugin) AddCommands() {
commands.AddRootCommands(cmdLogs, cmdWhois, cmdNicknames, cmdUsernames, cmdMigrate)
}
func (p *Plugin) BotInit() {
eventsystem.AddHandlerAsyncLastLegacy(p, bot.ConcurrentEventHandler(HandleQueueEvt), eventsystem.EventGuildMemberUpdate, eventsystem.EventGuildMemberAdd, eventsystem.EventMemberFetched)
// eventsystem.AddHandlerAsyncLastLegacy(bot.ConcurrentEventHandler(HandleGC), eventsystem.EventGuildCreate)
eventsystem.AddHandlerAsyncLast(p, HandleMsgDelete, eventsystem.EventMessageDelete, eventsystem.EventMessageDeleteBulk)
eventsystem.AddHandlerFirstLegacy(p, HandlePresenceUpdate, eventsystem.EventPresenceUpdate)
go EvtProcesser()
go EvtProcesserGCs()
}
var cmdLogs = &commands.YAGCommand{
Cooldown: 5,
CmdCategory: commands.CategoryTool,
Name: "Logs",
Aliases: []string{"log"},
Description: "Creates a log of the last messages in the current channel.",
LongDescription: "This includes deleted messages within an hour (or 12 hours for premium servers)",
Arguments: []*dcmd.ArgDef{
&dcmd.ArgDef{Name: "Count", Default: 100, Type: &dcmd.IntArg{Min: 2, Max: 250}},
},
RunFunc: func(cmd *dcmd.Data) (interface{}, error) {
num := cmd.Args[0].Int()
l, err := CreateChannelLog(cmd.Context(), nil, cmd.GS.ID, cmd.CS.ID, cmd.Msg.Author.Username, cmd.Msg.Author.ID, num)
if err != nil {
if err == ErrChannelBlacklisted {
return "This channel is blacklisted from creating message logs, this can be changed in the control panel.", nil
}
return "", err
}
return CreateLink(cmd.GS.ID, l.ID), err
},
}
var cmdWhois = &commands.YAGCommand{
CmdCategory: commands.CategoryTool,
Name: "Whois",
Description: "Shows information about a user",
Aliases: []string{"whoami"},
RunInDM: false,
Arguments: []*dcmd.ArgDef{
{Name: "User", Type: &commands.MemberArg{}},
},
RunFunc: func(parsed *dcmd.Data) (interface{}, error) {
config, err := GetConfig(common.PQ, parsed.Context(), parsed.GS.ID)
if err != nil {
return nil, err
}
member := commands.ContextMS(parsed.Context())
if parsed.Args[0].Value != nil {
member = parsed.Args[0].Value.(*dstate.MemberState)
}
nick := ""
if member.Nick != "" {
nick = " (" + member.Nick + ")"
}
joinedAtStr := ""
joinedAtDurStr := ""
if !member.MemberSet {
joinedAtStr = "Couldn't find out"
joinedAtDurStr = "Couldn't find out"
} else {
joinedAtStr = member.JoinedAt.UTC().Format(time.RFC822)
dur := time.Since(member.JoinedAt)
joinedAtDurStr = common.HumanizeDuration(common.DurationPrecisionHours, dur)
}
if joinedAtDurStr == "" {
joinedAtDurStr = "Less than an hour ago"
}
t := bot.SnowflakeToTime(member.ID)
createdDurStr := common.HumanizeDuration(common.DurationPrecisionHours, time.Since(t))
if createdDurStr == "" {
createdDurStr = "Less than an hour ago"
}
embed := &discordgo.MessageEmbed{
Title: fmt.Sprintf("%s#%04d%s", member.Username, member.Discriminator, nick),
Fields: []*discordgo.MessageEmbedField{
&discordgo.MessageEmbedField{
Name: "ID",
Value: discordgo.StrID(member.ID),
Inline: true,
},
&discordgo.MessageEmbedField{
Name: "Avatar",
Value: "[Link](" + discordgo.EndpointUserAvatar(member.ID, member.StrAvatar()) + ")",
Inline: true,
},
&discordgo.MessageEmbedField{
Name: "Account Created",
Value: t.UTC().Format(time.RFC822),
Inline: true,
},
&discordgo.MessageEmbedField{
Name: "Account Age",
Value: createdDurStr,
Inline: true,
},
&discordgo.MessageEmbedField{
Name: "Joined Server At",
Value: joinedAtStr,
Inline: true,
}, &discordgo.MessageEmbedField{
Name: "Join Server Age",
Value: joinedAtDurStr,
Inline: true,
},
},
Thumbnail: &discordgo.MessageEmbedThumbnail{
URL: discordgo.EndpointUserAvatar(member.ID, member.StrAvatar()),
},
}
if config.UsernameLoggingEnabled.Bool {
usernames, err := GetUsernames(parsed.Context(), member.ID, 5, 0)
if err != nil {
return err, err
}
usernamesStr := "```\n"
for _, v := range usernames {
usernamesStr += fmt.Sprintf("%20s: %s\n", v.CreatedAt.Time.UTC().Format(time.RFC822), v.Username.String)
}
usernamesStr += "```"
embed.Fields = append(embed.Fields, &discordgo.MessageEmbedField{
Name: "5 last usernames",
Value: usernamesStr,
})
} else {
embed.Fields = append(embed.Fields, &discordgo.MessageEmbedField{
Name: "Usernames",
Value: "Username tracking disabled",
})
}
if config.NicknameLoggingEnabled.Bool {
nicknames, err := GetNicknames(parsed.Context(), member.ID, parsed.GS.ID, 5, 0)
if err != nil {
return err, err
}
nicknameStr := "```\n"
if len(nicknames) < 1 {
nicknameStr += "No nicknames tracked"
} else {
for _, v := range nicknames {
nicknameStr += fmt.Sprintf("%20s: %s\n", v.CreatedAt.Time.UTC().Format(time.RFC822), v.Nickname.String)
}
}
nicknameStr += "```"
embed.Fields = append(embed.Fields, &discordgo.MessageEmbedField{
Name: "5 last nicknames",
Value: nicknameStr,
})
} else {
embed.Fields = append(embed.Fields, &discordgo.MessageEmbedField{
Name: "Nicknames",
Value: "Nickname tracking disabled",
})
}
return embed, nil
},
}
var cmdUsernames = &commands.YAGCommand{
CmdCategory: commands.CategoryTool,
Name: "Usernames",
Description: "Shows past usernames of a user.",
Aliases: []string{"unames", "un"},
RunInDM: true,
Arguments: []*dcmd.ArgDef{
{Name: "User", Type: dcmd.User},
},
RunFunc: func(parsed *dcmd.Data) (interface{}, error) {
if parsed.GS != nil {
config, err := GetConfig(common.PQ, parsed.Context(), parsed.GS.ID)
if err != nil {
return nil, err
}
if !config.UsernameLoggingEnabled.Bool {
return "Username logging is disabled on this server", nil
}
}
_, err := paginatedmessages.CreatePaginatedMessage(parsed.GS.ID, parsed.CS.ID, 1, 0, func(p *paginatedmessages.PaginatedMessage, page int) (*discordgo.MessageEmbed, error) {
target := parsed.Msg.Author
if parsed.Args[0].Value != nil {
target = parsed.Args[0].Value.(*discordgo.User)
}
offset := (page - 1) * 15
usernames, err := GetUsernames(context.Background(), target.ID, 15, offset)
if err != nil {
return nil, err
}
if len(usernames) < 1 && page > 1 {
return nil, paginatedmessages.ErrNoResults
}
out := fmt.Sprintf("Past username of **%s#%s** ```\n", target.Username, target.Discriminator)
for _, v := range usernames {
out += fmt.Sprintf("%20s: %s\n", v.CreatedAt.Time.UTC().Format(time.RFC822), v.Username.String)
}
out += "```"
if len(usernames) < 1 {
out = `No logged usernames`
}
embed := &discordgo.MessageEmbed{
Color: 0x277ee3,
Title: "Usernames of " + target.Username + "#" + target.Discriminator,
Description: out,
}
return embed, nil
})
return nil, err
},
}
var cmdNicknames = &commands.YAGCommand{
CmdCategory: commands.CategoryTool,
Name: "Nicknames",
Description: "Shows past nicknames of a user.",
Aliases: []string{"nn"},
RunInDM: false,
Arguments: []*dcmd.ArgDef{
{Name: "User", Type: dcmd.User},
},
RunFunc: func(parsed *dcmd.Data) (interface{}, error) {
config, err := GetConfig(common.PQ, parsed.Context(), parsed.GS.ID)
if err != nil {
return nil, err
}
target := parsed.Msg.Author
if parsed.Args[0].Value != nil {
target = parsed.Args[0].Value.(*discordgo.User)
}
if !config.NicknameLoggingEnabled.Bool {
return "Nickname logging is disabled on this server", nil
}
_, err = paginatedmessages.CreatePaginatedMessage(parsed.GS.ID, parsed.CS.ID, 1, 0, func(p *paginatedmessages.PaginatedMessage, page int) (*discordgo.MessageEmbed, error) {
offset := (page - 1) * 15
nicknames, err := GetNicknames(context.Background(), target.ID, parsed.GS.ID, 15, offset)
if err != nil {
return nil, err
}
if page > 1 && len(nicknames) < 1 {
return nil, paginatedmessages.ErrNoResults
}
out := fmt.Sprintf("Past nicknames of **%s#%s** ```\n", target.Username, target.Discriminator)
for _, v := range nicknames {
out += fmt.Sprintf("%20s: %s\n", v.CreatedAt.Time.UTC().Format(time.RFC822), v.Nickname.String)
}
out += "```"
if len(nicknames) < 1 {
out = `No nicknames tracked`
}
embed := &discordgo.MessageEmbed{
Color: 0x277ee3,
Title: "Nicknames of " + target.Username + "#" + target.Discriminator,
Description: out,
}
return embed, nil
})
return nil, err
},
}
// Mark all log messages with this id as deleted
func HandleMsgDelete(evt *eventsystem.EventData) (retry bool, err error) {
if evt.Type == eventsystem.EventMessageDelete {
err := markLoggedMessageAsDeleted(evt.Context(), evt.MessageDelete().ID)
if err != nil {
return true, errors.WithStackIf(err)
}
return false, nil
}
for _, m := range evt.MessageDeleteBulk().Messages {
err := markLoggedMessageAsDeleted(evt.Context(), m)
if err != nil {
return true, errors.WithStackIf(err)
}
}
return false, nil
}
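// markLoggedMessageAsDeleted flags a single logged message as deleted in the messages2 table.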
func markLoggedMessageAsDeleted(ctx context.Context, mID int64) error {
_, err := models.Messages2s(models.Messages2Where.ID.EQ(mID)).UpdateAllG(ctx,
models.M{"deleted": true})
return err
}
func HandlePresenceUpdate(evt *eventsystem.EventData) {
pu := evt.PresenceUpdate()
gs := evt.GS
gs.RLock()
defer gs.RUnlock()
ms := gs.Member(false, pu.User.ID)
if ms == nil || !ms.PresenceSet || !ms.MemberSet {
queueEvt(pu)
return
}
if pu.User.Username != "" && pu.User.Username != ms.Username {
queueEvt(pu)
return
}
if pu.Nick != ms.Nick {
queueEvt(pu)
return
}
}
// While presence update is sent when user changes username.... MAKES NO SENSE IMO BUT WHATEVER
// Also check the nickname in case the user came online
func HandleQueueEvt(evt *eventsystem.EventData) {
queueEvt(evt.EvtInterface)
}
func queueEvt(evt interface{}) {
if os.Getenv("YAGPDB_LOGS_DISABLE_USERNAME_TRACKING") != "" {
return
}
select {
case evtChan <- evt:
return
default:
go func() {
evtChan <- evt
}()
}
}
func HandleGC(evt *eventsystem.EventData) {
gc := evt.GuildCreate()
evtChanGC <- &LightGC{
GuildID: gc.ID,
Members: gc.Members,
}
}
// type UsernameListing struct {
// gorm.Model
// UserID int64 `gorm:"index"`
// Username string
// }
// type NicknameListing struct {
// gorm.Model
// UserID int64 `gorm:"index"`
// GuildID string
// Nickname string
// }
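// CheckUsername stores a new username listing if the user's current username differs from the last recorded one.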
func CheckUsername(exec boil.ContextExecutor, ctx context.Context, usernameStmt *sql.Stmt, user *discordgo.User) error {
var lastUsername string
row := usernameStmt.QueryRow(user.ID)
err := row.Scan(&lastUsername)
if err == nil && lastUsername == user.Username {
// Not changed
return nil
}
if err != nil && err != sql.ErrNoRows {
// Other error
return nil
}
logger.Debug("User changed username, old:", lastUsername, " | new:", user.Username)
listing := &models.UsernameListing{
UserID: null.Int64From(user.ID),
Username: null.StringFrom(user.Username),
}
err = listing.Insert(ctx, exec, boil.Infer())
if err != nil {
logger.WithError(err).WithField("user", user.ID).Error("failed setting last username")
}
return err
}
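// CheckNickname stores a new nickname listing when the member's nickname has changed since the last recorded entry.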
func CheckNickname(exec boil.ContextExecutor, ctx context.Context, nicknameStmt *sql.Stmt, userID, guildID int64, nickname string) error {
var lastNickname string
row := nicknameStmt.QueryRow(userID, guildID)
err := row.Scan(&lastNickname)
if err == sql.ErrNoRows && nickname == "" {
// don't need to be putting this in the database as the first record for the user
return nil
}
if err == nil && lastNickname == nickname {
// Not changed
return nil
}
if err != sql.ErrNoRows && err != nil {
return err
}
logger.Debug("User changed nickname, old:", lastNickname, " | new:", nickname)
listing := &models.NicknameListing{
UserID: null.Int64From(userID),
GuildID: null.StringFrom(discordgo.StrID(guildID)),
Nickname: null.StringFrom(nickname),
}
err = listing.Insert(ctx, exec, boil.Infer())
if err != nil {
logger.WithError(err).WithField("guild", guildID).WithField("user", userID).Error("failed setting last nickname")
}
return err
}
// func CheckNicknameBulk(gDB *gorm.DB, guildID int64, members []*discordgo.Member) {
// ids := make([]int64, 0, len(members))
// for _, v := range members {
// ids = append(ids, v.User.ID)
// }
// rows, err := gDB.CommonDB().Query(
// "select distinct on(user_id) nickname,user_id from nickname_listings where user_id = ANY ($1) AND guild_id=$2 order by user_id,id desc;", pq.Int64Array(ids), guildID)
// if err != nil {
// logger.WithError(err).Error("Failed querying current nicknames")
// }
// // Value is wether the nickname was identical
// queriedUsers := make(map[int64]bool)
// for rows.Next() {
// var nickname string
// var userID int64
// err = rows.Scan(&nickname, &userID)
// if err != nil {
// logger.WithError(err).Error("Error while scanning")
// continue
// }
// for _, member := range members {
// if member.User.ID == userID {
// if member.Nick == nickname {
// // Already have the last username tracked
// queriedUsers[userID] = true
// } else {
// queriedUsers[userID] = false
// logger.Debug("CHANGED Nick: ", nickname, " : ", member.Nick)
// }
// break
// }
// }
// }
// rows.Close()
// for _, member := range members {
// unchanged, queried := queriedUsers[member.User.ID]
// if queried && unchanged {
// continue
// }
// if !queried && member.Nick == "" {
// // don't need to be putting this in the database as the first record for the user
// continue
// }
// logger.Debug("User changed nickname, new: ", member.Nick)
// listing := NicknameListing{
// UserID: member.User.ID,
// GuildID: discordgo.StrID(guildID),
// Nickname: member.Nick,
// }
// err = gDB.Create(&listing).Error
// if err != nil {
// logger.WithError(err).Error("Failed setting nickname")
// }
// }
// }
// func CheckUsernameBulk(gDB *gorm.DB, users []*discordgo.User) {
// ids := make([]int64, 0, len(users))
// for _, v := range users {
// ids = append(ids, v.ID)
// }
// rows, err := gDB.CommonDB().Query(
// "select distinct on(user_id) username,user_id from username_listings where user_id = ANY ($1) order by user_id,id desc;", pq.Int64Array(ids))
// if err != nil {
// logger.WithError(err).Error("Failed querying current usernames")
// }
// unchangedUsers := make(map[int64]bool)
// for rows.Next() {
// var username string
// var userID int64
// err = rows.Scan(&username, &userID)
// if err != nil {
// logger.WithError(err).Error("Error while scanning")
// continue
// }
// // var foundUser *discordgo.User
// for _, user := range users {
// if user.ID == userID {
// if user.Username == username {
// // Already have the last username tracked
// unchangedUsers[userID] = true
// }
// break
// }
// }
// }
// rows.Close()
// for _, user := range users {
// if unchanged, ok := unchangedUsers[user.ID]; ok && unchanged {
// continue
// }
// logger.Debug("User changed username, new: ", user.Username)
// listing := UsernameListing{
// UserID: user.ID,
// Username: user.Username,
// }
// err = gDB.Create(&listing).Error
// if err != nil {
// logger.WithError(err).Error("Failed setting username")
// }
// }
// }
var (
evtChan = make(chan interface{}, 1000)
evtChanGC = make(chan *LightGC)
)
type UserGuildPair struct {
GuildID int64
User *discordgo.User
}
// Queue up all the events and process them one by one, because of limited connections
func EvtProcesser() {
queuedMembers := make([]*discordgo.Member, 0)
queuedUsers := make([]*UserGuildPair, 0)
ticker := time.NewTicker(time.Second * 10)
for {
select {
case e := <-evtChan:
switch t := e.(type) {
case *discordgo.PresenceUpdate:
if t.User.Username == "" {
continue
}
queuedUsers = append(queuedUsers, &UserGuildPair{GuildID: t.GuildID, User: t.User})
case *discordgo.GuildMemberUpdate:
queuedMembers = append(queuedMembers, t.Member)
case *discordgo.GuildMemberAdd:
queuedMembers = append(queuedMembers, t.Member)
case *discordgo.Member:
queuedMembers = append(queuedMembers, t)
}
case <-ticker.C:
started := time.Now()
err := ProcessBatch(queuedUsers, queuedMembers)
logger.Debugf("Updated %d members and %d users in %s", len(queuedMembers), len(queuedUsers), time.Since(started).String())
if err == nil {
// reset the slices
queuedUsers = queuedUsers[:0]
queuedMembers = queuedMembers[:0]
} else {
logger.WithError(err).Error("failed batch updating usernames and nicknames")
}
}
}
}
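// ProcessBatch persists the queued username and nickname changes in a single transaction, skipping guilds that have logging disabled.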
func ProcessBatch(users []*UserGuildPair, members []*discordgo.Member) error {
configs := make([]*models.GuildLoggingConfig, 0)
err := common.SqlTX(func(tx *sql.Tx) error {
nickStatement, err := tx.Prepare("select nickname from nickname_listings where user_id=$1 AND guild_id=$2 order by id desc limit 1;")
if err != nil {
return errors.WrapIf(err, "nick stmnt prepare")
}
usernameStatement, err := tx.Prepare("select username from username_listings where user_id=$1 order by id desc limit 1;")
if err != nil {
return errors.WrapIf(err, "username stmnt prepare")
}
// first find all the configs
OUTERUSERS:
for _, v := range users {
for _, c := range configs {
if c.GuildID == v.GuildID {
continue OUTERUSERS
}
}
config, err := GetConfigCached(tx, v.GuildID)
if err != nil {
return errors.WrapIf(err, "users_configs")
}
configs = append(configs, config)
}
OUTERMEMBERS:
for _, v := range members {
for _, c := range configs {
if c.GuildID == v.GuildID {
continue OUTERMEMBERS
}
}
config, err := GetConfigCached(tx, v.GuildID)
if err != nil {
return errors.WrapIf(err, "members_configs")
}
configs = append(configs, config)
}
// update users first
OUTERUSERS_UPDT:
for _, v := range users {
// check if username logging is disabled
for _, c := range configs {
if c.GuildID == v.GuildID {
if !c.UsernameLoggingEnabled.Bool {
continue OUTERUSERS_UPDT
}
break
}
}
err = CheckUsername(tx, context.Background(), usernameStatement, v.User)
if err != nil {
return errors.WrapIf(err, "user username check")
}
}
// update members
for _, v := range members {
checkNick := false
checkUser := false
// find config
for _, c := range configs {
if c.GuildID == v.GuildID {
checkNick = c.NicknameLoggingEnabled.Bool
checkUser = c.UsernameLoggingEnabled.Bool
break
}
}
if !checkNick && !checkUser {
continue
}
err = CheckUsername(tx, context.Background(), usernameStatement, v.User)
if err != nil {
return errors.WrapIf(err, "members username check")
}
err = CheckNickname(tx, context.Background(), nickStatement, v.User.ID, v.GuildID, v.Nick)
if err != nil {
return errors.WrapIf(err, "members nickname check")
}
}
return nil
})
return err
}
type LightGC struct {
GuildID int64
Members []*discordgo.Member
}
func EvtProcesserGCs() {
for {
<-evtChanGC
// tx := common.GORM.Begin()
// conf, err := GetConfig(gc.GuildID)
// if err != nil {
// logger.WithError(err).Error("Failed fetching config")
// continue
// }
// started := time.Now()
// users := make([]*discordgo.User, len(gc.Members))
// for i, m := range gc.Members {
// users[i] = m.User
// }
// if conf.NicknameLoggingEnabled {
// CheckNicknameBulk(tx, gc.GuildID, gc.Members)
// }
// if conf.UsernameLoggingEnabled {
// CheckUsernameBulk(tx, users)
// }
// err = tx.Commit().Error
// if err != nil {
// logger.WithError(err).Error("Failed committing transaction")
// continue
// }
// if len(gc.Members) > 100 {
// logger.Infof("Checked %d members in %s", len(gc.Members), time.Since(started).String())
// // Make sure this doesn't use all our resources
// time.Sleep(time.Second * 25)
// } else {
// time.Sleep(time.Second * 15)
// }
}
}
const CacheKeyConfig bot.GSCacheKey = "logs_config"
func GetConfigCached(exec boil.ContextExecutor, gID int64) (*models.GuildLoggingConfig, error) {
gs := bot.State.Guild(true, gID)
if gs == nil {
return GetConfig(exec, context.Background(), gID)
}
v, err := gs.UserCacheFetch(CacheKeyConfig, func() (interface{}, error) {
conf, err := GetConfig(exec, context.Background(), gID)
return conf, err
})
if err != nil {
return nil, err
}
return v.(*models.GuildLoggingConfig), nil
}
| [
"\"YAGPDB_LOGS_DISABLE_USERNAME_TRACKING\""
]
| []
| [
"YAGPDB_LOGS_DISABLE_USERNAME_TRACKING"
]
| [] | ["YAGPDB_LOGS_DISABLE_USERNAME_TRACKING"] | go | 1 | 0 | |
src/cmd/go/internal/get/get.go | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package get implements the ``go get'' command.
package get
import (
"fmt"
"go/build"
"os"
"path/filepath"
"runtime"
"strings"
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/load"
"cmd/go/internal/str"
"cmd/go/internal/web"
"cmd/go/internal/work"
)
var CmdGet = &base.Command{
UsageLine: "get [-d] [-f] [-fix] [-insecure] [-t] [-u] [build flags] [packages]",
Short: "download and install packages and dependencies",
Long: `
Get downloads the packages named by the import paths, along with their
dependencies. It then installs the named packages, like 'go install'.
The -d flag instructs get to stop after downloading the packages; that is,
it instructs get not to install the packages.
The -f flag, valid only when -u is set, forces get -u not to verify that
each package has been checked out from the source control repository
implied by its import path. This can be useful if the source is a local fork
of the original.
The -fix flag instructs get to run the fix tool on the downloaded packages
before resolving dependencies or building the code.
The -insecure flag permits fetching from repositories and resolving
custom domains using insecure schemes such as HTTP. Use with caution.
The -t flag instructs get to also download the packages required to build
the tests for the specified packages.
The -u flag instructs get to use the network to update the named packages
and their dependencies. By default, get uses the network to check out
missing packages but does not use it to look for updates to existing packages.
The -v flag enables verbose progress and debug output.
Get also accepts build flags to control the installation. See 'go help build'.
When checking out a new package, get creates the target directory
GOPATH/src/<import-path>. If the GOPATH contains multiple entries,
get uses the first one. For more details see: 'go help gopath'.
When checking out or updating a package, get looks for a branch or tag
that matches the locally installed version of Go. The most important
rule is that if the local installation is running version "go1", get
searches for a branch or tag named "go1". If no such version exists it
retrieves the most recent version of the package.
When go get checks out or updates a Git repository,
it also updates any git submodules referenced by the repository.
Get never checks out or updates code stored in vendor directories.
For more about specifying packages, see 'go help packages'.
For more about how 'go get' finds source code to
download, see 'go help importpath'.
See also: go build, go install, go clean.
`,
}
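// Illustrative invocations (not part of the original source) showing how the
// flags documented above combine; the import path is a hypothetical example:
//
//	go get github.com/user/repo        // download and install the package
//	go get -d github.com/user/repo     // download only; skip the install step
//	go get -u -t github.com/user/repo  // also update it and fetch test deps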
var getD = CmdGet.Flag.Bool("d", false, "")
var getF = CmdGet.Flag.Bool("f", false, "")
var getT = CmdGet.Flag.Bool("t", false, "")
var getU = CmdGet.Flag.Bool("u", false, "")
var getFix = CmdGet.Flag.Bool("fix", false, "")
var getInsecure = CmdGet.Flag.Bool("insecure", false, "")
func init() {
work.AddBuildFlags(CmdGet)
CmdGet.Run = runGet // break init loop
}
func runGet(cmd *base.Command, args []string) {
work.InstrumentInit()
work.BuildModeInit()
if *getF && !*getU {
base.Fatalf("go get: cannot use -f flag without -u")
}
// Disable any prompting for passwords by Git.
// Only has an effect for 2.3.0 or later, but avoiding
// the prompt in earlier versions is just too hard.
// If user has explicitly set GIT_TERMINAL_PROMPT=1, keep
// prompting.
// See golang.org/issue/9341 and golang.org/issue/12706.
if os.Getenv("GIT_TERMINAL_PROMPT") == "" {
os.Setenv("GIT_TERMINAL_PROMPT", "0")
}
// Disable any ssh connection pooling by Git.
// If a Git subprocess forks a child into the background to cache a new connection,
// that child keeps stdout/stderr open. After the Git subprocess exits,
// os/exec expects to be able to read from the stdout/stderr pipe
// until EOF to get all the data that the Git subprocess wrote before exiting.
// The EOF doesn't come until the child exits too, because the child
// is holding the write end of the pipe.
// This is unfortunate, but it has come up at least twice
// (see golang.org/issue/13453 and golang.org/issue/16104)
// and confuses users when it does.
// If the user has explicitly set GIT_SSH or GIT_SSH_COMMAND,
// assume they know what they are doing and don't step on it.
// But default to turning off ControlMaster.
if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" {
os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no")
}
// Phase 1. Download/update.
var stk load.ImportStack
mode := 0
if *getT {
mode |= load.GetTestDeps
}
args = downloadPaths(args)
for _, arg := range args {
download(arg, nil, &stk, mode)
}
base.ExitIfErrors()
// Phase 2. Rescan packages and re-evaluate args list.
// Code we downloaded and all code that depends on it
// needs to be evicted from the package cache so that
// the information will be recomputed. Instead of keeping
// track of the reverse dependency information, evict
// everything.
load.ClearPackageCache()
// In order to rebuild packages information completely,
// we need to clear commands cache. Command packages are
// referring to evicted packages from the package cache.
// This leads to duplicated loads of the standard packages.
load.ClearCmdCache()
args = load.ImportPaths(args)
load.PackagesForBuild(args)
// Phase 3. Install.
if *getD {
// Download only.
// Check delayed until now so that importPaths
// and packagesForBuild have a chance to print errors.
return
}
work.InstallPackages(args, true)
}
// downloadPaths prepares the list of paths to pass to download.
// It expands ... patterns that can be expanded. If there is no match
// for a particular pattern, downloadPaths leaves it in the result list,
// in the hope that we can figure out the repository from the
// initial ...-free prefix.
func downloadPaths(args []string) []string {
args = load.ImportPathsNoDotExpansion(args)
var out []string
for _, a := range args {
if strings.Contains(a, "...") {
var expand []string
// Use matchPackagesInFS to avoid printing
// warnings. They will be printed by the
// eventual call to importPaths instead.
if build.IsLocalImport(a) {
expand = load.MatchPackagesInFS(a)
} else {
expand = load.MatchPackages(a)
}
if len(expand) > 0 {
out = append(out, expand...)
continue
}
}
out = append(out, a)
}
return out
}
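// Illustrative example (not from the original source): for an argument such as
// "github.com/user/repo/...", an existing checkout lets the wildcard expand to
// concrete package paths; with no match the pattern is kept unchanged so
// download can still resolve the repository from the "github.com/user/repo"
// prefix.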
// downloadCache records the import paths we have already
// considered during the download, to avoid duplicate work when
// there is more than one dependency sequence leading to
// a particular package.
var downloadCache = map[string]bool{}
// downloadRootCache records the version control repository
// root directories we have already considered during the download.
// For example, all the packages in the github.com/google/codesearch repo
// share the same root (the directory for that path), and we only need
// to run the hg commands to consider each repository once.
var downloadRootCache = map[string]bool{}
// download runs the download half of the get command
// for the package named by the argument.
func download(arg string, parent *load.Package, stk *load.ImportStack, mode int) {
if mode&load.UseVendor != 0 {
// Caller is responsible for expanding vendor paths.
panic("internal error: download mode has useVendor set")
}
load1 := func(path string, mode int) *load.Package {
if parent == nil {
return load.LoadPackage(path, stk)
}
return load.LoadImport(path, parent.Dir, parent, stk, nil, mode)
}
p := load1(arg, mode)
if p.Error != nil && p.Error.Hard {
base.Errorf("%s", p.Error)
return
}
// loadPackage inferred the canonical ImportPath from arg.
// Use that in the following to prevent hysteresis effects
// in e.g. downloadCache and packageCache.
// This allows invocations such as:
// mkdir -p $GOPATH/src/github.com/user
// cd $GOPATH/src/github.com/user
// go get ./foo
// see: golang.org/issue/9767
arg = p.ImportPath
// There's nothing to do if this is a package in the standard library.
if p.Standard {
return
}
// Only process each package once.
// (Unless we're fetching test dependencies for this package,
// in which case we want to process it again.)
if downloadCache[arg] && mode&load.GetTestDeps == 0 {
return
}
downloadCache[arg] = true
pkgs := []*load.Package{p}
wildcardOkay := len(*stk) == 0
isWildcard := false
// Download if the package is missing, or update if we're using -u.
if p.Dir == "" || *getU {
// The actual download.
stk.Push(arg)
err := downloadPackage(p)
if err != nil {
base.Errorf("%s", &load.PackageError{ImportStack: stk.Copy(), Err: err.Error()})
stk.Pop()
return
}
stk.Pop()
args := []string{arg}
// If the argument has a wildcard in it, re-evaluate the wildcard.
// We delay this until after reloadPackage so that the old entry
// for p has been replaced in the package cache.
if wildcardOkay && strings.Contains(arg, "...") {
if build.IsLocalImport(arg) {
args = load.MatchPackagesInFS(arg)
} else {
args = load.MatchPackages(arg)
}
isWildcard = true
}
// Clear all relevant package cache entries before
// doing any new loads.
load.ClearPackageCachePartial(args)
pkgs = pkgs[:0]
for _, arg := range args {
// Note: load calls loadPackage or loadImport,
// which push arg onto stk already.
// Do not push here too, or else stk will say arg imports arg.
p := load1(arg, mode)
if p.Error != nil {
base.Errorf("%s", p.Error)
continue
}
pkgs = append(pkgs, p)
}
}
// Process package, which might now be multiple packages
// due to wildcard expansion.
for _, p := range pkgs {
if *getFix {
files := base.FilterDotUnderscoreFiles(base.RelPaths(p.Internal.AllGoFiles))
base.Run(cfg.BuildToolexec, str.StringList(base.Tool("fix"), files))
// The imports might have changed, so reload again.
p = load.ReloadPackage(arg, stk)
if p.Error != nil {
base.Errorf("%s", p.Error)
return
}
}
if isWildcard {
// Report both the real package and the
// wildcard in any error message.
stk.Push(p.ImportPath)
}
// Process dependencies, now that we know what they are.
imports := p.Imports
if mode&load.GetTestDeps != 0 {
// Process test dependencies when -t is specified.
// (But don't get test dependencies for test dependencies:
// we always pass mode 0 to the recursive calls below.)
imports = str.StringList(imports, p.TestImports, p.XTestImports)
}
for i, path := range imports {
if path == "C" {
continue
}
// Fail fast on import naming full vendor path.
// Otherwise expand path as needed for test imports.
// Note that p.Imports can have additional entries beyond p.Internal.Build.Imports.
orig := path
if i < len(p.Internal.Build.Imports) {
orig = p.Internal.Build.Imports[i]
}
if j, ok := load.FindVendor(orig); ok {
stk.Push(path)
err := &load.PackageError{
ImportStack: stk.Copy(),
Err: "must be imported as " + path[j+len("vendor/"):],
}
stk.Pop()
base.Errorf("%s", err)
continue
}
// If this is a test import, apply vendor lookup now.
// We cannot pass useVendor to download, because
// download does caching based on the value of path,
// so it must be the fully qualified path already.
if i >= len(p.Imports) {
path = load.VendoredImportPath(p, path)
}
download(path, p, stk, 0)
}
if isWildcard {
stk.Pop()
}
}
}
// downloadPackage runs the create or download command
// to make the first copy of or update a copy of the given package.
func downloadPackage(p *load.Package) error {
var (
vcs *vcsCmd
repo, rootPath string
err error
)
security := web.Secure
if *getInsecure {
security = web.Insecure
}
if p.Internal.Build.SrcRoot != "" {
// Directory exists. Look for checkout along path to src.
vcs, rootPath, err = vcsFromDir(p.Dir, p.Internal.Build.SrcRoot)
if err != nil {
return err
}
repo = "<local>" // should be unused; make distinctive
// Double-check where it came from.
if *getU && vcs.remoteRepo != nil {
dir := filepath.Join(p.Internal.Build.SrcRoot, filepath.FromSlash(rootPath))
remote, err := vcs.remoteRepo(vcs, dir)
if err != nil {
return err
}
repo = remote
if !*getF {
if rr, err := repoRootForImportPath(p.ImportPath, security); err == nil {
repo := rr.repo
if rr.vcs.resolveRepo != nil {
resolved, err := rr.vcs.resolveRepo(rr.vcs, dir, repo)
if err == nil {
repo = resolved
}
}
if remote != repo && rr.isCustom {
return fmt.Errorf("%s is a custom import path for %s, but %s is checked out from %s", rr.root, repo, dir, remote)
}
}
}
}
} else {
// Analyze the import path to determine the version control system,
// repository, and the import path for the root of the repository.
rr, err := repoRootForImportPath(p.ImportPath, security)
if err != nil {
return err
}
vcs, repo, rootPath = rr.vcs, rr.repo, rr.root
}
if !vcs.isSecure(repo) && !*getInsecure {
return fmt.Errorf("cannot download, %v uses insecure protocol", repo)
}
if p.Internal.Build.SrcRoot == "" {
// Package not found. Put in first directory of $GOPATH.
list := filepath.SplitList(cfg.BuildContext.GOPATH)
if len(list) == 0 {
return fmt.Errorf("cannot download, $GOPATH not set. For more details see: 'go help gopath'")
}
// Guard against people setting GOPATH=$GOROOT.
if filepath.Clean(list[0]) == filepath.Clean(cfg.GOROOT) {
return fmt.Errorf("cannot download, $GOPATH must not be set to $GOROOT. For more details see: 'go help gopath'")
}
if _, err := os.Stat(filepath.Join(list[0], "src/cmd/go/alldocs.go")); err == nil {
return fmt.Errorf("cannot download, %s is a GOROOT, not a GOPATH. For more details see: 'go help gopath'", list[0])
}
p.Internal.Build.Root = list[0]
p.Internal.Build.SrcRoot = filepath.Join(list[0], "src")
p.Internal.Build.PkgRoot = filepath.Join(list[0], "pkg")
}
root := filepath.Join(p.Internal.Build.SrcRoot, filepath.FromSlash(rootPath))
// If we've considered this repository already, don't do it again.
if downloadRootCache[root] {
return nil
}
downloadRootCache[root] = true
if cfg.BuildV {
fmt.Fprintf(os.Stderr, "%s (download)\n", rootPath)
}
// Check that this is an appropriate place for the repo to be checked out.
// The target directory must either not exist or have a repo checked out already.
meta := filepath.Join(root, "."+vcs.cmd)
st, err := os.Stat(meta)
if err == nil && !st.IsDir() {
return fmt.Errorf("%s exists but is not a directory", meta)
}
if err != nil {
// Metadata directory does not exist. Prepare to checkout new copy.
// Some version control tools require the target directory not to exist.
// We require that too, just to avoid stepping on existing work.
if _, err := os.Stat(root); err == nil {
return fmt.Errorf("%s exists but %s does not - stale checkout?", root, meta)
}
_, err := os.Stat(p.Internal.Build.Root)
gopathExisted := err == nil
// Some version control tools require the parent of the target to exist.
parent, _ := filepath.Split(root)
if err = os.MkdirAll(parent, 0777); err != nil {
return err
}
if cfg.BuildV && !gopathExisted && p.Internal.Build.Root == cfg.BuildContext.GOPATH {
fmt.Fprintf(os.Stderr, "created GOPATH=%s; see 'go help gopath'\n", p.Internal.Build.Root)
}
if err = vcs.create(root, repo); err != nil {
return err
}
} else {
// Metadata directory does exist; download incremental updates.
if err = vcs.download(root); err != nil {
return err
}
}
if cfg.BuildN {
// Do not show tag sync in -n; it's noise more than anything,
// and since we're not running commands, no tag will be found.
// But avoid printing nothing.
fmt.Fprintf(os.Stderr, "# cd %s; %s sync/update\n", root, vcs.cmd)
return nil
}
// Select and sync to appropriate version of the repository.
tags, err := vcs.tags(root)
if err != nil {
return err
}
vers := runtime.Version()
if i := strings.Index(vers, " "); i >= 0 {
vers = vers[:i]
}
if err := vcs.tagSync(root, selectTag(vers, tags)); err != nil {
return err
}
return nil
}
// selectTag returns the closest matching tag for a given version.
// Closest means the latest one that is not after the current release.
// Version "goX" (or "goX.Y" or "goX.Y.Z") matches tags of the same form.
// Version "release.rN" matches tags of the form "go.rN" (N being a floating-point number).
// Version "weekly.YYYY-MM-DD" matches tags like "go.weekly.YYYY-MM-DD".
//
// NOTE(rsc): Eventually we will need to decide on some logic here.
// For now, there is only "go1". This matches the docs in go help get.
func selectTag(goVersion string, tags []string) (match string) {
for _, t := range tags {
if t == "go1" {
return "go1"
}
}
return ""
}
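// Illustrative example (not part of the original file): with the logic above,
// selectTag("go1.9", []string{"go1"}) returns "go1", while a tag list without
// "go1" yields "".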
| [
"\"GIT_TERMINAL_PROMPT\"",
"\"GIT_SSH\"",
"\"GIT_SSH_COMMAND\""
]
| []
| [
"GIT_SSH",
"GIT_SSH_COMMAND",
"GIT_TERMINAL_PROMPT"
]
| [] | ["GIT_SSH", "GIT_SSH_COMMAND", "GIT_TERMINAL_PROMPT"] | go | 3 | 0 | |
third_party/virtualbox/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/Common/InfClassObject.py | ## @file
# This file is used to define each component of INF file
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import re
import EdkLogger
from CommonDataClass.CommonClass import LibraryClassClass
from CommonDataClass.ModuleClass import *
from String import *
from DataType import *
from Identification import *
from Dictionary import *
from BuildToolError import *
from Misc import sdict
import GlobalData
from Table.TableInf import TableInf
import Database
from Parsing import *
from Common.LongFilePathSupport import OpenLongFilePath as open
#
# Global variable
#
Section = {TAB_UNKNOWN.upper() : MODEL_UNKNOWN,
TAB_INF_DEFINES.upper() : MODEL_META_DATA_HEADER,
TAB_BUILD_OPTIONS.upper() : MODEL_META_DATA_BUILD_OPTION,
TAB_INCLUDES.upper() : MODEL_EFI_INCLUDE,
TAB_LIBRARIES.upper() : MODEL_EFI_LIBRARY_INSTANCE,
TAB_LIBRARY_CLASSES.upper() : MODEL_EFI_LIBRARY_CLASS,
TAB_PACKAGES.upper() : MODEL_META_DATA_PACKAGE,
TAB_NMAKE.upper() : MODEL_META_DATA_NMAKE,
TAB_INF_FIXED_PCD.upper() : MODEL_PCD_FIXED_AT_BUILD,
TAB_INF_PATCH_PCD.upper() : MODEL_PCD_PATCHABLE_IN_MODULE,
TAB_INF_FEATURE_PCD.upper() : MODEL_PCD_FEATURE_FLAG,
TAB_INF_PCD_EX.upper() : MODEL_PCD_DYNAMIC_EX,
TAB_INF_PCD.upper() : MODEL_PCD_DYNAMIC,
TAB_SOURCES.upper() : MODEL_EFI_SOURCE_FILE,
TAB_GUIDS.upper() : MODEL_EFI_GUID,
TAB_PROTOCOLS.upper() : MODEL_EFI_PROTOCOL,
TAB_PPIS.upper() : MODEL_EFI_PPI,
TAB_DEPEX.upper() : MODEL_EFI_DEPEX,
TAB_BINARIES.upper() : MODEL_EFI_BINARY_FILE,
TAB_USER_EXTENSIONS.upper() : MODEL_META_DATA_USER_EXTENSION
}
gComponentType2ModuleType = {
"LIBRARY" : "BASE",
"SECURITY_CORE" : "SEC",
"PEI_CORE" : "PEI_CORE",
"COMBINED_PEIM_DRIVER" : "PEIM",
"PIC_PEIM" : "PEIM",
"RELOCATABLE_PEIM" : "PEIM",
"PE32_PEIM" : "PEIM",
"BS_DRIVER" : "DXE_DRIVER",
"RT_DRIVER" : "DXE_RUNTIME_DRIVER",
"SAL_RT_DRIVER" : "DXE_SAL_DRIVER",
"APPLICATION" : "UEFI_APPLICATION",
"LOGO" : "BASE",
}
gNmakeFlagPattern = re.compile("(?:EBC_)?([A-Z]+)_(?:STD_|PROJ_|ARCH_)?FLAGS(?:_DLL|_ASL|_EXE)?", re.UNICODE)
gNmakeFlagName2ToolCode = {
"C" : "CC",
"LIB" : "SLINK",
"LINK" : "DLINK",
}
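# Illustrative examples (not in the original source) of how the pattern and map
# above are applied by GenNmakes() further down:
#   "C_STD_FLAGS"    -> tool "C"    -> code "CC"    -> "*_*_*_CC_FLAGS"
#   "EBC_LINK_FLAGS" -> tool "LINK" -> code "DLINK" -> "*_*_*_DLINK_FLAGS"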
class InfHeader(ModuleHeaderClass):
_Mapping_ = {
#
# Required Fields
#
TAB_INF_DEFINES_BASE_NAME : "Name",
TAB_INF_DEFINES_FILE_GUID : "Guid",
TAB_INF_DEFINES_MODULE_TYPE : "ModuleType",
TAB_INF_DEFINES_EFI_SPECIFICATION_VERSION : "UefiSpecificationVersion",
TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION : "UefiSpecificationVersion",
TAB_INF_DEFINES_EDK_RELEASE_VERSION : "EdkReleaseVersion",
#
# Optional Fields
#
TAB_INF_DEFINES_INF_VERSION : "InfVersion",
TAB_INF_DEFINES_BINARY_MODULE : "BinaryModule",
TAB_INF_DEFINES_COMPONENT_TYPE : "ComponentType",
TAB_INF_DEFINES_MAKEFILE_NAME : "MakefileName",
TAB_INF_DEFINES_BUILD_NUMBER : "BuildNumber",
TAB_INF_DEFINES_BUILD_TYPE : "BuildType",
TAB_INF_DEFINES_FFS_EXT : "FfsExt",
TAB_INF_DEFINES_FV_EXT : "FvExt",
TAB_INF_DEFINES_SOURCE_FV : "SourceFv",
TAB_INF_DEFINES_VERSION_NUMBER : "VersionNumber",
TAB_INF_DEFINES_VERSION_STRING : "VersionString",
TAB_INF_DEFINES_VERSION : "Version",
TAB_INF_DEFINES_PCD_IS_DRIVER : "PcdIsDriver",
TAB_INF_DEFINES_TIANO_EDK_FLASHMAP_H : "TianoEdkFlashMap_h",
TAB_INF_DEFINES_SHADOW : "Shadow",
# TAB_INF_DEFINES_LIBRARY_CLASS : "LibraryClass",
# TAB_INF_DEFINES_ENTRY_POINT : "ExternImages",
# TAB_INF_DEFINES_UNLOAD_IMAGE : "ExternImages",
# TAB_INF_DEFINES_CONSTRUCTOR : ,
# TAB_INF_DEFINES_DESTRUCTOR : ,
# TAB_INF_DEFINES_DEFINE : "Define",
# TAB_INF_DEFINES_SPEC : "Specification",
# TAB_INF_DEFINES_CUSTOM_MAKEFILE : "CustomMakefile",
# TAB_INF_DEFINES_MACRO :
}
def __init__(self):
ModuleHeaderClass.__init__(self)
self.VersionNumber = ''
self.VersionString = ''
#print self.__dict__
def __setitem__(self, key, value):
self.__dict__[self._Mapping_[key]] = value
def __getitem__(self, key):
return self.__dict__[self._Mapping_[key]]
## "in" test support
def __contains__(self, key):
return key in self._Mapping_
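# Illustrative sketch (not part of the original file): the _Mapping_ table lets
# [Defines] keys be assigned directly onto the header object, e.g.:
#   Header = InfHeader()
#   if TAB_INF_DEFINES_BASE_NAME in Header:           # True via _Mapping_
#       Header[TAB_INF_DEFINES_BASE_NAME] = 'Hello'   # stored as Header.Name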
## InfObject
#
# This class defines the basic Inf object which other classes inherit from
#
# @param object: Inherited from object class
#
class InfObject(object):
def __init__(self):
object.__init__()
## Inf
#
# This class defines the structure used in the Inf object
#
# @param InfObject: Inherited from InfObject class
# @param Filename: Input value for Filename of Inf file, default is None
# @param IsMergeAllArches: Input value for IsMergeAllArches
# True is to merge all arches
# False is not to merge all arches
# default is False
# @param IsToModule: Input value for IsToModule
# True is to transfer to ModuleObject automatically
# False is not to transfer to ModuleObject automatically
# default is False
# @param WorkspaceDir: Input value for current workspace directory, default is None
#
# @var Identification: To store value for Identification, it is a structure as Identification
# @var UserExtensions: To store value for UserExtensions
# @var Module: To store value for Module, it is a structure as ModuleClass
# @var WorkspaceDir: To store value for WorkspaceDir
# @var KeyList: To store value for KeyList, a list for all Keys used in Inf
#
class Inf(InfObject):
def __init__(self, Filename=None, IsToDatabase=False, IsToModule=False, WorkspaceDir=None, Database=None, SupArchList=DataType.ARCH_LIST):
self.Identification = Identification()
self.Module = ModuleClass()
self.UserExtensions = ''
self.WorkspaceDir = WorkspaceDir
self.SupArchList = SupArchList
self.IsToDatabase = IsToDatabase
self.Cur = Database.Cur
self.TblFile = Database.TblFile
self.TblInf = Database.TblInf
self.FileID = -1
#self.TblInf = TableInf(Database.Cur)
self.KeyList = [
TAB_SOURCES, TAB_BUILD_OPTIONS, TAB_BINARIES, TAB_INCLUDES, TAB_GUIDS,
TAB_PROTOCOLS, TAB_PPIS, TAB_LIBRARY_CLASSES, TAB_PACKAGES, TAB_LIBRARIES,
TAB_INF_FIXED_PCD, TAB_INF_PATCH_PCD, TAB_INF_FEATURE_PCD, TAB_INF_PCD,
TAB_INF_PCD_EX, TAB_DEPEX, TAB_NMAKE, TAB_INF_DEFINES
]
#
# Upper all KEYs to ignore case sensitive when parsing
#
self.KeyList = map(lambda c: c.upper(), self.KeyList)
#
# Init RecordSet
#
self.RecordSet = {}
for Key in self.KeyList:
self.RecordSet[Section[Key]] = []
#
# Load Inf file if filename is not None
#
if Filename != None:
self.LoadInfFile(Filename)
#
# Transfer to Module Object if IsToModule is True
#
if IsToModule:
self.InfToModule()
## Transfer to Module Object
#
# Transfer all contents of an Inf file to a standard Module Object
#
def InfToModule(self):
#
# Init global information for the file
#
ContainerFile = self.Identification.FileFullPath
#
# Generate Package Header
#
self.GenModuleHeader(ContainerFile)
#
# Generate BuildOptions
#
self.GenBuildOptions(ContainerFile)
#
# Generate Includes
#
self.GenIncludes(ContainerFile)
#
# Generate Libraries
#
self.GenLibraries(ContainerFile)
#
# Generate LibraryClasses
#
self.GenLibraryClasses(ContainerFile)
#
# Generate Packages
#
self.GenPackages(ContainerFile)
#
# Generate Nmakes
#
self.GenNmakes(ContainerFile)
#
# Generate Pcds
#
self.GenPcds(ContainerFile)
#
# Generate Sources
#
self.GenSources(ContainerFile)
#
# Generate UserExtensions
#
self.GenUserExtensions(ContainerFile)
#
# Generate Guids
#
self.GenGuidProtocolPpis(DataType.TAB_GUIDS, ContainerFile)
#
# Generate Protocols
#
self.GenGuidProtocolPpis(DataType.TAB_PROTOCOLS, ContainerFile)
#
# Generate Ppis
#
self.GenGuidProtocolPpis(DataType.TAB_PPIS, ContainerFile)
#
# Generate Depexes
#
self.GenDepexes(ContainerFile)
#
# Generate Binaries
#
self.GenBinaries(ContainerFile)
## Parse [Defines] section
#
# Parse [Defines] section into InfDefines object
#
# @param InfFile The path of the INF file
# @param Section The title of "Defines" section
# @param Lines The content of "Defines" section
#
def ParseDefines(self, InfFile, Section, Lines):
TokenList = Section.split(TAB_SPLIT)
if len(TokenList) == 3:
RaiseParserError(Section, "Defines", InfFile, "[xx.yy.%s] format (with platform) is not supported")
if len(TokenList) == 2:
Arch = TokenList[1].upper()
else:
Arch = TAB_ARCH_COMMON
if Arch not in self.Defines:
self.Defines[Arch] = InfDefines()
GetSingleValueOfKeyFromLines(Lines, self.Defines[Arch].DefinesDictionary,
TAB_COMMENT_SPLIT, TAB_EQUAL_SPLIT, False, None)
## Load Inf file
#
# Load the file if it exists
#
# @param Filename: Input value for filename of Inf file
#
def LoadInfFile(self, Filename):
#
# Insert a record for file
#
Filename = NormPath(Filename)
self.Identification.FileFullPath = Filename
(self.Identification.FileRelativePath, self.Identification.FileName) = os.path.split(Filename)
self.FileID = self.TblFile.InsertFile(Filename, MODEL_FILE_INF)
#
# Init InfTable
#
#self.TblInf.Table = "Inf%s" % self.FileID
#self.TblInf.Create()
#
# Init common datas
#
IfDefList, SectionItemList, CurrentSection, ArchList, ThirdList, IncludeFiles = \
[], [], TAB_UNKNOWN, [], [], []
LineNo = 0
#
# Parse file content
#
IsFindBlockComment = False
ReservedLine = ''
for Line in open(Filename, 'r'):
LineNo = LineNo + 1
#
# Remove comment block
#
if Line.find(TAB_COMMENT_EDK_START) > -1:
ReservedLine = GetSplitList(Line, TAB_COMMENT_EDK_START, 1)[0]
IsFindBlockComment = True
if Line.find(TAB_COMMENT_EDK_END) > -1:
Line = ReservedLine + GetSplitList(Line, TAB_COMMENT_EDK_END, 1)[1]
ReservedLine = ''
IsFindBlockComment = False
if IsFindBlockComment:
continue
#
# Remove comments at tail and remove spaces again
#
Line = CleanString(Line)
if Line == '':
continue
#
# Find a new section tab
# First insert previous section items
# And then parse the content of the new section
#
if Line.startswith(TAB_SECTION_START) and Line.endswith(TAB_SECTION_END):
if Line[1:3] == "--":
continue
Model = Section[CurrentSection.upper()]
#
# Insert items data of previous section
#
InsertSectionItemsIntoDatabase(self.TblInf, self.FileID, Filename, Model, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList, self.RecordSet)
#
# Parse the new section
#
SectionItemList = []
ArchList = []
ThirdList = []
CurrentSection = ''
LineList = GetSplitValueList(Line[len(TAB_SECTION_START):len(Line) - len(TAB_SECTION_END)], TAB_COMMA_SPLIT)
for Item in LineList:
ItemList = GetSplitValueList(Item, TAB_SPLIT)
if CurrentSection == '':
CurrentSection = ItemList[0]
else:
if CurrentSection != ItemList[0]:
EdkLogger.error("Parser", PARSER_ERROR, "Different section names '%s' and '%s' are found in one section definition, this is not allowed." % (CurrentSection, ItemList[0]), File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
if CurrentSection.upper() not in self.KeyList:
RaiseParserError(Line, CurrentSection, Filename, '', LineNo)
CurrentSection = TAB_UNKNOWN
continue
ItemList.append('')
ItemList.append('')
if len(ItemList) > 5:
RaiseParserError(Line, CurrentSection, Filename, '', LineNo)
else:
if ItemList[1] != '' and ItemList[1].upper() not in ARCH_LIST_FULL:
EdkLogger.error("Parser", PARSER_ERROR, "Invalid Arch definition '%s' found" % ItemList[1], File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
ArchList.append(ItemList[1].upper())
ThirdList.append(ItemList[2])
continue
#
# Not in any defined section
#
if CurrentSection == TAB_UNKNOWN:
ErrorMsg = "%s is not in any defined section" % Line
EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
#
# Add a section item
#
SectionItemList.append([Line, LineNo])
# End of parse
#End of For
#
# Insert items data of last section
#
Model = Section[CurrentSection.upper()]
InsertSectionItemsIntoDatabase(self.TblInf, self.FileID, Filename, Model, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList, self.RecordSet)
#
# Replace all DEFINE macros with its actual values
#
ParseDefineMacro2(self.TblInf, self.RecordSet, GlobalData.gGlobalDefines)
## Show detailed information of Module
#
# Print all members and their values of Module class
#
def ShowModule(self):
M = self.Module
for Arch in M.Header.keys():
print '\nArch =', Arch
print 'Filename =', M.Header[Arch].FileName
print 'FullPath =', M.Header[Arch].FullPath
print 'BaseName =', M.Header[Arch].Name
print 'Guid =', M.Header[Arch].Guid
print 'Version =', M.Header[Arch].Version
print 'InfVersion =', M.Header[Arch].InfVersion
print 'UefiSpecificationVersion =', M.Header[Arch].UefiSpecificationVersion
print 'EdkReleaseVersion =', M.Header[Arch].EdkReleaseVersion
print 'ModuleType =', M.Header[Arch].ModuleType
print 'BinaryModule =', M.Header[Arch].BinaryModule
print 'ComponentType =', M.Header[Arch].ComponentType
print 'MakefileName =', M.Header[Arch].MakefileName
print 'BuildNumber =', M.Header[Arch].BuildNumber
print 'BuildType =', M.Header[Arch].BuildType
print 'FfsExt =', M.Header[Arch].FfsExt
print 'FvExt =', M.Header[Arch].FvExt
print 'SourceFv =', M.Header[Arch].SourceFv
print 'PcdIsDriver =', M.Header[Arch].PcdIsDriver
print 'TianoEdkFlashMap_h =', M.Header[Arch].TianoEdkFlashMap_h
print 'Shadow =', M.Header[Arch].Shadow
print 'LibraryClass =', M.Header[Arch].LibraryClass
for Item in M.Header[Arch].LibraryClass:
print Item.LibraryClass, DataType.TAB_VALUE_SPLIT.join(Item.SupModuleList)
print 'CustomMakefile =', M.Header[Arch].CustomMakefile
print 'Define =', M.Header[Arch].Define
print 'Specification =', M.Header[Arch].Specification
for Item in self.Module.ExternImages:
print '\nEntry_Point = %s, UnloadImage = %s' % (Item.ModuleEntryPoint, Item.ModuleUnloadImage)
for Item in self.Module.ExternLibraries:
print 'Constructor = %s, Destructor = %s' % (Item.Constructor, Item.Destructor)
print '\nBuildOptions =', M.BuildOptions
for Item in M.BuildOptions:
print Item.ToolChainFamily, Item.ToolChain, Item.Option, Item.SupArchList
print '\nIncludes =', M.Includes
for Item in M.Includes:
print Item.FilePath, Item.SupArchList
print '\nLibraries =', M.Libraries
for Item in M.Libraries:
print Item.Library, Item.SupArchList
print '\nLibraryClasses =', M.LibraryClasses
for Item in M.LibraryClasses:
print Item.LibraryClass, Item.RecommendedInstance, Item.FeatureFlag, Item.SupModuleList, Item.SupArchList, Item.Define
print '\nPackageDependencies =', M.PackageDependencies
for Item in M.PackageDependencies:
print Item.FilePath, Item.SupArchList, Item.FeatureFlag
print '\nNmake =', M.Nmake
for Item in M.Nmake:
print Item.Name, Item.Value, Item.SupArchList
print '\nPcds =', M.PcdCodes
for Item in M.PcdCodes:
print '\tCName=', Item.CName, 'TokenSpaceGuidCName=', Item.TokenSpaceGuidCName, 'DefaultValue=', Item.DefaultValue, 'ItemType=', Item.ItemType, Item.SupArchList
print '\nSources =', M.Sources
for Source in M.Sources:
print Source.SourceFile, 'Fam=', Source.ToolChainFamily, 'Pcd=', Source.FeatureFlag, 'Tag=', Source.TagName, 'ToolCode=', Source.ToolCode, Source.SupArchList
print '\nUserExtensions =', M.UserExtensions
for UserExtension in M.UserExtensions:
print UserExtension.UserID, UserExtension.Identifier, UserExtension.Content
print '\nGuids =', M.Guids
for Item in M.Guids:
print Item.CName, Item.SupArchList, Item.FeatureFlag
print '\nProtocols =', M.Protocols
for Item in M.Protocols:
print Item.CName, Item.SupArchList, Item.FeatureFlag
print '\nPpis =', M.Ppis
for Item in M.Ppis:
print Item.CName, Item.SupArchList, Item.FeatureFlag
print '\nDepex =', M.Depex
for Item in M.Depex:
print Item.Depex, Item.SupArchList, Item.Define
print '\nBinaries =', M.Binaries
for Binary in M.Binaries:
print 'Type=', Binary.FileType, 'Target=', Binary.Target, 'Name=', Binary.BinaryFile, 'FeatureFlag=', Binary.FeatureFlag, 'SupArchList=', Binary.SupArchList
## Convert [Defines] section content to ModuleHeaderClass
#
# Convert [Defines] section content to ModuleHeaderClass
#
# @param Defines The content under [Defines] section
# @param ModuleHeader An object of ModuleHeaderClass
# @param Arch The supported ARCH
#
def GenModuleHeader(self, ContainerFile):
EdkLogger.debug(2, "Generate ModuleHeader ...")
File = self.Identification.FileFullPath
#
# Update all defines item in database
#
RecordSet = self.RecordSet[MODEL_META_DATA_HEADER]
for Record in RecordSet:
ValueList = GetSplitValueList(Record[0], TAB_EQUAL_SPLIT)
if len(ValueList) != 2:
RaiseParserError(Record[0], 'Defines', ContainerFile, '<Key> = <Value>', Record[2])
ID, Value1, Value2, Arch, LineNo = Record[3], ValueList[0], ValueList[1], Record[1], Record[2]
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Value1), ConvertToSqlString2(Value2), ID)
self.TblInf.Exec(SqlCommand)
for Arch in DataType.ARCH_LIST:
ModuleHeader = InfHeader()
ModuleHeader.FileName = self.Identification.FileName
ModuleHeader.FullPath = self.Identification.FileFullPath
DefineList = QueryDefinesItem2(self.TblInf, Arch, self.FileID)
NotProcessedDefineList = []
for D in DefineList:
if D[0] in ModuleHeader:
ModuleHeader[D[0]] = GetSplitValueList(D[1])[0]
else:
NotProcessedDefineList.append(D)
if ModuleHeader.ComponentType == "LIBRARY":
Lib = LibraryClassClass()
Lib.LibraryClass = ModuleHeader.Name
Lib.SupModuleList = DataType.SUP_MODULE_LIST
ModuleHeader.LibraryClass.append(Lib)
# we need to make some key defines resolved first
for D in NotProcessedDefineList:
if D[0] == TAB_INF_DEFINES_LIBRARY_CLASS:
List = GetSplitValueList(D[1], DataType.TAB_VALUE_SPLIT, 1)
Lib = LibraryClassClass()
Lib.LibraryClass = CleanString(List[0])
if len(List) == 1:
Lib.SupModuleList = DataType.SUP_MODULE_LIST
elif len(List) == 2:
Lib.SupModuleList = GetSplitValueList(CleanString(List[1]), ' ')
ModuleHeader.LibraryClass.append(Lib)
elif D[0] == TAB_INF_DEFINES_CUSTOM_MAKEFILE:
List = D[1].split(DataType.TAB_VALUE_SPLIT)
if len(List) == 2:
ModuleHeader.CustomMakefile[CleanString(List[0])] = CleanString(List[1])
else:
RaiseParserError(D[1], 'CUSTOM_MAKEFILE of Defines', File, 'CUSTOM_MAKEFILE=<Family>|<Filename>', D[2])
elif D[0] == TAB_INF_DEFINES_ENTRY_POINT:
Image = ModuleExternImageClass()
Image.ModuleEntryPoint = CleanString(D[1])
self.Module.ExternImages.append(Image)
elif D[0] == TAB_INF_DEFINES_UNLOAD_IMAGE:
Image = ModuleExternImageClass()
Image.ModuleUnloadImage = CleanString(D[1])
self.Module.ExternImages.append(Image)
elif D[0] == TAB_INF_DEFINES_CONSTRUCTOR:
LibraryClass = ModuleExternLibraryClass()
LibraryClass.Constructor = CleanString(D[1])
self.Module.ExternLibraries.append(LibraryClass)
elif D[0] == TAB_INF_DEFINES_DESTRUCTOR:
LibraryClass = ModuleExternLibraryClass()
LibraryClass.Destructor = CleanString(D[1])
self.Module.ExternLibraries.append(LibraryClass)
elif D[0] == TAB_INF_DEFINES_DEFINE:
List = D[1].split(DataType.TAB_EQUAL_SPLIT)
if len(List) != 2:
RaiseParserError(Item, 'DEFINE of Defines', File, 'DEFINE <Word> = <Word>', D[2])
else:
ModuleHeader.Define[CleanString(List[0])] = CleanString(List[1])
elif D[0] == TAB_INF_DEFINES_SPEC:
List = D[1].split(DataType.TAB_EQUAL_SPLIT)
if len(List) != 2:
RaiseParserError(Item, 'SPEC of Defines', File, 'SPEC <Word> = <Version>', D[2])
else:
ModuleHeader.Specification[CleanString(List[0])] = CleanString(List[1])
#
# Get version of INF
#
if ModuleHeader.InfVersion != "":
# EdkII inf
VersionNumber = ModuleHeader.VersionNumber
VersionString = ModuleHeader.VersionString
if len(VersionNumber) > 0 and len(VersionString) == 0:
EdkLogger.warn(2000, 'VERSION_NUMBER deprecated; INF file %s should be modified to use VERSION_STRING instead.' % self.Identification.FileFullPath)
ModuleHeader.Version = VersionNumber
if len(VersionString) > 0:
if len(VersionNumber) > 0:
EdkLogger.warn(2001, 'INF file %s defines both VERSION_NUMBER and VERSION_STRING, using VERSION_STRING' % self.Identification.FileFullPath)
ModuleHeader.Version = VersionString
else:
# Edk inf
ModuleHeader.InfVersion = "0x00010000"
if ModuleHeader.ComponentType in gComponentType2ModuleType:
ModuleHeader.ModuleType = gComponentType2ModuleType[ModuleHeader.ComponentType]
elif ModuleHeader.ComponentType != '':
EdkLogger.error("Parser", PARSER_ERROR, "Unsupported Edk component type [%s]" % ModuleHeader.ComponentType, ExtraData=File, RaiseError=EdkLogger.IsRaiseError)
self.Module.Header[Arch] = ModuleHeader
## GenBuildOptions
#
# Gen BuildOptions of Inf
# [<Family>:]<ToolFlag>=Flag
#
# @param ContainerFile: The Inf file full path
#
def GenBuildOptions(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_BUILD_OPTIONS)
BuildOptions = {}
#
# Get all BuildOptions
#
RecordSet = self.RecordSet[MODEL_META_DATA_BUILD_OPTION]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(Family, ToolChain, Flag) = GetBuildOption(Record[0], ContainerFile, Record[2])
MergeArches(BuildOptions, (Family, ToolChain, Flag), Arch)
#
# Update to Database
#
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Family), ConvertToSqlString2(ToolChain), ConvertToSqlString2(Flag), Record[3])
self.TblInf.Exec(SqlCommand)
for Key in BuildOptions.keys():
BuildOption = BuildOptionClass(Key[0], Key[1], Key[2])
BuildOption.SupArchList = BuildOptions[Key]
self.Module.BuildOptions.append(BuildOption)
## GenIncludes
#
# Gen Includes of Inf
#
#
# @param ContainerFile: The Inf file full path
#
def GenIncludes(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_INCLUDES)
Includes = sdict()
#
# Get all Includes
#
RecordSet = self.RecordSet[MODEL_EFI_INCLUDE]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
MergeArches(Includes, Record[0], Arch)
for Key in Includes.keys():
Include = IncludeClass()
Include.FilePath = NormPath(Key)
Include.SupArchList = Includes[Key]
self.Module.Includes.append(Include)
## GenLibraries
#
# Gen Libraries of Inf
#
#
# @param ContainerFile: The Inf file full path
#
def GenLibraries(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_LIBRARIES)
Libraries = sdict()
#
# Get all Includes
#
RecordSet = self.RecordSet[MODEL_EFI_LIBRARY_INSTANCE]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
MergeArches(Libraries, Record[0], Arch)
for Key in Libraries.keys():
Library = ModuleLibraryClass()
# replace macro and remove file extension
Library.Library = Key.rsplit('.', 1)[0]
Library.SupArchList = Libraries[Key]
self.Module.Libraries.append(Library)
## GenLibraryClasses
#
# Get LibraryClass of Inf
# <LibraryClassKeyWord>|<LibraryInstance>
#
# @param ContainerFile: The Inf file full path
#
def GenLibraryClasses(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_LIBRARY_CLASSES)
LibraryClasses = {}
#
# Get all LibraryClasses
#
RecordSet = self.RecordSet[MODEL_EFI_LIBRARY_CLASS]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(LibClassName, LibClassIns, Pcd, SupModelList) = GetLibraryClassOfInf([Record[0], Record[4]], ContainerFile, self.WorkspaceDir, Record[2])
MergeArches(LibraryClasses, (LibClassName, LibClassIns, Pcd, SupModelList), Arch)
#
# Update to Database
#
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(LibClassName), ConvertToSqlString2(LibClassIns), ConvertToSqlString2(SupModelList), Record[3])
self.TblInf.Exec(SqlCommand)
for Key in LibraryClasses.keys():
KeyList = Key[0].split(DataType.TAB_VALUE_SPLIT)
LibraryClass = LibraryClassClass()
LibraryClass.LibraryClass = Key[0]
LibraryClass.RecommendedInstance = NormPath(Key[1])
LibraryClass.FeatureFlag = Key[2]
LibraryClass.SupArchList = LibraryClasses[Key]
LibraryClass.SupModuleList = GetSplitValueList(Key[3])
self.Module.LibraryClasses.append(LibraryClass)
## GenPackages
#
# Gen Packages of Inf
#
#
# @param ContainerFile: The Inf file full path
#
def GenPackages(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_PACKAGES)
Packages = {}
#
# Get all Packages
#
RecordSet = self.RecordSet[MODEL_META_DATA_PACKAGE]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(Package, Pcd) = GetPackage(Record[0], ContainerFile, self.WorkspaceDir, Record[2])
MergeArches(Packages, (Package, Pcd), Arch)
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Package), ConvertToSqlString2(Pcd), Record[3])
self.TblInf.Exec(SqlCommand)
for Key in Packages.keys():
Package = ModulePackageDependencyClass()
Package.FilePath = NormPath(Key[0])
Package.SupArchList = Packages[Key]
Package.FeatureFlag = Key[1]
self.Module.PackageDependencies.append(Package)
## GenNmakes
#
# Gen Nmakes of Inf
#
#
# @param ContainerFile: The Inf file full path
#
def GenNmakes(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_NMAKE)
Nmakes = sdict()
#
# Get all Nmakes
#
RecordSet = self.RecordSet[MODEL_META_DATA_NMAKE]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
MergeArches(Nmakes, Record[0], Arch)
for Key in Nmakes.keys():
List = GetSplitValueList(Key, DataType.TAB_EQUAL_SPLIT, MaxSplit=1)
if len(List) != 2:
RaiseParserError(Key, 'Nmake', ContainerFile, '<MacroName> = <Value>')
continue
Nmake = ModuleNmakeClass()
Nmake.Name = List[0]
Nmake.Value = List[1]
Nmake.SupArchList = Nmakes[Key]
self.Module.Nmake.append(Nmake)
# convert Edk format to EdkII format
if Nmake.Name == "IMAGE_ENTRY_POINT":
Image = ModuleExternImageClass()
Image.ModuleEntryPoint = Nmake.Value
self.Module.ExternImages.append(Image)
elif Nmake.Name == "DPX_SOURCE":
Source = ModuleSourceFileClass(NormPath(Nmake.Value), "", "", "", "", Nmake.SupArchList)
self.Module.Sources.append(Source)
else:
ToolList = gNmakeFlagPattern.findall(Nmake.Name)
if len(ToolList) != 1:
EdkLogger.warn("\nParser", "Don't know how to do with MACRO: %s" % Nmake.Name,
ExtraData=ContainerFile)
else:
if ToolList[0] in gNmakeFlagName2ToolCode:
Tool = gNmakeFlagName2ToolCode[ToolList[0]]
else:
Tool = ToolList[0]
BuildOption = BuildOptionClass("MSFT", "*_*_*_%s_FLAGS" % Tool, Nmake.Value)
BuildOption.SupArchList = Nmake.SupArchList
self.Module.BuildOptions.append(BuildOption)
## GenPcds
#
# Gen Pcds of Inf
# <TokenSpaceGuidCName>.<PcdCName>[|<Value>]
#
# @param ContainerFile: The Inf file full path
#
def GenPcds(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_PCDS)
Pcds = {}
PcdToken = {}
#
# Get all Guids
#
RecordSet1 = self.RecordSet[MODEL_PCD_FIXED_AT_BUILD]
RecordSet2 = self.RecordSet[MODEL_PCD_PATCHABLE_IN_MODULE]
RecordSet3 = self.RecordSet[MODEL_PCD_FEATURE_FLAG]
RecordSet4 = self.RecordSet[MODEL_PCD_DYNAMIC_EX]
RecordSet5 = self.RecordSet[MODEL_PCD_DYNAMIC]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet1:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
if self.Module.Header[Arch].LibraryClass != {}:
pass
(TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], TAB_PCDS_FIXED_AT_BUILD, ContainerFile, Record[2])
MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
PcdToken[Record[3]] = (TokenGuidCName, TokenName)
for Record in RecordSet2:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], TAB_PCDS_PATCHABLE_IN_MODULE, ContainerFile, Record[2])
MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
PcdToken[Record[3]] = (TokenGuidCName, TokenName)
for Record in RecordSet3:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], TAB_PCDS_FEATURE_FLAG, ContainerFile, Record[2])
MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
PcdToken[Record[3]] = (TokenGuidCName, TokenName)
for Record in RecordSet4:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], TAB_PCDS_DYNAMIC_EX, ContainerFile, Record[2])
MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
PcdToken[Record[3]] = (TokenGuidCName, TokenName)
for Record in RecordSet5:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], "", ContainerFile, Record[2])
MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
PcdToken[Record[3]] = (TokenGuidCName, TokenName)
#
# Update to database
#
if self.IsToDatabase:
for Key in PcdToken.keys():
SqlCommand = """update %s set Value2 = '%s' where ID = %s""" % (self.TblInf.Table, ".".join((PcdToken[Key][0], PcdToken[Key][1])), Key)
self.TblInf.Exec(SqlCommand)
for Key in Pcds.keys():
Pcd = PcdClass()
Pcd.CName = Key[1]
Pcd.TokenSpaceGuidCName = Key[0]
Pcd.DefaultValue = Key[2]
Pcd.ItemType = Key[3]
Pcd.SupArchList = Pcds[Key]
self.Module.PcdCodes.append(Pcd)
## GenSources
#
# Gen Sources of Inf
# <Filename>[|<Family>[|<TagName>[|<ToolCode>[|<PcdFeatureFlag>]]]]
#
# @param ContainerFile: The Inf file full path
#
def GenSources(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_SOURCES)
Sources = {}
#
# Get all Nmakes
#
RecordSet = self.RecordSet[MODEL_EFI_SOURCE_FILE]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(Filename, Family, TagName, ToolCode, Pcd) = GetSource(Record[0], ContainerFile, self.Identification.FileRelativePath, Record[2])
MergeArches(Sources, (Filename, Family, TagName, ToolCode, Pcd), Arch)
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s', Value4 = '%s', Value5 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Filename), ConvertToSqlString2(Family), ConvertToSqlString2(TagName), ConvertToSqlString2(ToolCode), ConvertToSqlString2(Pcd), Record[3])
self.TblInf.Exec(SqlCommand)
for Key in Sources.keys():
Source = ModuleSourceFileClass(Key[0], Key[2], Key[3], Key[1], Key[4], Sources[Key])
self.Module.Sources.append(Source)
## GenUserExtensions
#
# Gen UserExtensions of Inf
#
def GenUserExtensions(self, ContainerFile):
# #
# # UserExtensions
# #
# if self.UserExtensions != '':
# UserExtension = UserExtensionsClass()
# Lines = self.UserExtensions.splitlines()
# List = GetSplitValueList(Lines[0], DataType.TAB_SPLIT, 2)
# if len(List) != 3:
# RaiseParserError(Lines[0], 'UserExtensions', File, "UserExtensions.UserId.'Identifier'")
# else:
# UserExtension.UserID = List[1]
# UserExtension.Identifier = List[2][0:-1].replace("'", '').replace('\"', '')
# for Line in Lines[1:]:
# UserExtension.Content = UserExtension.Content + CleanString(Line) + '\n'
# self.Module.UserExtensions.append(UserExtension)
pass
## GenDepexes
#
# Gen Depex of Inf
#
# @param ContainerFile: The Inf file full path
#
def GenDepexes(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_DEPEX)
Depex = {}
#
# Get all Depexes
#
RecordSet = self.RecordSet[MODEL_EFI_DEPEX]
#
# Go through each arch
#
for Arch in self.SupArchList:
Line = ''
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
Line = Line + Record[0] + ' '
if Line != '':
MergeArches(Depex, Line, Arch)
for Key in Depex.keys():
Dep = ModuleDepexClass()
Dep.Depex = Key
Dep.SupArchList = Depex[Key]
self.Module.Depex.append(Dep)
## GenBinaries
#
# Gen Binary of Inf
# <FileType>|<Filename>|<Target>[|<TokenSpaceGuidCName>.<PcdCName>]
#
# @param ContainerFile: The Inf file full path
#
def GenBinaries(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_BINARIES)
Binaries = {}
#
# Get all Guids
#
RecordSet = self.RecordSet[MODEL_EFI_BINARY_FILE]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(FileType, Filename, Target, Pcd) = GetBinary(Record[0], ContainerFile, self.Identification.FileRelativePath, Record[2])
MergeArches(Binaries, (FileType, Filename, Target, Pcd), Arch)
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s', Value4 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(FileType), ConvertToSqlString2(Filename), ConvertToSqlString2(Target), ConvertToSqlString2(Pcd), Record[3])
self.TblInf.Exec(SqlCommand)
for Key in Binaries.keys():
Binary = ModuleBinaryFileClass(NormPath(Key[1]), Key[0], Key[2], Key[3], Binaries[Key])
self.Module.Binaries.append(Binary)
## GenGuidProtocolPpis
#
# Gen Guids, Protocols and Ppis of Inf
# <CName>=<GuidValue>
#
# @param ContainerFile: The Inf file full path
#
def GenGuidProtocolPpis(self, Type, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % Type)
Lists = {}
#
# Get all Items
#
RecordSet = self.RecordSet[Section[Type.upper()]]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(Name, Value) = GetGuidsProtocolsPpisOfInf(Record[0], Type, ContainerFile, Record[2])
MergeArches(Lists, (Name, Value), Arch)
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Name), ConvertToSqlString2(Value), Record[3])
self.TblInf.Exec(SqlCommand)
ListMember = None
if Type == TAB_GUIDS:
ListMember = self.Module.Guids
elif Type == TAB_PROTOCOLS:
ListMember = self.Module.Protocols
elif Type == TAB_PPIS:
ListMember = self.Module.Ppis
for Key in Lists.keys():
ListClass = GuidProtocolPpiCommonClass()
ListClass.CName = Key[0]
ListClass.SupArchList = Lists[Key]
ListClass.FeatureFlag = Key[1]
ListMember.append(ListClass)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
EdkLogger.Initialize()
EdkLogger.SetLevel(EdkLogger.DEBUG_0)
W = os.getenv('WORKSPACE')
F = os.path.join(W, 'MdeModulePkg/Application/HelloWorld/HelloWorld.inf')
Db = Database.Database('Inf.db')
Db.InitDatabase()
P = Inf(os.path.normpath(F), True, True, W, Db)
P.ShowModule()
Db.Close()
| []
| []
| [
"WORKSPACE"
]
| [] | ["WORKSPACE"] | python | 1 | 0 | |
internal/config/config.go | package config
import (
"bufio"
"bytes"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"strings"
"syscall"
"github.com/spf13/afero"
"github.com/spf13/viper"
gitlab "github.com/xanzy/go-gitlab"
"github.com/zaquestion/lab/internal/git"
"golang.org/x/crypto/ssh/terminal"
)
const defaultGitLabHost = "https://gitlab.com"
var (
MainConfig *viper.Viper
)
// New prompts the user for the default config values to use with lab, and saves
// them to the provided confpath (default: ~/.config/lab.toml)
func New(confpath string, r io.Reader) error {
var (
reader = bufio.NewReader(r)
host, token, loadToken string
err error
)
confpath = path.Join(confpath, "lab.toml")
// If core host is set in the environment (LAB_CORE_HOST) we only want
// to prompt for the token. We'll use the environments host and place
// it in the config. In the event both the host and token are in the
// env, this function shouldn't be called in the first place
if MainConfig.GetString("core.host") == "" {
fmt.Printf("Enter GitLab host (default: %s): ", defaultGitLabHost)
host, err = reader.ReadString('\n')
host = strings.TrimSpace(host)
if err != nil {
return err
}
if host == "" {
host = defaultGitLabHost
}
} else {
// Required to correctly write config
host = MainConfig.GetString("core.host")
}
MainConfig.Set("core.host", host)
token, loadToken, err = readPassword(*reader)
if err != nil {
return err
}
if token != "" {
MainConfig.Set("core.token", token)
} else if loadToken != "" {
MainConfig.Set("core.load_token", loadToken)
}
if err := MainConfig.WriteConfigAs(confpath); err != nil {
return err
}
fmt.Printf("\nConfig saved to %s\n", confpath)
err = MainConfig.ReadInConfig()
if err != nil {
log.Fatal(err)
}
return nil
}
var readPassword = func(reader bufio.Reader) (string, string, error) {
var loadToken string
tokenURL, err := url.Parse(viper.GetString("core.host"))
if err != nil {
return "", "", err
}
tokenURL.Path = "profile/personal_access_tokens"
fmt.Printf("Create a token here: %s\nEnter default GitLab token (scope: api), or leave blank to provide a command to load the token: ", tokenURL.String())
byteToken, err := terminal.ReadPassword(int(syscall.Stdin))
if err != nil {
return "", "", err
}
if strings.TrimSpace(string(byteToken)) == "" {
fmt.Printf("\nEnter command to load the token:")
loadToken, err = reader.ReadString('\n')
if err != nil {
return "", "", err
}
}
if strings.TrimSpace(string(byteToken)) == "" && strings.TrimSpace(loadToken) == "" {
log.Fatal("Error: No token provided. A token can be created at ", tokenURL.String())
}
return strings.TrimSpace(string(byteToken)), strings.TrimSpace(loadToken), nil
}
// CI returns credentials suitable for use within GitLab CI or empty strings if
// none found.
func CI() (string, string, string) {
ciToken := os.Getenv("CI_JOB_TOKEN")
if ciToken == "" {
return "", "", ""
}
ciHost := strings.TrimSuffix(os.Getenv("CI_PROJECT_URL"), os.Getenv("CI_PROJECT_PATH"))
if ciHost == "" {
return "", "", ""
}
ciUser := os.Getenv("GITLAB_USER_LOGIN")
return ciHost, ciUser, ciToken
}
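// Illustrative example (assumed values, not part of the original file): with
// CI_PROJECT_URL=https://gitlab.example.com/group/project and
// CI_PROJECT_PATH=group/project, the TrimSuffix above leaves ciHost as
// "https://gitlab.example.com/", which CI() returns together with
// GITLAB_USER_LOGIN and CI_JOB_TOKEN.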
// ConvertHCLtoTOML() converts an .hcl file to a .toml file
func ConvertHCLtoTOML(oldpath string, newpath string, file string) {
oldconfig := oldpath + "/" + file + ".hcl"
newconfig := newpath + "/" + file + ".toml"
if _, err := os.Stat(oldconfig); os.IsNotExist(err) {
return
}
if _, err := os.Stat(newconfig); err == nil {
return
}
// read in the old config HCL file and write out the new TOML file
oldConfig := viper.New()
oldConfig.SetConfigName("lab")
oldConfig.SetConfigType("hcl")
oldConfig.AddConfigPath(oldpath)
oldConfig.ReadInConfig()
oldConfig.SetConfigType("toml")
oldConfig.WriteConfigAs(newconfig)
// delete the old config HCL file
if err := os.Remove(oldconfig); err != nil {
fmt.Println("Warning: Could not delete old config file", oldconfig)
}
// HACK
// viper HCL parsing is broken and simply translating it to a TOML file
// results in a broken toml file. The issue is that there are double
// square brackets for each entry where there should be single
// brackets. Note: this hack only works because the config file is
// simple and doesn't contain deeply embedded config entries.
text, err := ioutil.ReadFile(newconfig)
if err != nil {
log.Fatal(err)
}
text = bytes.Replace(text, []byte("[["), []byte("["), -1)
text = bytes.Replace(text, []byte("]]"), []byte("]"), -1)
if err = ioutil.WriteFile(newconfig, text, 0666); err != nil {
fmt.Println(err)
os.Exit(1)
}
// END HACK
fmt.Println("INFO: Converted old config", oldconfig, "to new config", newconfig)
}
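// Illustrative before/after for the bracket hack above (assumed file contents,
// not from the original repo): viper's HCL-to-TOML translation emits
//
//	[[core]]
//	host = "https://gitlab.com"
//
// and the two bytes.Replace calls rewrite it to valid TOML:
//
//	[core]
//	host = "https://gitlab.com"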
func getUser(host, token string, skipVerify bool) string {
user := MainConfig.GetString("core.user")
if user != "" {
return user
}
httpClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: skipVerify,
},
},
}
lab, _ := gitlab.NewClient(token, gitlab.WithHTTPClient(httpClient), gitlab.WithBaseURL(host+"/api/v4"))
u, _, err := lab.Users.CurrentUser()
if err != nil {
log.Fatal(err)
}
if strings.TrimSpace(os.Getenv("LAB_CORE_TOKEN")) == "" && strings.TrimSpace(os.Getenv("LAB_CORE_HOST")) == "" {
MainConfig.Set("core.user", u.Username)
MainConfig.WriteConfig()
}
return u.Username
}
// GetToken returns a token string from the config file.
// The token string can be cleartext or returned from a password manager or
// encryption utility.
func GetToken() string {
token := MainConfig.GetString("core.token")
if token == "" && MainConfig.GetString("core.load_token") != "" {
// args[0] isn't really an arg ;)
args := strings.Split(MainConfig.GetString("core.load_token"), " ")
_token, err := exec.Command(args[0], args[1:]...).Output()
if err != nil {
log.Fatal(err)
}
token = string(_token)
// tools like pass and a simple bash script add a '\n' to
// their output which confuses the gitlab WebAPI
if token[len(token)-1:] == "\n" {
token = strings.TrimSuffix(token, "\n")
}
}
return token
}
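// Illustrative lab.toml snippet (assumed values) showing the two ways GetToken
// resolves a token: a cleartext core.token, or a core.load_token command whose
// stdout is used as the token (for example a password-manager call such as pass):
//
//	[core]
//	host = "https://gitlab.com"
//	load_token = "pass show gitlab/token"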
// LoadMainConfig() loads the main config file and returns a tuple of
// host, user, token, ca_file, skipVerify
func LoadMainConfig() (string, string, string, string, bool) {
// The lab config hierarchy is:
// 1. ENV variables (LAB_CORE_TOKEN, LAB_CORE_HOST)
// - if specified, core.token and core.host values in
// config files are not updated.
// 2. "dot slash" (./lab.toml) user specified config
// - if specified, lower order config files will not override
// the user specified config
// 3. .config/lab/lab.toml (global config)
// 4. .git/lab/lab.toml (worktree config)
//
// Values from the worktree config will override any global config settings.
// Attempt to auto-configure for GitLab CI.
// Always do this before reading in the config file, otherwise CI will end up
// with the wrong data.
host, user, token := CI()
if host != "" && user != "" && token != "" {
return host, user, token, "", false
}
// Try to find XDG_CONFIG_HOME which is declared in XDG base directory
// specification and use its location as the config directory
home, err := os.UserHomeDir()
if err != nil {
log.Fatal(err)
}
confpath := os.Getenv("XDG_CONFIG_HOME")
if confpath == "" {
confpath = path.Join(home, ".config")
}
labconfpath := confpath + "/lab"
if _, err := os.Stat(labconfpath); os.IsNotExist(err) {
os.MkdirAll(labconfpath, 0700)
}
// Convert old hcl files to toml format.
// NO NEW FILES SHOULD BE ADDED BELOW.
ConvertHCLtoTOML(confpath, labconfpath, "lab")
ConvertHCLtoTOML(".", ".", "lab")
var labgitDir string
gitDir, err := git.GitDir()
if err == nil {
labgitDir = gitDir + "/lab"
ConvertHCLtoTOML(gitDir, labgitDir, "lab")
ConvertHCLtoTOML(labgitDir, labgitDir, "show_metadata")
}
MainConfig = viper.New()
MainConfig.SetConfigName("lab")
MainConfig.SetConfigType("toml")
// The local path (aka 'dot slash') does not allow for any
// overrides from the work tree lab.toml
MainConfig.AddConfigPath(".")
MainConfig.AddConfigPath(labconfpath)
if labgitDir != "" {
MainConfig.AddConfigPath(labgitDir)
}
MainConfig.SetEnvPrefix("LAB")
MainConfig.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
MainConfig.AutomaticEnv()
if _, ok := MainConfig.ReadInConfig().(viper.ConfigFileNotFoundError); ok {
// Create a new config
err := New(labconfpath, os.Stdin)
if err != nil {
log.Fatal(err)
}
} else {
// Config already exists. Merge in .git/lab/lab.toml file
_, err := os.Stat(labgitDir + "/lab.toml")
if MainConfig.ConfigFileUsed() == labconfpath+"/lab.toml" && !os.IsNotExist(err) {
file, err := afero.ReadFile(afero.NewOsFs(), labgitDir+"/lab.toml")
if err != nil {
log.Fatal(err)
}
MainConfig.MergeConfig(bytes.NewReader(file))
}
}
host = MainConfig.GetString("core.host")
token = GetToken()
ca_file := MainConfig.GetString("tls.ca_file")
tlsSkipVerify := MainConfig.GetBool("tls.skip_verify")
user = getUser(host, token, tlsSkipVerify)
return host, user, token, ca_file, tlsSkipVerify
}
// default path of worktree lab.toml file
var (
WorkTreePath string = ".git/lab"
WorkTreeName string = "lab"
)
// LoadConfig loads a config file specified by configpath and configname.
// The configname must not have a '.toml' extension. If configpath and/or
// configname are unspecified, the worktree defaults will be used.
func LoadConfig(configpath string, configname string) *viper.Viper {
targetConfig := viper.New()
targetConfig.SetConfigType("toml")
if configpath == "" {
configpath = WorkTreePath
}
if configname == "" {
configname = WorkTreeName
}
targetConfig.AddConfigPath(configpath)
targetConfig.SetConfigName(configname)
if _, ok := targetConfig.ReadInConfig().(viper.ConfigFileNotFoundError); ok {
if _, err := os.Stat(configpath); os.IsNotExist(err) {
os.MkdirAll(configpath, os.ModePerm)
}
if err := targetConfig.WriteConfigAs(configpath + "/" + configname + ".toml"); err != nil {
log.Fatal(err)
}
if err := targetConfig.ReadInConfig(); err != nil {
log.Fatal(err)
}
}
return targetConfig
}
// WriteConfigEntry writes a value specified by desc and value to the
// configfile specified by configpath and configname. If configpath and/or
// configname are unspecified, the worktree defaults will be used.
func WriteConfigEntry(desc string, value interface{}, configpath string, configname string) {
targetConfig := LoadConfig(configpath, configname)
targetConfig.Set(desc, value)
targetConfig.WriteConfig()
}
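// Usage sketch (assumed call site, not part of the original file): with the
// default WorkTreePath and WorkTreeName above,
//
//	WriteConfigEntry("core.host", "https://gitlab.example.com", "", "")
//
// loads (or creates) .git/lab/lab.toml and persists the key in it.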
| [
"\"CI_JOB_TOKEN\"",
"\"CI_PROJECT_URL\"",
"\"CI_PROJECT_PATH\"",
"\"GITLAB_USER_LOGIN\"",
"\"LAB_CORE_TOKEN\"",
"\"LAB_CORE_HOST\"",
"\"XDG_CONFIG_HOME\""
]
| []
| [
"LAB_CORE_TOKEN",
"GITLAB_USER_LOGIN",
"CI_JOB_TOKEN",
"LAB_CORE_HOST",
"CI_PROJECT_URL",
"CI_PROJECT_PATH",
"XDG_CONFIG_HOME"
]
| [] | ["LAB_CORE_TOKEN", "GITLAB_USER_LOGIN", "CI_JOB_TOKEN", "LAB_CORE_HOST", "CI_PROJECT_URL", "CI_PROJECT_PATH", "XDG_CONFIG_HOME"] | go | 7 | 0 | |
brr/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "brr.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
learning_log/settings.py | """
Django settings for learning_log project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# My Apps
'learning_logs',
'users',
# Third party apps
'bootstrap4',
# Default Django Apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learning_log.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['./templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'learning_log.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# My settings
LOGIN_URL = 'users:login'
# Heroku settings
import django_heroku
import os
django_heroku.settings(locals())
if os.environ.get('DEBUG') == 'TRUE':
DEBUG = True
elif os.environ.get('DEBUG') == 'FALSE':
DEBUG = False
| []
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | python | 1 | 0 | |
service/src/java/org/apache/hive/service/server/HiveServer2.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hive.service.server;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.ACLProvider;
import org.apache.curator.framework.api.BackgroundCallback;
import org.apache.curator.framework.api.CuratorEvent;
import org.apache.curator.framework.api.CuratorEventType;
import org.apache.curator.framework.recipes.nodes.PersistentEphemeralNode;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.hadoop.hive.common.JvmPauseMonitor;
import org.apache.hadoop.hive.common.LogUtils;
import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManagerImpl;
import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager;
import org.apache.hadoop.hive.ql.session.ClearDanglingScratchDir;
import org.apache.hadoop.hive.ql.util.ZooKeeperHiveHelper;
import org.apache.hadoop.hive.shims.Utils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hive.common.util.HiveStringUtils;
import org.apache.hive.common.util.HiveVersionInfo;
import org.apache.hive.common.util.ShutdownHookManager;
import org.apache.hive.service.CompositeService;
import org.apache.hive.service.ServiceException;
import org.apache.hive.service.cli.CLIService;
import org.apache.hive.service.cli.thrift.ThriftBinaryCLIService;
import org.apache.hive.service.cli.thrift.ThriftCLIService;
import org.apache.hive.service.cli.thrift.ThriftHttpCLIService;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooDefs.Perms;
import org.apache.zookeeper.data.ACL;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
/**
* HiveServer2.
*
*/
public class HiveServer2 extends CompositeService {
private static final Log LOG = LogFactory.getLog(HiveServer2.class);
private static CountDownLatch deleteSignal;
private CLIService cliService;
private ThriftCLIService thriftCLIService;
private PersistentEphemeralNode znode;
private String znodePath;
private CuratorFramework zooKeeperClient;
private boolean deregisteredWithZooKeeper = false; // Set to true only when deregistration happens
public HiveServer2() {
super(HiveServer2.class.getSimpleName());
HiveConf.setLoadHiveServer2Config(true);
}
@Override
public synchronized void init(HiveConf hiveConf) {
//Initialize metrics first, as some metrics are for initialization stuff.
try {
if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_METRICS_ENABLED)) {
MetricsFactory.init(hiveConf);
}
} catch (Throwable t) {
LOG.warn("Could not initiate the HiveServer2 Metrics system. Metrics may not be reported.", t);
}
cliService = new CLIService(this);
addService(cliService);
if (isHTTPTransportMode(hiveConf)) {
thriftCLIService = new ThriftHttpCLIService(cliService);
} else {
thriftCLIService = new ThriftBinaryCLIService(cliService);
}
addService(thriftCLIService);
super.init(hiveConf);
// Set host name in hiveConf
try {
hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST.varname, getServerHost());
} catch (Throwable t) {
throw new Error("Unable to initialize HiveServer2", t);
}
// Add a shutdown hook for catching SIGTERM & SIGINT
final HiveServer2 hiveServer2 = this;
ShutdownHookManager.addShutdownHook(new Runnable() {
@Override
public void run() {
hiveServer2.stop();
}
});
}
public static boolean isHTTPTransportMode(HiveConf hiveConf) {
String transportMode = System.getenv("HIVE_SERVER2_TRANSPORT_MODE");
if (transportMode == null) {
transportMode = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE);
}
if (transportMode != null && (transportMode.equalsIgnoreCase("http"))) {
return true;
}
return false;
}
public static boolean isKerberosAuthMode(HiveConf hiveConf) {
String authMode = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION);
if (authMode != null && (authMode.equalsIgnoreCase("KERBEROS"))) {
return true;
}
return false;
}
/**
* ACLProvider for providing appropriate ACLs to CuratorFrameworkFactory
*/
private final ACLProvider zooKeeperAclProvider = new ACLProvider() {
@Override
public List<ACL> getDefaultAcl() {
List<ACL> nodeAcls = new ArrayList<ACL>();
if (UserGroupInformation.isSecurityEnabled()) {
// Read all to the world
nodeAcls.addAll(Ids.READ_ACL_UNSAFE);
// Create/Delete/Write/Admin to the authenticated user
nodeAcls.add(new ACL(Perms.ALL, Ids.AUTH_IDS));
} else {
// ACLs for znodes on a non-kerberized cluster
// Create/Read/Delete/Write/Admin to the world
nodeAcls.addAll(Ids.OPEN_ACL_UNSAFE);
}
return nodeAcls;
}
@Override
public List<ACL> getAclForPath(String path) {
return getDefaultAcl();
}
};
/**
* Adds a server instance to ZooKeeper as a znode.
*
* @param hiveConf
* @throws Exception
*/
private void addServerInstanceToZooKeeper(HiveConf hiveConf) throws Exception {
String zooKeeperEnsemble = ZooKeeperHiveHelper.getQuorumServers(hiveConf);
String rootNamespace = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_ZOOKEEPER_NAMESPACE);
String instanceURI = getServerInstanceURI();
setUpZooKeeperAuth(hiveConf);
int sessionTimeout =
(int) hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT,
TimeUnit.MILLISECONDS);
int baseSleepTime =
(int) hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME,
TimeUnit.MILLISECONDS);
int maxRetries = hiveConf.getIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES);
// Create a CuratorFramework instance to be used as the ZooKeeper client
// Use the zooKeeperAclProvider to create appropriate ACLs
zooKeeperClient =
CuratorFrameworkFactory.builder().connectString(zooKeeperEnsemble)
.sessionTimeoutMs(sessionTimeout).aclProvider(zooKeeperAclProvider)
.retryPolicy(new ExponentialBackoffRetry(baseSleepTime, maxRetries)).build();
zooKeeperClient.start();
// Create the parent znodes recursively; ignore if the parent already exists.
try {
zooKeeperClient.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT)
.forPath(ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace);
LOG.info("Created the root name space: " + rootNamespace + " on ZooKeeper for HiveServer2");
} catch (KeeperException e) {
if (e.code() != KeeperException.Code.NODEEXISTS) {
LOG.fatal("Unable to create HiveServer2 namespace: " + rootNamespace + " on ZooKeeper", e);
throw e;
}
}
// Create a znode under the rootNamespace parent for this instance of the server
// Znode name: serverUri=host:port;version=versionInfo;sequence=sequenceNumber
try {
String pathPrefix =
ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace
+ ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + "serverUri=" + instanceURI + ";"
+ "version=" + HiveVersionInfo.getVersion() + ";" + "sequence=";
String znodeData = "";
if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ZOOKEEPER_PUBLISH_CONFIGS)) {
// HiveServer2 configs that this instance will publish to ZooKeeper,
// so that the clients can read these and configure themselves properly.
Map<String, String> confsToPublish = new HashMap<String, String>();
addConfsToPublish(hiveConf, confsToPublish);
// Publish configs for this instance as the data on the node
znodeData = Joiner.on(';').withKeyValueSeparator("=").join(confsToPublish);
} else {
znodeData = instanceURI;
}
byte[] znodeDataUTF8 = znodeData.getBytes(Charset.forName("UTF-8"));
znode =
new PersistentEphemeralNode(zooKeeperClient,
PersistentEphemeralNode.Mode.EPHEMERAL_SEQUENTIAL, pathPrefix, znodeDataUTF8);
znode.start();
// We'll wait for 120s for node creation
long znodeCreationTimeout = 120;
if (!znode.waitForInitialCreate(znodeCreationTimeout, TimeUnit.SECONDS)) {
throw new Exception("Max znode creation wait time: " + znodeCreationTimeout + "s exhausted");
}
setDeregisteredWithZooKeeper(false);
znodePath = znode.getActualPath();
// Set a watch on the znode
if (zooKeeperClient.checkExists().usingWatcher(new DeRegisterWatcher()).forPath(znodePath) == null) {
// No node exists, throw exception
throw new Exception("Unable to create znode for this HiveServer2 instance on ZooKeeper.");
}
LOG.info("Created a znode on ZooKeeper for HiveServer2 uri: " + instanceURI);
} catch (Exception e) {
LOG.fatal("Unable to create a znode for this server instance", e);
if (znode != null) {
znode.close();
}
throw (e);
}
}
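// Illustrative znode layout created above (host, version and sequence are
// assumptions, not taken from a real deployment): with the root namespace
// "hiveserver2", the sequential ephemeral node gets a path such as
//   /hiveserver2/serverUri=hs2-1.example.com:10000;version=2.3.0;sequence=0000000007
// and, when HIVE_SERVER2_ZOOKEEPER_PUBLISH_CONFIGS is enabled, its data is the
// ';'-joined key=value list built by addConfsToPublish().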
/**
* Add conf keys, values that HiveServer2 will publish to ZooKeeper.
* @param hiveConf
*/
private void addConfsToPublish(HiveConf hiveConf, Map<String, String> confsToPublish) {
// Hostname
confsToPublish.put(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST.varname,
hiveConf.getVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST));
// Transport mode
confsToPublish.put(ConfVars.HIVE_SERVER2_TRANSPORT_MODE.varname,
hiveConf.getVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE));
// Transport specific confs
if (isHTTPTransportMode(hiveConf)) {
confsToPublish.put(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT.varname,
Integer.toString(hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT)));
confsToPublish.put(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PATH.varname,
hiveConf.getVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PATH));
} else {
confsToPublish.put(ConfVars.HIVE_SERVER2_THRIFT_PORT.varname,
Integer.toString(hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT)));
confsToPublish.put(ConfVars.HIVE_SERVER2_THRIFT_SASL_QOP.varname,
hiveConf.getVar(ConfVars.HIVE_SERVER2_THRIFT_SASL_QOP));
}
// Auth specific confs
confsToPublish.put(ConfVars.HIVE_SERVER2_AUTHENTICATION.varname,
hiveConf.getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION));
if (isKerberosAuthMode(hiveConf)) {
confsToPublish.put(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL.varname,
hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL));
}
// SSL conf
confsToPublish.put(ConfVars.HIVE_SERVER2_USE_SSL.varname,
Boolean.toString(hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_USE_SSL)));
}
/**
* For a kerberized cluster, we dynamically set up the client's JAAS conf.
*
* @param hiveConf
* @throws Exception
*/
private void setUpZooKeeperAuth(HiveConf hiveConf) throws Exception {
if (UserGroupInformation.isSecurityEnabled()) {
String principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL);
if (principal.isEmpty()) {
throw new IOException("HiveServer2 Kerberos principal is empty");
}
String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB);
if (keyTabFile.isEmpty()) {
throw new IOException("HiveServer2 Kerberos keytab is empty");
}
// Install the JAAS Configuration for the runtime
Utils.setZookeeperClientKerberosJaasConfig(principal, keyTabFile);
}
}
/**
* The watcher class which sets the de-register flag when the znode corresponding to this server
* instance is deleted. Additionally, it shuts down the server if there are no more active client
* sessions at the time of receiving a 'NodeDeleted' notification from ZooKeeper.
*/
private class DeRegisterWatcher implements Watcher {
@Override
public void process(WatchedEvent event) {
if (event.getType().equals(Watcher.Event.EventType.NodeDeleted)) {
if (znode != null) {
try {
znode.close();
LOG.warn("This HiveServer2 instance is now de-registered from ZooKeeper. "
+ "The server will be shut down after the last client session completes.");
} catch (IOException e) {
LOG.error("Failed to close the persistent ephemeral znode", e);
} finally {
HiveServer2.this.setDeregisteredWithZooKeeper(true);
// If there are no more active client sessions, stop the server
if (cliService.getSessionManager().getOpenSessionCount() == 0) {
LOG.warn("This instance of HiveServer2 has been removed from the list of server "
+ "instances available for dynamic service discovery. "
+ "The last client session has ended - will shutdown now.");
HiveServer2.this.stop();
}
}
}
}
}
}
private void removeServerInstanceFromZooKeeper() throws Exception {
setDeregisteredWithZooKeeper(true);
if (znode != null) {
znode.close();
}
zooKeeperClient.close();
LOG.info("Server instance removed from ZooKeeper.");
}
public boolean isDeregisteredWithZooKeeper() {
return deregisteredWithZooKeeper;
}
private void setDeregisteredWithZooKeeper(boolean deregisteredWithZooKeeper) {
this.deregisteredWithZooKeeper = deregisteredWithZooKeeper;
}
private String getServerInstanceURI() throws Exception {
if ((thriftCLIService == null) || (thriftCLIService.getServerIPAddress() == null)) {
throw new Exception("Unable to get the server address; it hasn't been initialized yet.");
}
return thriftCLIService.getServerIPAddress().getHostName() + ":"
+ thriftCLIService.getPortNumber();
}
private String getServerHost() throws Exception {
if ((thriftCLIService == null) || (thriftCLIService.getServerIPAddress() == null)) {
throw new Exception("Unable to get the server address; it hasn't been initialized yet.");
}
return thriftCLIService.getServerIPAddress().getHostName();
}
@Override
public synchronized void start() {
super.start();
// If we're supporting dynamic service discovery, we'll add the service uri
// for this HiveServer2 instance to Zookeeper as a znode.
HiveConf hiveConf = this.getHiveConf();
if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY)) {
try {
addServerInstanceToZooKeeper(hiveConf);
} catch (Exception e) {
LOG.error("Error adding this HiveServer2 instance to ZooKeeper: ", e);
throw new ServiceException(e);
}
}
}
@Override
public synchronized void stop() {
LOG.info("Shutting down HiveServer2");
HiveConf hiveConf = this.getHiveConf();
super.stop();
// Shutdown Metrics
if (MetricsFactory.getInstance() != null) {
try {
MetricsFactory.close();
} catch (Exception e) {
LOG.error("error in Metrics deinit: " + e.getClass().getName() + " "
+ e.getMessage(), e);
}
}
// Remove this server instance from ZooKeeper if dynamic service discovery is set
if (hiveConf != null && hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY)) {
try {
removeServerInstanceFromZooKeeper();
} catch (Exception e) {
LOG.error("Error removing znode for this HiveServer2 instance from ZooKeeper.", e);
}
}
// There should already be an instance of the session pool manager.
// If not, ignoring is fine while stopping HiveServer2.
if (hiveConf != null && hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS)) {
try {
TezSessionPoolManager.getInstance().stop();
} catch (Exception e) {
LOG.error("Tez session pool manager stop had an error during stop of HiveServer2. "
+ "Shutting down HiveServer2 anyway.", e);
}
}
if (hiveConf != null && hiveConf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
try {
SparkSessionManagerImpl.getInstance().shutdown();
} catch(Exception ex) {
LOG.error("Spark session pool manager failed to stop during HiveServer2 shutdown.", ex);
}
}
}
@VisibleForTesting
public static void scheduleClearDanglingScratchDir(HiveConf hiveConf, int initialWaitInSec) {
if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR)) {
ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(
new BasicThreadFactory.Builder()
.namingPattern("cleardanglingscratchdir-%d")
.daemon(true)
.build());
executor.scheduleAtFixedRate(new ClearDanglingScratchDir(false, false, false,
HiveConf.getVar(hiveConf, HiveConf.ConfVars.SCRATCHDIR), hiveConf), initialWaitInSec,
HiveConf.getTimeVar(hiveConf, ConfVars.HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR_INTERVAL,
TimeUnit.SECONDS), TimeUnit.SECONDS);
}
}
private static void startHiveServer2() throws Throwable {
long attempts = 0, maxAttempts = 1;
while (true) {
LOG.info("Starting HiveServer2");
HiveConf hiveConf = new HiveConf();
maxAttempts = hiveConf.getLongVar(HiveConf.ConfVars.HIVE_SERVER2_MAX_START_ATTEMPTS);
HiveServer2 server = null;
try {
// Schedule a task to clean up the dangling scratch dir periodically,
// with an initial wait for a random time between 0-10 min to
// avoid an initial spike when using multiple HS2 instances
scheduleClearDanglingScratchDir(hiveConf, new Random().nextInt(600));
server = new HiveServer2();
server.init(hiveConf);
server.start();
try {
JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(hiveConf);
pauseMonitor.start();
} catch (Throwable t) {
LOG.warn("Could not initiate the JvmPauseMonitor thread." + " GCs and Pauses may not be " +
"warned upon.", t);
}
if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS)) {
TezSessionPoolManager sessionPool = TezSessionPoolManager.getInstance();
sessionPool.setupPool(hiveConf);
sessionPool.startPool();
}
if (hiveConf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
SparkSessionManagerImpl.getInstance().setup(hiveConf);
}
break;
} catch (Throwable throwable) {
if (server != null) {
try {
server.stop();
} catch (Throwable t) {
LOG.info("Exception caught when calling stop of HiveServer2 before retrying start", t);
} finally {
server = null;
}
}
if (++attempts >= maxAttempts) {
throw new Error("Max start attempts " + maxAttempts + " exhausted", throwable);
} else {
LOG.warn("Error starting HiveServer2 on attempt " + attempts
+ ", will retry in 60 seconds", throwable);
try {
Thread.sleep(60L * 1000L);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
}
}
}
/**
* Remove all znodes corresponding to the given version number from ZooKeeper
*
* @param versionNumber
* @throws Exception
*/
static void deleteServerInstancesFromZooKeeper(String versionNumber) throws Exception {
HiveConf hiveConf = new HiveConf();
String zooKeeperEnsemble = ZooKeeperHiveHelper.getQuorumServers(hiveConf);
String rootNamespace = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_ZOOKEEPER_NAMESPACE);
int baseSleepTime = (int) hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME, TimeUnit.MILLISECONDS);
int maxRetries = hiveConf.getIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES);
CuratorFramework zooKeeperClient =
CuratorFrameworkFactory.builder().connectString(zooKeeperEnsemble)
.retryPolicy(new ExponentialBackoffRetry(baseSleepTime, maxRetries)).build();
zooKeeperClient.start();
List<String> znodePaths =
zooKeeperClient.getChildren().forPath(
ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace);
List<String> znodePathsUpdated;
// Now for each path that is for the given versionNumber, delete the znode from ZooKeeper
for (int i = 0; i < znodePaths.size(); i++) {
String znodePath = znodePaths.get(i);
deleteSignal = new CountDownLatch(1);
if (znodePath.contains("version=" + versionNumber + ";")) {
String fullZnodePath =
ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace
+ ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + znodePath;
LOG.warn("Will attempt to remove the znode: " + fullZnodePath + " from ZooKeeper");
System.out.println("Will attempt to remove the znode: " + fullZnodePath + " from ZooKeeper");
zooKeeperClient.delete().guaranteed().inBackground(new DeleteCallBack())
.forPath(fullZnodePath);
// Wait for the delete to complete
deleteSignal.await();
// Get the updated path list
znodePathsUpdated =
zooKeeperClient.getChildren().forPath(
ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace);
// Gives a list of any new paths that may have been created to maintain the persistent ephemeral node
znodePathsUpdated.removeAll(znodePaths);
// Add the new paths to the znodes list. We'll try for their removal as well.
znodePaths.addAll(znodePathsUpdated);
}
}
zooKeeperClient.close();
}
private static class DeleteCallBack implements BackgroundCallback {
@Override
public void processResult(CuratorFramework zooKeeperClient, CuratorEvent event)
throws Exception {
if (event.getType() == CuratorEventType.DELETE) {
deleteSignal.countDown();
}
}
}
public static void main(String[] args) {
HiveConf.setLoadHiveServer2Config(true);
try {
ServerOptionsProcessor oproc = new ServerOptionsProcessor("hiveserver2");
ServerOptionsProcessorResponse oprocResponse = oproc.parse(args);
// NOTE: It is critical to do this here so that log4j is reinitialized
// before any of the other core hive classes are loaded
String initLog4jMessage = LogUtils.initHiveLog4j();
LOG.debug(initLog4jMessage);
HiveStringUtils.startupShutdownMessage(HiveServer2.class, args, LOG);
// Log debug message from "oproc" after log4j initializes properly
LOG.debug(oproc.getDebugMessage().toString());
// Call the executor which will execute the appropriate command based on the parsed options
oprocResponse.getServerOptionsExecutor().execute();
} catch (LogInitializationException e) {
LOG.error("Error initializing log: " + e.getMessage(), e);
System.exit(-1);
}
}
/**
* ServerOptionsProcessor.
* Process arguments given to HiveServer2 (-hiveconf property=value)
* Set properties in System properties
* Create an appropriate response object,
* which has executor to execute the appropriate command based on the parsed options.
*/
static class ServerOptionsProcessor {
private final Options options = new Options();
private org.apache.commons.cli.CommandLine commandLine;
private final String serverName;
private final StringBuilder debugMessage = new StringBuilder();
@SuppressWarnings("static-access")
ServerOptionsProcessor(String serverName) {
this.serverName = serverName;
// -hiveconf x=y
options.addOption(OptionBuilder
.withValueSeparator()
.hasArgs(2)
.withArgName("property=value")
.withLongOpt("hiveconf")
.withDescription("Use value for given property")
.create());
// -deregister <versionNumber>
options.addOption(OptionBuilder
.hasArgs(1)
.withArgName("versionNumber")
.withLongOpt("deregister")
.withDescription("Deregister all instances of given version from dynamic service discovery")
.create());
options.addOption(new Option("H", "help", false, "Print help information"));
}
ServerOptionsProcessorResponse parse(String[] argv) {
try {
commandLine = new GnuParser().parse(options, argv);
// Process --hiveconf
// Get hiveconf param values and set the System property values
Properties confProps = commandLine.getOptionProperties("hiveconf");
for (String propKey : confProps.stringPropertyNames()) {
// save logging message for log4j output later, after log4j initializes properly
debugMessage.append("Setting " + propKey + "=" + confProps.getProperty(propKey) + ";\n");
System.setProperty(propKey, confProps.getProperty(propKey));
}
// Process --help
if (commandLine.hasOption('H')) {
return new ServerOptionsProcessorResponse(new HelpOptionExecutor(serverName, options));
}
// Process --deregister
if (commandLine.hasOption("deregister")) {
return new ServerOptionsProcessorResponse(new DeregisterOptionExecutor(
commandLine.getOptionValue("deregister")));
}
} catch (ParseException e) {
// Error out & exit - we were not able to parse the args successfully
System.err.println("Error starting HiveServer2 with given arguments: ");
System.err.println(e.getMessage());
System.exit(-1);
}
// Default executor, when no option is specified
return new ServerOptionsProcessorResponse(new StartOptionExecutor());
}
StringBuilder getDebugMessage() {
return debugMessage;
}
}
/**
* The response sent back from {@link ServerOptionsProcessor#parse(String[])}
*/
static class ServerOptionsProcessorResponse {
private final ServerOptionsExecutor serverOptionsExecutor;
ServerOptionsProcessorResponse(ServerOptionsExecutor serverOptionsExecutor) {
this.serverOptionsExecutor = serverOptionsExecutor;
}
ServerOptionsExecutor getServerOptionsExecutor() {
return serverOptionsExecutor;
}
}
/**
* The executor interface for running the appropriate HiveServer2 command based on parsed options
*/
static interface ServerOptionsExecutor {
public void execute();
}
/**
* HelpOptionExecutor: executes the --help option by printing out the usage
*/
static class HelpOptionExecutor implements ServerOptionsExecutor {
private final Options options;
private final String serverName;
HelpOptionExecutor(String serverName, Options options) {
this.options = options;
this.serverName = serverName;
}
@Override
public void execute() {
new HelpFormatter().printHelp(serverName, options);
System.exit(0);
}
}
/**
* StartOptionExecutor: starts HiveServer2.
* This is the default executor, when no option is specified.
*/
static class StartOptionExecutor implements ServerOptionsExecutor {
@Override
public void execute() {
try {
startHiveServer2();
} catch (Throwable t) {
LOG.fatal("Error starting HiveServer2", t);
System.exit(-1);
}
}
}
/**
* DeregisterOptionExecutor: executes the --deregister option by deregistering all HiveServer2
* instances from ZooKeeper of a specific version.
*/
static class DeregisterOptionExecutor implements ServerOptionsExecutor {
private final String versionNumber;
DeregisterOptionExecutor(String versionNumber) {
this.versionNumber = versionNumber;
}
@Override
public void execute() {
try {
deleteServerInstancesFromZooKeeper(versionNumber);
} catch (Exception e) {
LOG.fatal("Error deregistering HiveServer2 instances for version: " + versionNumber
+ " from ZooKeeper", e);
System.out.println("Error deregistering HiveServer2 instances for version: " + versionNumber
+ " from ZooKeeper." + e);
System.exit(-1);
}
System.exit(0);
}
}
}
| [
"\"HIVE_SERVER2_TRANSPORT_MODE\""
]
| []
| [
"HIVE_SERVER2_TRANSPORT_MODE"
]
| [] | ["HIVE_SERVER2_TRANSPORT_MODE"] | java | 1 | 0 | |
pkg/common/config/config.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
"github.com/golang/glog"
"gopkg.in/gcfg.v1"
)
const (
DefaultRoundTripperCount uint = 3
DefaultAPIBinding string = ":43001"
DefaultK8sServiceAccount string = "cloud-controller-manager"
DefaultVCenterPort string = "443"
DefaultSecretDirectory string = "/etc/cloud/secrets"
)
// Error Messages
const (
MissingUsernameErrMsg = "Username is missing"
MissingPasswordErrMsg = "Password is missing"
InvalidVCenterIPErrMsg = "vsphere.conf does not have the VirtualCenter IP address specified"
)
// Error constants
var (
ErrUsernameMissing = errors.New(MissingUsernameErrMsg)
ErrPasswordMissing = errors.New(MissingPasswordErrMsg)
ErrInvalidVCenterIP = errors.New(InvalidVCenterIPErrMsg)
)
func getEnvKeyValue(match string, partial bool) (string, string, error) {
for _, e := range os.Environ() {
pair := strings.Split(e, "=")
if len(pair) != 2 {
continue
}
key := pair[0]
value := pair[1]
if partial && strings.Contains(key, match) {
return key, value, nil
}
if strings.Compare(key, match) == 0 {
return key, value, nil
}
}
matchType := "match"
if partial {
matchType = "partial match"
}
return "", "", fmt.Errorf("Failed to find %s with %s", matchType, match)
}
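// Usage sketch (assumed environment, not part of the original file):
//
//	// exact match: returns ("VCENTER_1_USERNAME", "admin@vsphere.local", nil)
//	k, v, err := getEnvKeyValue("VCENTER_1_USERNAME", false)
//
//	// partial match: returns the first variable whose name contains the string
//	k, v, err = getEnvKeyValue("VSPHERE_VCENTER_", true)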
//ConfigFromEnv allows setting configuration via environment variables.
func ConfigFromEnv() (cfg Config, ok bool) {
var err error
//Init
cfg.VirtualCenter = make(map[string]*VirtualCenterConfig)
//Globals
cfg.Global.VCenterIP = os.Getenv("VSPHERE_VCENTER")
cfg.Global.VCenterPort = os.Getenv("VSPHERE_VCENTER_PORT")
cfg.Global.User = os.Getenv("VSPHERE_USER")
cfg.Global.Password = os.Getenv("VSPHERE_PASSWORD")
cfg.Global.Datacenters = os.Getenv("VSPHERE_DATACENTER")
cfg.Global.SecretName = os.Getenv("VSPHERE_SECRET_NAME")
cfg.Global.SecretNamespace = os.Getenv("VSPHERE_SECRET_NAMESPACE")
cfg.Global.ServiceAccount = os.Getenv("VSPHERE_SERVICE_ACCOUNT")
var RoundTripCount uint
if os.Getenv("VSPHERE_ROUNDTRIP_COUNT") != "" {
var tmp uint64
tmp, err = strconv.ParseUint(os.Getenv("VSPHERE_ROUNDTRIP_COUNT"), 10, 32)
RoundTripCount = uint(tmp)
} else {
RoundTripCount = DefaultRoundTripperCount
}
if err != nil {
glog.Fatalf("Failed to parse VSPHERE_ROUNDTRIP_COUNT: %s", err)
}
cfg.Global.RoundTripperCount = RoundTripCount
var InsecureFlag bool
if os.Getenv("VSPHERE_INSECURE") != "" {
InsecureFlag, err = strconv.ParseBool(os.Getenv("VSPHERE_INSECURE"))
} else {
InsecureFlag = false
}
if err != nil {
glog.Errorf("Failed to parse VSPHERE_INSECURE: %s", err)
InsecureFlag = false
}
cfg.Global.InsecureFlag = InsecureFlag
var APIDisable bool
if os.Getenv("VSPHERE_API_DISABLE") != "" {
APIDisable, err = strconv.ParseBool(os.Getenv("VSPHERE_API_DISABLE"))
} else {
APIDisable = true
}
if err != nil {
glog.Errorf("Failed to parse VSPHERE_API_DISABLE: %s", err)
APIDisable = true
}
cfg.Global.APIDisable = APIDisable
var APIBinding string
if os.Getenv("VSPHERE_API_BINDING") != "" {
APIBinding = os.Getenv("VSPHERE_API_BINDING")
} else {
APIBinding = DefaultAPIBinding
}
cfg.Global.APIBinding = APIBinding
var SecretsDirectory string
if os.Getenv("VSPHERE_SECRETS_DIRECTORY") != "" {
SecretsDirectory = os.Getenv("VSPHERE_SECRETS_DIRECTORY")
} else {
SecretsDirectory = DefaultSecretDirectory
}
if _, err := os.Stat(SecretsDirectory); os.IsNotExist(err) {
SecretsDirectory = "" //Dir does not exist, set to empty string
}
cfg.Global.SecretsDirectory = SecretsDirectory
cfg.Global.CAFile = os.Getenv("VSPHERE_CAFILE")
cfg.Global.Thumbprint = os.Getenv("VSPHERE_THUMBPRINT")
cfg.Labels.Region = os.Getenv("VSPHERE_LABEL_REGION")
cfg.Labels.Zone = os.Getenv("VSPHERE_LABEL_ZONE")
//Build VirtualCenter from ENVs
for _, e := range os.Environ() {
pair := strings.Split(e, "=")
if len(pair) != 2 {
continue
}
key := pair[0]
value := pair[1]
if strings.HasPrefix(key, "VSPHERE_VCENTER_") && len(value) > 0 {
id := strings.TrimPrefix(key, "VSPHERE_VCENTER_")
vcenter := value
_, username, errUsername := getEnvKeyValue("VCENTER_"+id+"_USERNAME", false)
if errUsername != nil {
username = cfg.Global.User
}
_, password, errPassword := getEnvKeyValue("VCENTER_"+id+"_PASSWORD", false)
if errPassword != nil {
password = cfg.Global.Password
}
_, port, errPort := getEnvKeyValue("VCENTER_"+id+"_PORT", false)
if errPort != nil {
port = cfg.Global.VCenterPort
}
insecureFlag := false
_, insecureTmp, errInsecure := getEnvKeyValue("VCENTER_"+id+"_INSECURE", false)
// use the per-vCenter override only when the env key was actually found
if errInsecure == nil {
insecureFlagTmp, errTmp := strconv.ParseBool(insecureTmp)
if errTmp == nil {
insecureFlag = insecureFlagTmp
}
}
_, datacenters, errDatacenters := getEnvKeyValue("VCENTER_"+id+"_DATACENTERS", false)
if errDatacenters != nil {
datacenters = cfg.Global.Datacenters
}
roundtrip := DefaultRoundTripperCount
_, roundtripTmp, errRoundtrip := getEnvKeyValue("VCENTER_"+id+"_ROUNDTRIP", false)
// use the per-vCenter override only when the env key was actually found
if errRoundtrip == nil {
roundtripFlagTmp, errTmp := strconv.ParseUint(roundtripTmp, 10, 32)
if errTmp == nil {
roundtrip = uint(roundtripFlagTmp)
}
}
_, caFile, errCaFile := getEnvKeyValue("VCENTER_"+id+"_CAFILE", false)
if errCaFile != nil {
caFile = cfg.Global.CAFile
}
_, thumbprint, errThumbprint := getEnvKeyValue("VCENTER_"+id+"_THUMBPRINT", false)
if errThumbprint != nil {
thumbprint = cfg.Global.Thumbprint
}
cfg.VirtualCenter[vcenter] = &VirtualCenterConfig{
User: username,
Password: password,
VCenterPort: port,
InsecureFlag: insecureFlag,
Datacenters: datacenters,
RoundTripperCount: roundtrip,
CAFile: caFile,
Thumbprint: thumbprint,
}
}
}
if cfg.Global.VCenterIP != "" && cfg.VirtualCenter[cfg.Global.VCenterIP] == nil {
cfg.VirtualCenter[cfg.Global.VCenterIP] = &VirtualCenterConfig{
User: cfg.Global.User,
Password: cfg.Global.Password,
VCenterPort: cfg.Global.VCenterPort,
InsecureFlag: cfg.Global.InsecureFlag,
Datacenters: cfg.Global.Datacenters,
RoundTripperCount: cfg.Global.RoundTripperCount,
CAFile: cfg.Global.CAFile,
Thumbprint: cfg.Global.Thumbprint,
}
}
//Valid config?
for _, vcConfig := range cfg.VirtualCenter {
if (vcConfig.User == "" && vcConfig.Password == "") ||
(vcConfig.CAFile == "" && vcConfig.Thumbprint == "") {
ok = false
return
}
}
ok = (cfg.Global.VCenterIP != "" && cfg.Global.User != "" && cfg.Global.Password != "")
return
}
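// Illustrative environment (assumed values) that ConfigFromEnv turns into one
// VirtualCenterConfig entry keyed by "vc1.example.com":
//
//	VSPHERE_VCENTER_1=vc1.example.com
//	VCENTER_1_USERNAME=administrator@vsphere.local
//	VCENTER_1_PASSWORD=secret
//	VCENTER_1_DATACENTERS=dc-1
//
// Any VCENTER_1_* key that is absent falls back to the corresponding global
// VSPHERE_* value read above.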
func fixUpConfigFromFile(cfg *Config) error {
//Fix default global values
if cfg.Global.RoundTripperCount == 0 {
cfg.Global.RoundTripperCount = DefaultRoundTripperCount
}
if cfg.Global.ServiceAccount == "" {
cfg.Global.ServiceAccount = DefaultK8sServiceAccount
}
if cfg.Global.VCenterPort == "" {
cfg.Global.VCenterPort = DefaultVCenterPort
}
isSecretInfoProvided := true
if (cfg.Global.SecretName == "" || cfg.Global.SecretNamespace == "") && cfg.Global.SecretsDirectory == "" {
isSecretInfoProvided = false
}
// vsphere.conf is no longer supported in the old format.
for vcServer, vcConfig := range cfg.VirtualCenter {
glog.V(4).Infof("Initializing vc server %s", vcServer)
if vcServer == "" {
glog.Error(InvalidVCenterIPErrMsg)
return ErrInvalidVCenterIP
}
if !isSecretInfoProvided {
if vcConfig.User == "" {
vcConfig.User = cfg.Global.User
if vcConfig.User == "" {
glog.Errorf("vcConfig.User is empty for vc %s!", vcServer)
return ErrUsernameMissing
}
}
if vcConfig.Password == "" {
vcConfig.Password = cfg.Global.Password
if vcConfig.Password == "" {
glog.Errorf("vcConfig.Password is empty for vc %s!", vcServer)
return ErrPasswordMissing
}
}
}
if vcConfig.VCenterPort == "" {
vcConfig.VCenterPort = cfg.Global.VCenterPort
}
if vcConfig.Datacenters == "" {
if cfg.Global.Datacenters != "" {
vcConfig.Datacenters = cfg.Global.Datacenters
}
}
if vcConfig.RoundTripperCount == 0 {
vcConfig.RoundTripperCount = cfg.Global.RoundTripperCount
}
if vcConfig.CAFile == "" {
vcConfig.CAFile = cfg.Global.CAFile
}
if vcConfig.Thumbprint == "" {
vcConfig.Thumbprint = cfg.Global.Thumbprint
}
insecure := vcConfig.InsecureFlag
if !insecure {
insecure = cfg.Global.InsecureFlag
vcConfig.InsecureFlag = cfg.Global.InsecureFlag
}
}
// Create a single instance of VSphereInstance for the Global VCenterIP if the
// VirtualCenter does not already exist in the map
if !isSecretInfoProvided && cfg.Global.VCenterIP != "" && cfg.VirtualCenter[cfg.Global.VCenterIP] == nil {
vcConfig := &VirtualCenterConfig{
User: cfg.Global.User,
Password: cfg.Global.Password,
VCenterPort: cfg.Global.VCenterPort,
InsecureFlag: cfg.Global.InsecureFlag,
Datacenters: cfg.Global.Datacenters,
RoundTripperCount: cfg.Global.RoundTripperCount,
CAFile: cfg.Global.CAFile,
Thumbprint: cfg.Global.Thumbprint,
}
cfg.VirtualCenter[cfg.Global.VCenterIP] = vcConfig
}
return nil
}
//ReadConfig parses vSphere cloud config file and stores it into VSphereConfig.
func ReadConfig(config io.Reader) (Config, error) {
if config == nil {
return Config{}, fmt.Errorf("no vSphere cloud provider config file given")
}
cfg, _ := ConfigFromEnv()
err := gcfg.FatalOnly(gcfg.ReadInto(&cfg, config))
if err != nil {
return cfg, err
}
err = fixUpConfigFromFile(&cfg)
return cfg, err
}
| [
"\"VSPHERE_VCENTER\"",
"\"VSPHERE_VCENTER_PORT\"",
"\"VSPHERE_USER\"",
"\"VSPHERE_PASSWORD\"",
"\"VSPHERE_DATACENTER\"",
"\"VSPHERE_SECRET_NAME\"",
"\"VSPHERE_SECRET_NAMESPACE\"",
"\"VSPHERE_SERVICE_ACCOUNT\"",
"\"VSPHERE_ROUNDTRIP_COUNT\"",
"\"VSPHERE_ROUNDTRIP_COUNT\"",
"\"VSPHERE_INSECURE\"",
"\"VSPHERE_INSECURE\"",
"\"VSPHERE_API_DISABLE\"",
"\"VSPHERE_API_DISABLE\"",
"\"VSPHERE_API_BINDING\"",
"\"VSPHERE_API_BINDING\"",
"\"VSPHERE_SECRETS_DIRECTORY\"",
"\"VSPHERE_SECRETS_DIRECTORY\"",
"\"VSPHERE_CAFILE\"",
"\"VSPHERE_THUMBPRINT\"",
"\"VSPHERE_LABEL_REGION\"",
"\"VSPHERE_LABEL_ZONE\""
]
| []
| [
"VSPHERE_LABEL_REGION",
"VSPHERE_VCENTER",
"VSPHERE_INSECURE",
"VSPHERE_DATACENTER",
"VSPHERE_ROUNDTRIP_COUNT",
"VSPHERE_USER",
"VSPHERE_PASSWORD",
"VSPHERE_LABEL_ZONE",
"VSPHERE_SECRET_NAME",
"VSPHERE_THUMBPRINT",
"VSPHERE_API_BINDING",
"VSPHERE_SECRETS_DIRECTORY",
"VSPHERE_SECRET_NAMESPACE",
"VSPHERE_CAFILE",
"VSPHERE_API_DISABLE",
"VSPHERE_VCENTER_PORT",
"VSPHERE_SERVICE_ACCOUNT"
]
| [] | ["VSPHERE_LABEL_REGION", "VSPHERE_VCENTER", "VSPHERE_INSECURE", "VSPHERE_DATACENTER", "VSPHERE_ROUNDTRIP_COUNT", "VSPHERE_USER", "VSPHERE_PASSWORD", "VSPHERE_LABEL_ZONE", "VSPHERE_SECRET_NAME", "VSPHERE_THUMBPRINT", "VSPHERE_API_BINDING", "VSPHERE_SECRETS_DIRECTORY", "VSPHERE_SECRET_NAMESPACE", "VSPHERE_CAFILE", "VSPHERE_API_DISABLE", "VSPHERE_VCENTER_PORT", "VSPHERE_SERVICE_ACCOUNT"] | go | 17 | 0 | |
aliyun-python-sdk-core/aliyunsdkcore/http/http_response.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding=utf-8
import os
import logging
from aliyunsdkcore.vendored.requests import Request, Session
from aliyunsdkcore.vendored.requests.packages import urllib3
from aliyunsdkcore.http.http_request import HttpRequest
from aliyunsdkcore.http import protocol_type as PT
from aliyunsdkcore.vendored.requests import status_codes
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
logger.addHandler(ch)
DEFAULT_CONNECT_TIMEOUT = 5
class HttpResponse(HttpRequest):
def __init__(
self,
host="",
url="/",
method="GET",
headers={},
protocol=PT.HTTP,
content=None,
port=None,
key_file=None,
cert_file=None,
read_timeout=None,
connect_timeout=None):
HttpRequest.__init__(
self,
host=host,
url=url,
method=method,
headers=headers)
self.__ssl_enable = False
if protocol is PT.HTTPS:
self.__ssl_enable = True
self.__key_file = key_file
self.__cert_file = cert_file
self.__port = port
self.__connection = None
self.__read_timeout = read_timeout
self.__connect_timeout = connect_timeout
self.set_body(content)
def set_ssl_enable(self, enable):
self.__ssl_enable = enable
def get_ssl_enabled(self):
return self.__ssl_enable
@staticmethod
def prepare_http_debug(request, symbol):
base = ''
for key, value in request.headers.items():
base += '\n%s %s : %s' % (symbol, key, value)
return base
def do_http_debug(self, request, response):
# log the request
request_base = '\n> %s %s HTTP/1.1' % (self.get_method().upper(), self.get_url())
request_base += '\n> Host : %s' % self.get_host()
logger.debug(request_base + self.prepare_http_debug(request, '>'))
# log the response
response_base = '\n< HTTP/1.1 %s %s' % (
response.status_code, status_codes._codes.get(response.status_code)[0].upper())
logger.debug(response_base + self.prepare_http_debug(response, '<'))
def get_response_object(self):
with Session() as s:
current_protocol = 'https://' if self.get_ssl_enabled() else 'http://'
url = current_protocol + self.get_host() + self.get_url()
if self.__port != 80:
url = current_protocol + self.get_host() + ":" + str(self.__port) + self.get_url()
req = Request(method=self.get_method(), url=url,
data=self.get_body(),
headers=self.get_headers(),
)
prepped = s.prepare_request(req)
proxy_https = os.environ.get('HTTPS_PROXY') or os.environ.get(
'https_proxy')
proxy_http = os.environ.get(
'HTTP_PROXY') or os.environ.get('http_proxy')
proxies = {
"http": proxy_http,
"https": proxy_https,
}
# ignore InsecureRequestWarning
urllib3.disable_warnings()
response = s.send(prepped, proxies=proxies,
timeout=(self.__connect_timeout, self.__read_timeout),
allow_redirects=False, verify=None, cert=None)
http_debug = os.environ.get('DEBUG')
if http_debug is not None and http_debug.lower() == 'sdk':
# http debug information
self.do_http_debug(prepped, response)
return response.status_code, response.headers, response.content
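# Illustrative usage notes (assumptions, not part of the original SDK file):
# HTTPS_PROXY / HTTP_PROXY (or their lowercase variants) route the request
# through the given proxy, and exporting DEBUG=sdk makes do_http_debug() log
# the prepared request and response headers, e.g.:
#   export HTTPS_PROXY=http://proxy.example.com:3128
#   export DEBUG=sdk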
| []
| []
| [
"https_proxy",
"HTTP_PROXY",
"DEBUG",
"HTTPS_PROXY",
"http_proxy"
]
| [] | ["https_proxy", "HTTP_PROXY", "DEBUG", "HTTPS_PROXY", "http_proxy"] | python | 5 | 0 | |
BQ_Load_sdwanB2BSlamLog.py | #**************************************************************************
#Script Name : BQ_Load_sdwanB2BSlamLog.py
#Description : This script will load data into the sdwanB2BSlamLog BQ table.
#Created by : Vibhor Gupta
#Version Author Created Date Comments
#1.0 Vibhor 2020-09-15 Initial version
#****************************************************************
#------------Import Lib-----------------------#
import apache_beam as beam
from apache_beam import window
from apache_beam.transforms.window import FixedWindows
from apache_beam.options.pipeline_options import PipelineOptions, StandardOptions
import os, sys
from apache_beam.runners.dataflow.ptransform_overrides import CreatePTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import ReadPTransformOverride
import argparse
import logging
from apache_beam.options.pipeline_options import SetupOptions
#------------Set up BQ parameters-----------------------#
# Replace with Project Id
project = 'PROJ'
#-------------Splitting Of Records----------------------#
class Transaction(beam.DoFn):
def process(self, element):
return [{"partition_date": element[0].split('.')[0], "C1": element[0], "applianceName": element[1].split('=')[1], "tenantName": element[2].split('=')[1], "localAccCktName": element[3].split('=')[1], "remoteAccCktName": element[4].split('=')[1], "localSiteName": element[5].split('=')[1], "remoteSiteName": element[6].split('=')[1], "fwdClass": element[7].split('=')[1], "tenantId": element[8].split('=')[1], "delay": element[9].split('=')[1], "fwdDelayVar": element[10].split('=')[1], "revDelayVar": element[11].split('=')[1], "fwdLoss": element[12].split('=')[1], "revLoss": element[13].split('=')[1], "fwdLossRatio": element[14].split('=')[1], "revLossRatio": element[15].split('=')[1], "pduLossRatio": element[16].split('=')[1], "fwdSent": element[17].split('=')[1], "revSent": element[18].split('=')[1]}]
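# Illustrative input/output for Transaction.process (the record layout is an
# assumption inferred from the split logic above, not taken from real data):
# a comma-separated line such as
#   2020-09-15T10:00:00.123,applianceName=br1,tenantName=acme,...
# becomes a dict like
#   {"partition_date": "2020-09-15T10:00:00", "C1": "2020-09-15T10:00:00.123",
#    "applianceName": "br1", "tenantName": "acme", ...}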
#------------Apache Beam Code to load data over BQ Table-----------------------#
def run(argv=None, save_main_session=True):
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
dest='input',
help='Input file to process.')
parser.add_argument(
'--pro_id',
dest='pro_id',
type=str,
default='xxxxxxxxxx',
help='project id')
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
p1 = beam.Pipeline(options=pipeline_options)
#data_f = sys.argv[1]
logging.info('***********')
logging.info(known_args.input)
data_loading = (
p1
|'Read from File' >> beam.io.ReadFromText(known_args.input,skip_header_lines=0)
|'Spliting of Fields' >> beam.Map(lambda record: record.split(','))
|'Clean-Data' >> beam.ParDo(Transaction())
)
project_id = "PROJ"
dataset_id = 'Prod_Networking'
table_id = known_args.pro_id
table_schema = ('partition_date:DATETIME, C1:STRING, applianceName:STRING, tenantName:STRING, localAccCktName:STRING, remoteAccCktName:STRING, localSiteName:STRING, remoteSiteName:STRING, fwdClass:STRING, tenantId:INTEGER, delay:INTEGER, fwdDelayVar:INTEGER, revDelayVar:INTEGER, fwdLoss:INTEGER, revLoss:INTEGER, fwdLossRatio:STRING, revLossRatio:STRING, pduLossRatio:STRING, fwdSent:INTEGER, revSent:INTEGER')
# Persist to BigQuery
# WriteToBigQuery accepts the data as list of JSON objects
result = (
data_loading
| 'Write-dwanB2BSlamLog' >> beam.io.WriteToBigQuery(
table=table_id,
dataset=dataset_id,
project=project_id,
schema=table_schema,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
))
#-------------------------------End -------------------------------------------------------------------------------------------------------------
result = p1.run()
result.wait_until_finish()
if __name__ == '__main__':
#logging.getLogger().setLevel(logging.INFO)
path_service_account = '/home/vibhg/PROJ-fbft436-jh4527.json'
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = path_service_account
run()
| []
| []
| [
"GOOGLE_APPLICATION_CREDENTIALS"
]
| [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | python | 1 | 0 | |
bytechomp/datatypes/__init__.py | """
bytechomp.datatypes
"""
from bytechomp.datatypes.declarations import *
| []
| []
| []
| [] | [] | python | null | null | null |
vendor/github.com/xenolf/lego/providers/dns/vegadns/vegadns.go | // Package vegadns implements a DNS provider for solving the DNS-01
// challenge using VegaDNS.
package vegadns
import (
"errors"
"fmt"
"os"
"strings"
"time"
vegaClient "github.com/OpenDNS/vegadns2client"
"github.com/xenolf/lego/acme"
"github.com/xenolf/lego/platform/config/env"
)
// Config is used to configure the creation of the DNSProvider
type Config struct {
BaseURL string
APIKey string
APISecret string
PropagationTimeout time.Duration
PollingInterval time.Duration
TTL int
}
// NewDefaultConfig returns a default configuration for the DNSProvider
func NewDefaultConfig() *Config {
return &Config{
TTL: env.GetOrDefaultInt("VEGADNS_TTL", 10),
PropagationTimeout: env.GetOrDefaultSecond("VEGADNS_PROPAGATION_TIMEOUT", 12*time.Minute),
PollingInterval: env.GetOrDefaultSecond("VEGADNS_POLLING_INTERVAL", 1*time.Minute),
}
}
// DNSProvider describes a provider for VegaDNS
type DNSProvider struct {
config *Config
client vegaClient.VegaDNSClient
}
// NewDNSProvider returns a DNSProvider instance configured for VegaDNS.
// Credentials must be passed in the environment variables:
// VEGADNS_URL, SECRET_VEGADNS_KEY, SECRET_VEGADNS_SECRET.
func NewDNSProvider() (*DNSProvider, error) {
values, err := env.Get("VEGADNS_URL")
if err != nil {
return nil, fmt.Errorf("vegadns: %v", err)
}
config := NewDefaultConfig()
config.BaseURL = values["VEGADNS_URL"]
config.APIKey = os.Getenv("SECRET_VEGADNS_KEY")
config.APISecret = os.Getenv("SECRET_VEGADNS_SECRET")
return NewDNSProviderConfig(config)
}
// NewDNSProviderCredentials uses the supplied credentials
// to return a DNSProvider instance configured for VegaDNS.
// Deprecated: use NewDNSProviderConfig instead.
func NewDNSProviderCredentials(vegaDNSURL string, key string, secret string) (*DNSProvider, error) {
config := NewDefaultConfig()
config.BaseURL = vegaDNSURL
config.APIKey = key
config.APISecret = secret
return NewDNSProviderConfig(config)
}
// NewDNSProviderConfig returns a DNSProvider instance configured for VegaDNS.
func NewDNSProviderConfig(config *Config) (*DNSProvider, error) {
if config == nil {
return nil, errors.New("vegadns: the configuration of the DNS provider is nil")
}
vega := vegaClient.NewVegaDNSClient(config.BaseURL)
vega.APIKey = config.APIKey
vega.APISecret = config.APISecret
return &DNSProvider{client: vega, config: config}, nil
}
// Timeout returns the timeout and interval to use when checking for DNS propagation.
// Adjusting here to cope with spikes in propagation times.
func (d *DNSProvider) Timeout() (timeout, interval time.Duration) {
return d.config.PropagationTimeout, d.config.PollingInterval
}
// Present creates a TXT record to fulfil the dns-01 challenge
func (d *DNSProvider) Present(domain, token, keyAuth string) error {
fqdn, value, _ := acme.DNS01Record(domain, keyAuth)
_, domainID, err := d.client.GetAuthZone(fqdn)
if err != nil {
return fmt.Errorf("vegadns: can't find Authoritative Zone for %s in Present: %v", fqdn, err)
}
err = d.client.CreateTXT(domainID, fqdn, value, d.config.TTL)
if err != nil {
return fmt.Errorf("vegadns: %v", err)
}
return nil
}
// CleanUp removes the TXT record matching the specified parameters
func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error {
fqdn, _, _ := acme.DNS01Record(domain, keyAuth)
_, domainID, err := d.client.GetAuthZone(fqdn)
if err != nil {
return fmt.Errorf("vegadns: can't find Authoritative Zone for %s in CleanUp: %v", fqdn, err)
}
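// Strip the trailing dot from the FQDN before looking up the TXT record ID.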
txt := strings.TrimSuffix(fqdn, ".")
recordID, err := d.client.GetRecordID(domainID, txt, "TXT")
if err != nil {
return fmt.Errorf("vegadns: couldn't get Record ID in CleanUp: %s", err)
}
err = d.client.DeleteRecord(recordID)
if err != nil {
return fmt.Errorf("vegadns: %v", err)
}
return nil
}
| [
"\"SECRET_VEGADNS_KEY\"",
"\"SECRET_VEGADNS_SECRET\""
]
| []
| [
"SECRET_VEGADNS_KEY",
"SECRET_VEGADNS_SECRET"
]
| [] | ["SECRET_VEGADNS_KEY", "SECRET_VEGADNS_SECRET"] | go | 2 | 0 | |
src/main/java/com/ibm/watson/apis/conversation_enhanced/discovery/DiscoveryQuery.java | /**
* (C) Copyright IBM Corp. 2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.ibm.watson.apis.conversation_enhanced.discovery;
import com.ibm.watson.apis.conversation_enhanced.utils.Constants;
import com.ibm.watson.developer_cloud.discovery.v1.Discovery;
import com.ibm.watson.developer_cloud.discovery.v1.model.query.QueryRequest;
import com.ibm.watson.developer_cloud.discovery.v1.model.query.QueryResponse;
public class DiscoveryQuery {
private String userName;
private String password;
private String collectionId;
private String environmentId;
private Discovery discovery;
public DiscoveryQuery() {
userName = System.getenv("DISCOVERY_USERNAME");
password = System.getenv("DISCOVERY_PASSWORD");
collectionId = System.getenv("DISCOVERY_COLLECTION_ID");
environmentId = System.getenv("DISCOVERY_ENVIRONMENT_ID");
discovery = new Discovery(Constants.DISCOVERY_VERSION);
discovery.setEndPoint(Constants.DISCOVERY_URL);
discovery.setUsernameAndPassword(userName, password);
}
/**
* Use the Watson Developer Cloud SDK to send the user's query to the
* discovery service
*
* @param userQuery
* The user's query to be sent to the discovery service
* @return The query responses obtained from the discovery service
* @throws Exception if the query to the Discovery service fails
*/
public QueryResponse query(String userQuery) throws Exception {
QueryRequest.Builder queryBuilder = new QueryRequest.Builder(environmentId, collectionId);
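// Build a query string that matches the user's input against both the
// searchText and enrichedText fields of the collection.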
StringBuilder sb = new StringBuilder();
sb.append("searchText:");
sb.append(userQuery);
sb.append(",");
sb.append("enrichedText:");
sb.append(userQuery);
queryBuilder.query(sb.toString());
QueryResponse queryResponse = discovery.query(queryBuilder.build()).execute();
return queryResponse;
}
}
| [
"\"DISCOVERY_USERNAME\"",
"\"DISCOVERY_PASSWORD\"",
"\"DISCOVERY_COLLECTION_ID\"",
"\"DISCOVERY_ENVIRONMENT_ID\""
]
| []
| [
"DISCOVERY_ENVIRONMENT_ID",
"DISCOVERY_PASSWORD",
"DISCOVERY_COLLECTION_ID",
"DISCOVERY_USERNAME"
]
| [] | ["DISCOVERY_ENVIRONMENT_ID", "DISCOVERY_PASSWORD", "DISCOVERY_COLLECTION_ID", "DISCOVERY_USERNAME"] | java | 4 | 0 | |
pkg/k8s/apps/translate.go | // Copyright 2021 The Okteto Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package apps
import (
"encoding/json"
"fmt"
"os"
"path"
"strings"
"github.com/okteto/okteto/pkg/k8s/annotations"
"github.com/okteto/okteto/pkg/k8s/labels"
"github.com/okteto/okteto/pkg/log"
"github.com/okteto/okteto/pkg/model"
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/utils/pointer"
)
const (
oktetoVersionAnnotation = "dev.okteto.com/version"
revisionAnnotation = "deployment.kubernetes.io/revision"
//OktetoBinName name of the okteto bin init container
OktetoBinName = "okteto-bin"
//OktetoInitVolumeContainerName name of the okteto init container that initializes the persistent volume from image content
OktetoInitVolumeContainerName = "okteto-init-volume"
//syncthing
oktetoSyncSecretVolume = "okteto-sync-secret" // skipcq GSC-G101 not a secret
oktetoDevSecretVolume = "okteto-dev-secret" // skipcq GSC-G101 not a secret
oktetoSecretTemplate = "okteto-%s"
)
var (
devReplicas int32 = 1
devTerminationGracePeriodSeconds int64
falseBoolean = false
)
func translate(t *model.Translation, c *kubernetes.Clientset, isOktetoNamespace bool) error {
for _, rule := range t.Rules {
devContainer := GetDevContainer(&t.K8sObject.PodTemplateSpec.Spec, rule.Container)
if devContainer == nil {
return fmt.Errorf("container '%s' not found in deployment '%s'", rule.Container, t.K8sObject.Name)
}
rule.Container = devContainer.Name
}
if t.K8sObject.ObjectType == model.DeploymentObjectType {
manifest := annotations.Get(t.K8sObject.GetObjectMeta(), model.DeploymentAnnotation)
if manifest != "" {
dOrig := &appsv1.Deployment{}
if err := json.Unmarshal([]byte(manifest), dOrig); err != nil {
return err
}
t.K8sObject.Deployment = dOrig
}
} else {
manifest := annotations.Get(t.K8sObject.GetObjectMeta(), model.StatefulsetAnnotation)
if manifest != "" {
sfsOrig := &appsv1.StatefulSet{}
if err := json.Unmarshal([]byte(manifest), sfsOrig); err != nil {
return err
}
t.K8sObject.StatefulSet = sfsOrig
}
}
dAnnotations := t.K8sObject.GetObjectMeta().GetAnnotations()
delete(dAnnotations, revisionAnnotation)
t.K8sObject.GetObjectMeta().SetAnnotations(dAnnotations)
if c != nil && isOktetoNamespace {
clientside := os.Getenv("OKTETO_CLIENTSIDE_TRANSLATION")
if clientside == "" {
commonTranslation(t)
return setTranslationAsAnnotation(t.K8sObject.PodTemplateSpec.GetObjectMeta(), t)
}
log.Infof("using clientside translation")
}
if t.K8sObject.ObjectType == model.DeploymentObjectType {
t.K8sObject.Deployment.Status = appsv1.DeploymentStatus{}
delete(t.K8sObject.Deployment.Annotations, model.DeploymentAnnotation)
manifestBytes, err := json.Marshal(t.K8sObject.Deployment)
if err != nil {
return err
}
annotations.Set(t.K8sObject.Deployment.GetObjectMeta(), model.DeploymentAnnotation, string(manifestBytes))
} else {
delete(t.K8sObject.StatefulSet.Annotations, model.StatefulsetAnnotation)
manifestBytes, err := json.Marshal(t.K8sObject.StatefulSet)
if err != nil {
return err
}
annotations.Set(t.K8sObject.StatefulSet.GetObjectMeta(), model.StatefulsetAnnotation, string(manifestBytes))
}
commonTranslation(t)
labels.Set(t.K8sObject.PodTemplateSpec.GetObjectMeta(), model.DevLabel, "true")
TranslateDevAnnotations(t.K8sObject.GetPodTemplate().GetObjectMeta(), t.Annotations)
TranslateDevTolerations(&t.K8sObject.GetPodTemplate().Spec, t.Tolerations)
t.K8sObject.PodTemplateSpec.Spec.TerminationGracePeriodSeconds = &devTerminationGracePeriodSeconds
if t.Interactive {
TranslateOktetoSyncSecret(&t.K8sObject.GetPodTemplate().Spec, t.Name)
} else {
TranslatePodAffinity(&t.K8sObject.GetPodTemplate().Spec, t.Name)
}
for _, rule := range t.Rules {
devContainer := GetDevContainer(&t.K8sObject.GetPodTemplate().Spec, rule.Container)
if devContainer == nil {
return fmt.Errorf("container '%s' not found in deployment '%s'", rule.Container, t.K8sObject.Name)
}
if rule.Image == "" {
rule.Image = devContainer.Image
}
TranslateDevContainer(devContainer, rule)
TranslatePodSpec(&t.K8sObject.GetPodTemplate().Spec, rule)
TranslateOktetoDevSecret(&t.K8sObject.GetPodTemplate().Spec, t.Name, rule.Secrets)
if rule.IsMainDevContainer() {
TranslateOktetoBinVolumeMounts(devContainer)
TranslateOktetoInitBinContainer(rule.InitContainer, &t.K8sObject.GetPodTemplate().Spec)
TranslateOktetoInitFromImageContainer(&t.K8sObject.GetPodTemplate().Spec, rule)
TranslateDinDContainer(&t.K8sObject.GetPodTemplate().Spec, rule)
TranslateOktetoBinVolume(&t.K8sObject.GetPodTemplate().Spec)
}
}
return nil
}
func commonTranslation(t *model.Translation) {
TranslateDevAnnotations(t.K8sObject.GetObjectMeta(), t.Annotations)
annotations.Set(t.K8sObject.GetObjectMeta(), oktetoVersionAnnotation, model.Version)
labels.Set(t.K8sObject.GetObjectMeta(), model.DevLabel, "true")
if t.Interactive {
labels.Set(t.K8sObject.GetPodTemplate().GetObjectMeta(), model.InteractiveDevLabel, t.Name)
} else {
labels.Set(t.K8sObject.GetPodTemplate().GetObjectMeta(), model.DetachedDevLabel, t.Name)
}
if t.K8sObject.ObjectType == model.DeploymentObjectType {
t.K8sObject.Deployment.Spec.Replicas = &devReplicas
t.K8sObject.Deployment.Spec.Strategy = appsv1.DeploymentStrategy{
Type: appsv1.RecreateDeploymentStrategyType,
}
} else {
t.K8sObject.StatefulSet.Spec.Replicas = &devReplicas
t.K8sObject.StatefulSet.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
}
}
}
//TranslateDevAnnotations sets the user provided annotations
func TranslateDevAnnotations(o metav1.Object, annotationsToAdd map[string]string) {
for key, value := range annotationsToAdd {
annotations.Set(o, key, value)
}
}
//TranslateDevTolerations sets the user provided tolerations
func TranslateDevTolerations(spec *apiv1.PodSpec, tolerations []apiv1.Toleration) {
spec.Tolerations = append(spec.Tolerations, tolerations...)
}
//TranslatePodAffinity translates the pod affinity so that all dev pods are scheduled on the same node
func TranslatePodAffinity(spec *apiv1.PodSpec, name string) {
if spec.Affinity == nil {
spec.Affinity = &apiv1.Affinity{}
}
if spec.Affinity.PodAffinity == nil {
spec.Affinity.PodAffinity = &apiv1.PodAffinity{}
}
if spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution = []apiv1.PodAffinityTerm{}
}
spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
apiv1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
model.InteractiveDevLabel: name,
},
},
TopologyKey: "kubernetes.io/hostname",
},
)
}
//TranslateDevContainer translates a dev container
func TranslateDevContainer(c *apiv1.Container, rule *model.TranslationRule) {
c.Image = rule.Image
c.ImagePullPolicy = rule.ImagePullPolicy
if rule.WorkDir != "" {
c.WorkingDir = rule.WorkDir
}
if len(rule.Command) > 0 {
c.Command = rule.Command
c.Args = rule.Args
}
TranslateProbes(c, rule.Probes)
TranslateLifecycle(c, rule.Lifecycle)
TranslateResources(c, rule.Resources)
TranslateEnvVars(c, rule)
TranslateVolumeMounts(c, rule)
TranslateContainerSecurityContext(c, rule.SecurityContext)
}
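//TranslatePodSpec translates the pod-level fields of the dev pod: volumes, security context, service account, node selector and affinity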
func TranslatePodSpec(podSpec *apiv1.PodSpec, rule *model.TranslationRule) {
TranslateOktetoVolumes(podSpec, rule)
TranslatePodSecurityContext(podSpec, rule.SecurityContext)
TranslatePodServiceAccount(podSpec, rule.ServiceAccount)
TranslateOktetoNodeSelector(podSpec, rule.NodeSelector)
TranslateOktetoAffinity(podSpec, rule.Affinity)
}
//TranslateDinDContainer translates the DinD container
func TranslateDinDContainer(spec *apiv1.PodSpec, rule *model.TranslationRule) {
if !rule.Docker.Enabled {
return
}
c := apiv1.Container{
Name: "dind",
Image: rule.Docker.Image,
Env: []apiv1.EnvVar{
{
Name: "DOCKER_TLS_CERTDIR",
Value: model.DefaultDockerCertDir,
},
},
VolumeMounts: []apiv1.VolumeMount{},
SecurityContext: &apiv1.SecurityContext{
Privileged: pointer.BoolPtr(true),
},
}
for _, v := range rule.Volumes {
if isDockerVolumeMount(v.SubPath) {
c.VolumeMounts = append(
c.VolumeMounts,
apiv1.VolumeMount{
Name: v.Name,
MountPath: v.MountPath,
SubPath: v.SubPath,
},
)
}
}
translateInitResources(&c, rule.Docker.Resources)
spec.Containers = append(spec.Containers, c)
}
func isDockerVolumeMount(subPath string) bool {
if strings.HasPrefix(subPath, model.SourceCodeSubPath) {
return true
}
if subPath == model.DefaultDockerCertDirSubPath {
return true
}
return subPath == model.DefaultDockerCacheDirSubPath
}
//TranslateProbes translates the probes attached to a container
func TranslateProbes(c *apiv1.Container, p *model.Probes) {
if p == nil {
return
}
if !p.Liveness {
c.LivenessProbe = nil
}
if !p.Readiness {
c.ReadinessProbe = nil
}
if !p.Startup {
c.StartupProbe = nil
}
}
//TranslateLifecycle translates the lifecycle events attached to a container
func TranslateLifecycle(c *apiv1.Container, l *model.Lifecycle) {
if l == nil {
return
}
if c.Lifecycle == nil {
return
}
if !l.PostStart {
c.Lifecycle.PostStart = nil
}
if !l.PostStop {
c.Lifecycle.PreStop = nil
}
}
//TranslateResources translates the resources attached to a container
func TranslateResources(c *apiv1.Container, r model.ResourceRequirements) {
if c.Resources.Requests == nil {
c.Resources.Requests = make(map[apiv1.ResourceName]resource.Quantity)
}
if v, ok := r.Requests[apiv1.ResourceMemory]; ok {
c.Resources.Requests[apiv1.ResourceMemory] = v
}
if v, ok := r.Requests[apiv1.ResourceCPU]; ok {
c.Resources.Requests[apiv1.ResourceCPU] = v
}
if v, ok := r.Requests[model.ResourceAMDGPU]; ok {
c.Resources.Requests[model.ResourceAMDGPU] = v
}
if v, ok := r.Requests[model.ResourceNVIDIAGPU]; ok {
c.Resources.Requests[model.ResourceNVIDIAGPU] = v
}
if c.Resources.Limits == nil {
c.Resources.Limits = make(map[apiv1.ResourceName]resource.Quantity)
}
if v, ok := r.Limits[apiv1.ResourceMemory]; ok {
c.Resources.Limits[apiv1.ResourceMemory] = v
}
if v, ok := r.Limits[apiv1.ResourceCPU]; ok {
c.Resources.Limits[apiv1.ResourceCPU] = v
}
if v, ok := r.Limits[model.ResourceAMDGPU]; ok {
c.Resources.Limits[model.ResourceAMDGPU] = v
}
if v, ok := r.Limits[model.ResourceNVIDIAGPU]; ok {
c.Resources.Limits[model.ResourceNVIDIAGPU] = v
}
}
//TranslateEnvVars translates the variables attached to a container
func TranslateEnvVars(c *apiv1.Container, rule *model.TranslationRule) {
unusedDevEnvVar := map[string]string{}
for _, val := range rule.Environment {
unusedDevEnvVar[val.Name] = val.Value
}
for i, envvar := range c.Env {
if value, ok := unusedDevEnvVar[envvar.Name]; ok {
c.Env[i] = apiv1.EnvVar{Name: envvar.Name, Value: value}
delete(unusedDevEnvVar, envvar.Name)
}
}
for _, envvar := range rule.Environment {
if value, ok := unusedDevEnvVar[envvar.Name]; ok {
c.Env = append(c.Env, apiv1.EnvVar{Name: envvar.Name, Value: value})
}
}
}
//TranslateVolumeMounts translates the volumes attached to a container
func TranslateVolumeMounts(c *apiv1.Container, rule *model.TranslationRule) {
if c.VolumeMounts == nil {
c.VolumeMounts = []apiv1.VolumeMount{}
}
for _, v := range rule.Volumes {
if v.SubPath == model.DefaultDockerCacheDirSubPath {
continue
}
c.VolumeMounts = append(
c.VolumeMounts,
apiv1.VolumeMount{
Name: v.Name,
MountPath: v.MountPath,
SubPath: v.SubPath,
},
)
}
if rule.Marker == "" {
return
}
c.VolumeMounts = append(
c.VolumeMounts,
apiv1.VolumeMount{
Name: oktetoSyncSecretVolume,
MountPath: "/var/syncthing/secret/",
},
)
if len(rule.Secrets) > 0 {
c.VolumeMounts = append(
c.VolumeMounts,
apiv1.VolumeMount{
Name: oktetoDevSecretVolume,
MountPath: "/var/okteto/secret/",
},
)
}
}
//TranslateOktetoBinVolumeMounts translates the binaries mount attached to a container
func TranslateOktetoBinVolumeMounts(c *apiv1.Container) {
if c.VolumeMounts == nil {
c.VolumeMounts = []apiv1.VolumeMount{}
}
for _, vm := range c.VolumeMounts {
if vm.Name == OktetoBinName {
return
}
}
vm := apiv1.VolumeMount{
Name: OktetoBinName,
MountPath: "/var/okteto/bin",
}
c.VolumeMounts = append(c.VolumeMounts, vm)
}
//TranslateOktetoVolumes translates the dev volumes
func TranslateOktetoVolumes(spec *apiv1.PodSpec, rule *model.TranslationRule) {
if spec.Volumes == nil {
spec.Volumes = []apiv1.Volume{}
}
for _, rV := range rule.Volumes {
found := false
for i := range spec.Volumes {
if spec.Volumes[i].Name == rV.Name {
found = true
break
}
}
if found {
continue
}
v := apiv1.Volume{
Name: rV.Name,
VolumeSource: apiv1.VolumeSource{},
}
if !rule.PersistentVolume && rV.IsSyncthing() {
v.VolumeSource.EmptyDir = &apiv1.EmptyDirVolumeSource{}
} else {
v.VolumeSource.PersistentVolumeClaim = &apiv1.PersistentVolumeClaimVolumeSource{
ClaimName: rV.Name,
ReadOnly: false,
}
}
spec.Volumes = append(spec.Volumes, v)
}
}
//TranslateOktetoBinVolume translates the binaries volume attached to a container
func TranslateOktetoBinVolume(spec *apiv1.PodSpec) {
if spec.Volumes == nil {
spec.Volumes = []apiv1.Volume{}
}
for i := range spec.Volumes {
if spec.Volumes[i].Name == OktetoBinName {
return
}
}
v := apiv1.Volume{
Name: OktetoBinName,
VolumeSource: apiv1.VolumeSource{
EmptyDir: &apiv1.EmptyDirVolumeSource{},
},
}
spec.Volumes = append(spec.Volumes, v)
}
//TranslatePodSecurityContext translates the security context attached to a pod
func TranslatePodSecurityContext(spec *apiv1.PodSpec, s *model.SecurityContext) {
if s == nil {
return
}
if spec.SecurityContext == nil {
spec.SecurityContext = &apiv1.PodSecurityContext{}
}
if s.FSGroup != nil {
spec.SecurityContext.FSGroup = s.FSGroup
}
}
//TranslatePodServiceAccount translates the service account the pod uses
func TranslatePodServiceAccount(spec *apiv1.PodSpec, sa string) {
if sa != "" {
spec.ServiceAccountName = sa
}
}
//TranslateContainerSecurityContext translates the security context attached to a container
func TranslateContainerSecurityContext(c *apiv1.Container, s *model.SecurityContext) {
if s == nil {
return
}
if c.SecurityContext == nil {
c.SecurityContext = &apiv1.SecurityContext{}
}
if s.RunAsUser != nil {
c.SecurityContext.RunAsUser = s.RunAsUser
if *s.RunAsUser == 0 {
c.SecurityContext.RunAsNonRoot = &falseBoolean
}
}
if s.RunAsGroup != nil {
c.SecurityContext.RunAsGroup = s.RunAsGroup
if *s.RunAsGroup == 0 {
c.SecurityContext.RunAsNonRoot = &falseBoolean
}
}
if s.Capabilities == nil {
return
}
if c.SecurityContext.Capabilities == nil {
c.SecurityContext.Capabilities = &apiv1.Capabilities{}
}
c.SecurityContext.ReadOnlyRootFilesystem = nil
c.SecurityContext.Capabilities.Add = append(c.SecurityContext.Capabilities.Add, s.Capabilities.Add...)
c.SecurityContext.Capabilities.Drop = append(c.SecurityContext.Capabilities.Drop, s.Capabilities.Drop...)
}
func translateInitResources(c *apiv1.Container, resources model.ResourceRequirements) {
if len(resources.Requests) > 0 {
c.Resources.Requests = map[apiv1.ResourceName]resource.Quantity{}
}
for k, v := range resources.Requests {
c.Resources.Requests[k] = v
}
if len(resources.Limits) > 0 {
c.Resources.Limits = map[apiv1.ResourceName]resource.Quantity{}
}
for k, v := range resources.Limits {
c.Resources.Limits[k] = v
}
}
//TranslateOktetoInitBinContainer translates the bin init container of a pod
func TranslateOktetoInitBinContainer(initContainer model.InitContainer, spec *apiv1.PodSpec) {
c := apiv1.Container{
Name: OktetoBinName,
Image: initContainer.Image,
ImagePullPolicy: apiv1.PullIfNotPresent,
Command: []string{"sh", "-c", "cp /usr/local/bin/* /okteto/bin"},
VolumeMounts: []apiv1.VolumeMount{
{
Name: OktetoBinName,
MountPath: "/okteto/bin",
},
},
}
translateInitResources(&c, initContainer.Resources)
if spec.InitContainers == nil {
spec.InitContainers = []apiv1.Container{}
}
spec.InitContainers = append(spec.InitContainers, c)
}
//TranslateOktetoInitFromImageContainer translates the init from image container of a pod
func TranslateOktetoInitFromImageContainer(spec *apiv1.PodSpec, rule *model.TranslationRule) {
if !rule.PersistentVolume {
return
}
if spec.InitContainers == nil {
spec.InitContainers = []apiv1.Container{}
}
c := &apiv1.Container{
Name: OktetoInitVolumeContainerName,
Image: rule.Image,
ImagePullPolicy: apiv1.PullIfNotPresent,
VolumeMounts: []apiv1.VolumeMount{},
}
command := "echo initializing"
iVolume := 1
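// For each source code or data volume, copy the image content into the volume only if the volume is still empty.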
for _, v := range rule.Volumes {
if !strings.HasPrefix(v.SubPath, model.SourceCodeSubPath) && !strings.HasPrefix(v.SubPath, model.DataSubPath) {
continue
}
c.VolumeMounts = append(
c.VolumeMounts,
apiv1.VolumeMount{
Name: v.Name,
MountPath: fmt.Sprintf("/init-volume/%d", iVolume),
SubPath: v.SubPath,
},
)
mountPath := path.Join(v.MountPath, ".")
command = fmt.Sprintf("%s && ( [ \"$(ls -A /init-volume/%d)\" ] || cp -R %s/. /init-volume/%d || true)", command, iVolume, mountPath, iVolume)
iVolume++
}
c.Command = []string{"sh", "-cx", command}
translateInitResources(c, rule.InitContainer.Resources)
TranslateContainerSecurityContext(c, rule.SecurityContext)
spec.InitContainers = append(spec.InitContainers, *c)
}
//TranslateOktetoSyncSecret translates the syncthing secret volume of a pod
func TranslateOktetoSyncSecret(spec *apiv1.PodSpec, name string) {
if spec.Volumes == nil {
spec.Volumes = []apiv1.Volume{}
}
for i := range spec.Volumes {
if spec.Volumes[i].Name == oktetoSyncSecretVolume {
return
}
}
var mode int32 = 0444
v := apiv1.Volume{
Name: oktetoSyncSecretVolume,
VolumeSource: apiv1.VolumeSource{
Secret: &apiv1.SecretVolumeSource{
SecretName: fmt.Sprintf(oktetoSecretTemplate, name),
Items: []apiv1.KeyToPath{
{
Key: "config.xml",
Path: "config.xml",
Mode: &mode,
},
{
Key: "cert.pem",
Path: "cert.pem",
Mode: &mode,
},
{
Key: "key.pem",
Path: "key.pem",
Mode: &mode,
},
},
},
},
}
spec.Volumes = append(spec.Volumes, v)
}
//TranslateOktetoDevSecret translates the dev secrets volume of a pod
func TranslateOktetoDevSecret(spec *apiv1.PodSpec, secret string, secrets []model.Secret) {
if len(secrets) == 0 {
return
}
if spec.Volumes == nil {
spec.Volumes = []apiv1.Volume{}
}
for i := range spec.Volumes {
if spec.Volumes[i].Name == oktetoDevSecretVolume {
return
}
}
v := apiv1.Volume{
Name: oktetoDevSecretVolume,
VolumeSource: apiv1.VolumeSource{
Secret: &apiv1.SecretVolumeSource{
SecretName: fmt.Sprintf(oktetoSecretTemplate, secret),
Items: []apiv1.KeyToPath{},
},
},
}
for i, s := range secrets {
v.VolumeSource.Secret.Items = append(
v.VolumeSource.Secret.Items,
apiv1.KeyToPath{
Key: s.GetKeyName(),
Path: s.GetFileName(),
Mode: &secrets[i].Mode,
},
)
}
spec.Volumes = append(spec.Volumes, v)
}
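//TranslateOktetoNodeSelector sets the node selector of the dev pod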
func TranslateOktetoNodeSelector(spec *apiv1.PodSpec, nodeSelector map[string]string) {
spec.NodeSelector = nodeSelector
}
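//TranslateOktetoAffinity sets the affinity of the dev pod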
func TranslateOktetoAffinity(spec *apiv1.PodSpec, affinity *apiv1.Affinity) {
if affinity != nil {
if affinity.NodeAffinity == nil && affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil {
return
}
spec.Affinity = affinity
}
}
//TranslateDevModeOff reverses the dev mode translation
func TranslateDevModeOff(k8sObject *model.K8sObject) (*model.K8sObject, error) {
trRulesJSON := annotations.Get(k8sObject.PodTemplateSpec.GetObjectMeta(), model.TranslationAnnotation)
if trRulesJSON == "" {
if k8sObject.ObjectType == model.DeploymentObjectType {
dManifest := annotations.Get(k8sObject.GetObjectMeta(), model.DeploymentAnnotation)
if dManifest == "" {
log.Infof("%s/%s is not a development container", k8sObject.Namespace, k8sObject.Name)
return k8sObject, nil
}
dOrig := &appsv1.Deployment{}
if err := json.Unmarshal([]byte(dManifest), dOrig); err != nil {
return nil, fmt.Errorf("malformed manifest: %s", err)
}
k8sObject.UpdateDeployment(dOrig)
} else {
sfsManifest := annotations.Get(k8sObject.GetObjectMeta(), model.StatefulsetAnnotation)
if sfsManifest == "" {
log.Infof("%s/%s is not a development container", k8sObject.Namespace, k8sObject.Name)
return k8sObject, nil
}
sfsOrig := &appsv1.StatefulSet{}
if err := json.Unmarshal([]byte(sfsManifest), sfsOrig); err != nil {
return nil, fmt.Errorf("malformed manifest: %s", err)
}
k8sObject.UpdateStatefulset(sfsOrig)
}
return k8sObject, nil
}
trRules := &model.Translation{}
if err := json.Unmarshal([]byte(trRulesJSON), trRules); err != nil {
return nil, fmt.Errorf("malformed tr rules: %s", err)
}
k8sObject.SetReplicas(&trRules.Replicas)
k8sObject.UpdateStrategy(trRules.Strategy)
annotations := k8sObject.GetObjectMeta().GetAnnotations()
delete(annotations, oktetoVersionAnnotation)
deleteUserAnnotations(annotations, trRules)
k8sObject.GetObjectMeta().SetAnnotations(annotations)
annotations = k8sObject.PodTemplateSpec.GetObjectMeta().GetAnnotations()
delete(annotations, model.TranslationAnnotation)
delete(annotations, model.OktetoRestartAnnotation)
k8sObject.PodTemplateSpec.GetObjectMeta().SetAnnotations(annotations)
labels := k8sObject.GetObjectMeta().GetLabels()
delete(labels, model.DevLabel)
delete(labels, model.InteractiveDevLabel)
delete(labels, model.DetachedDevLabel)
k8sObject.GetObjectMeta().SetLabels(labels)
labels = k8sObject.PodTemplateSpec.GetObjectMeta().GetLabels()
delete(labels, model.InteractiveDevLabel)
delete(labels, model.DetachedDevLabel)
k8sObject.PodTemplateSpec.GetObjectMeta().SetLabels(labels)
k8sObject.UpdateObjectMeta()
return k8sObject, nil
}
| [
"\"OKTETO_CLIENTSIDE_TRANSLATION\""
]
| []
| [
"OKTETO_CLIENTSIDE_TRANSLATION"
]
| [] | ["OKTETO_CLIENTSIDE_TRANSLATION"] | go | 1 | 0 | |
src/org/nschmidt/ldparteditor/dialogs/startup/StartupDialog.java | /* MIT - License
Copyright (c) 2012 - this year, Nils Schmidt
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
package org.nschmidt.ldparteditor.dialogs.startup;
import static org.nschmidt.ldparteditor.helpers.WidgetUtility.WidgetUtil;
import java.io.File;
import java.util.Locale;
import org.eclipse.swt.SWT;
import org.eclipse.swt.widgets.DirectoryDialog;
import org.eclipse.swt.widgets.Shell;
import org.nschmidt.ldparteditor.enums.MyLanguage;
import org.nschmidt.ldparteditor.helpers.FileHelper;
import org.nschmidt.ldparteditor.logger.NLogger;
import org.nschmidt.ldparteditor.workbench.UserSettingState;
import org.nschmidt.ldparteditor.workbench.WorkbenchManager;
/**
* This first dialog - shown on startup - asks for mandatory information about
* the environment.
* <p>
* Note: This class should be instantiated; it defines all listeners and part of
* the business logic. It overrides the {@code open()} method to invoke the
* listener definitions ;)
*
* @author nils
*
*/
public class StartupDialog extends StartupDesign {
private String partAuthoringPath = ""; //$NON-NLS-1$
private String ldrawPath = ""; //$NON-NLS-1$
private String unofficialPath = ""; //$NON-NLS-1$
private String ldrawUserName = ""; //$NON-NLS-1$
private String license = "0 !LICENSE Redistributable under CCAL version 2.0 : see CAreadme.txt"; //$NON-NLS-1$
private String realName = ""; //$NON-NLS-1$
private Locale locale = Locale.US;
private boolean path1valid = false;
private boolean path2valid = false;
private boolean path3valid = false;
/**
* Create the dialog.
*
* @param parentShell
*/
public StartupDialog(Shell parentShell) {
super(parentShell);
}
@Override
public int open() {
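// Pre-fill the LDraw and unofficial library paths from the LDRAWDIR environment variable, if it is set.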
String ldrawDir = System.getenv("LDRAWDIR"); //$NON-NLS-1$
if (ldrawDir != null) {
setLdrawPath(ldrawDir);
setUnofficialPath(ldrawDir + File.separator + "Unofficial"); //$NON-NLS-1$
path1valid = FileHelper.canReadFromPath(ldrawPath);
path3valid = FileHelper.canReadFromPath(unofficialPath);
}
super.create();
btn_ok[0].setEnabled(false);
// MARK All final listeners will be configured here..
WidgetUtil(btn_browseLdrawPath[0]).addSelectionListener(e -> {
DirectoryDialog dlg = new DirectoryDialog(getShell());
// Set the initial filter to null
dlg.setFilterPath(null);
// Change the title bar text
dlg.setText("Define the LDraw Folder Path:"); //$NON-NLS-1$ NO_I18N!!
// Customizable message displayed in the dialog
dlg.setMessage("Select a Directory"); //$NON-NLS-1$ NO_I18N!!
// Calling open() will open and run the dialog.
// It will return the selected directory, or
// null if user cancels
String dir = dlg.open();
if (dir != null) {
// Set the text box to the new selection
txt_ldrawPath[0].setText(dir);
ldrawPath = dir;
path1valid = FileHelper.canReadFromPath(ldrawPath);
if (path1valid && unofficialPath.isEmpty()) {
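// Derive a default "Unofficial" folder inside the LDraw directory and create it if the path is writable.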
if (ldrawPath.endsWith(File.separator)) {
unofficialPath = ldrawPath + "Unofficial"; //$NON-NLS-1$
} else {
unofficialPath = ldrawPath + File.separator + "Unofficial"; //$NON-NLS-1$
}
if (FileHelper.canWriteToPath(ldrawPath)) {
try {
File unofficialFolder = new File(unofficialPath);
if (!unofficialFolder.exists()) {
unofficialFolder.mkdir();
}
path3valid = true;
} catch (SecurityException s) {
NLogger.error(getClass(), "Failed to create unofficial library folder."); //$NON-NLS-1$
unofficialPath = ""; //$NON-NLS-1$
}
}
txt_unofficialPath[0].setText(unofficialPath);
}
btn_ok[0].setEnabled(path1valid && path2valid && path3valid && !ldrawUserName.isEmpty() && !license.isEmpty() && !realName.isEmpty());
}
});
WidgetUtil(btn_browseAuthoringPath[0]).addSelectionListener(e -> {
DirectoryDialog dlg = new DirectoryDialog(getShell());
// Set the initial filter to null
dlg.setFilterPath(null);
// Change the title bar text
dlg.setText("Where is your parts authoring folder located?"); //$NON-NLS-1$ NO_I18N!!
// Customizable message displayed in the dialog
dlg.setMessage("Select a Directory"); //$NON-NLS-1$ NO_I18N!!
// Calling open() will open and run the dialog.
// It will return the selected directory, or
// null if user cancels
String dir = dlg.open();
if (dir != null) {
// Set the text box to the new selection
txt_partAuthoringPath[0].setText(dir);
partAuthoringPath = dir;
path2valid = FileHelper.canReadFromPath(partAuthoringPath) && FileHelper.canWriteToPath(partAuthoringPath);
btn_ok[0].setEnabled(path1valid && path2valid && path3valid && !ldrawUserName.isEmpty() && !license.isEmpty() && !realName.isEmpty());
}
});
WidgetUtil(btn_browseUnofficialPath[0]).addSelectionListener(e -> {
DirectoryDialog dlg = new DirectoryDialog(getShell());
// Set the initial filter to null
dlg.setFilterPath(null);
// Change the title bar text
dlg.setText("Where is your unofficial parts folder located?"); //$NON-NLS-1$ NO_I18N!!
// Customizable message displayed in the dialog
dlg.setMessage("Select a Directory"); //$NON-NLS-1$ NO_I18N!!
// Calling open() will open and run the dialog.
// It will return the selected directory, or
// null if user cancels
String dir = dlg.open();
if (dir != null) {
// Set the text box to the new selection
txt_unofficialPath[0].setText(dir);
unofficialPath = dir;
path3valid = FileHelper.canReadFromPath(unofficialPath);
btn_ok[0].setEnabled(path1valid && path2valid && path3valid && !ldrawUserName.isEmpty() && !license.isEmpty() && !realName.isEmpty());
}
});
txt_ldrawUserName[0].addListener(SWT.Modify, e -> {
ldrawUserName = txt_ldrawUserName[0].getText();
btn_ok[0].setEnabled(path1valid && path2valid && !ldrawUserName.isEmpty() && !license.isEmpty() && !realName.isEmpty());
});
txt_realName[0].addListener(SWT.Modify, e -> {
realName = txt_realName[0].getText();
btn_ok[0].setEnabled(path1valid && path2valid && !ldrawUserName.isEmpty() && !license.isEmpty() && !realName.isEmpty());
});
cmb_license[0].addListener(SWT.Modify, e -> {
license = cmb_license[0].getText();
btn_ok[0].setEnabled(path1valid && path2valid && !ldrawUserName.isEmpty() && !license.isEmpty() && !realName.isEmpty());
});
cmb_locale[0].addListener(SWT.Modify, e -> {
if (localeMap.containsKey(cmb_locale[0].getText())) {
locale = localeMap.get(cmb_locale[0].getText());
}
});
btn_ok[0].addListener(SWT.Selection, event -> {
UserSettingState userSettingState = new UserSettingState();
userSettingState.setAuthoringFolderPath(partAuthoringPath);
userSettingState.setLdrawFolderPath(ldrawPath);
userSettingState.setUnofficialFolderPath(unofficialPath);
userSettingState.setLdrawUserName(ldrawUserName);
userSettingState.setLicense(license);
userSettingState.setRealUserName(realName);
userSettingState.setUsingRelativePaths(false);
userSettingState.setLocale(locale);
MyLanguage.LOCALE = locale;
WorkbenchManager.setUserSettingState(userSettingState);
});
return super.open();
}
public void setLdrawPath(String ldrawPath) {
this.ldrawPath = ldrawPath;
}
public void setUnofficialPath(String unofficialPath) {
this.unofficialPath = unofficialPath;
}
}
| [
"\"LDRAWDIR\""
]
| []
| [
"LDRAWDIR"
]
| [] | ["LDRAWDIR"] | java | 1 | 0 | |
release.py | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility for creating release candidates and promoting release candidates to a final release.
Usage: release.py
The utility is interactive; you will be prompted for basic release information and guided through the process.
This utility assumes you already have a local kafka git folder and that you
have added remotes corresponding to both:
(i) the github apache kafka mirror and
(ii) the apache kafka git repo.
"""
from __future__ import print_function
import datetime
from getpass import getpass
import json
import os
import subprocess
import sys
import tempfile
PROJECT_NAME = "kafka"
CAPITALIZED_PROJECT_NAME = "kafka".upper()
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
# Location of the local git repository
REPO_HOME = os.environ.get("%s_HOME" % CAPITALIZED_PROJECT_NAME, SCRIPT_DIR)
# Remote name, which points to Github by default
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache-github")
PREFS_FILE = os.path.join(SCRIPT_DIR, '.release-settings.json')
delete_gitrefs = False
work_dir = None
def fail(msg):
if work_dir:
cmd("Cleaning up work directory", "rm -rf %s" % work_dir)
if delete_gitrefs:
try:
cmd("Resetting repository working state to branch %s" % starting_branch, "git reset --hard HEAD && git checkout %s" % starting_branch, shell=True)
cmd("Deleting git branches %s" % release_version, "git branch -D %s" % release_version, shell=True)
cmd("Deleting git tag %s" %rc_tag , "git tag -d %s" % rc_tag, shell=True)
except subprocess.CalledProcessError:
print("Failed when trying to clean up git references added by this script. You may need to clean up branches/tags yourself before retrying.")
print("Expected git branch: " + release_version)
print("Expected git tag: " + rc_tag)
print(msg)
sys.exit(1)
def print_output(output):
if output is None or len(output) == 0:
return
for line in output.split('\n'):
print(">", line)
def cmd(action, cmd, *args, **kwargs):
if isinstance(cmd, basestring) and not kwargs.get("shell", False):
cmd = cmd.split()
allow_failure = kwargs.pop("allow_failure", False)
stdin_log = ""
if "stdin" in kwargs and isinstance(kwargs["stdin"], basestring):
stdin_log = "--> " + kwargs["stdin"]
stdin = tempfile.TemporaryFile()
stdin.write(kwargs["stdin"])
stdin.seek(0)
kwargs["stdin"] = stdin
print(action, cmd, stdin_log)
try:
output = subprocess.check_output(cmd, *args, stderr=subprocess.STDOUT, **kwargs)
print_output(output)
except subprocess.CalledProcessError as e:
print_output(e.output)
if allow_failure:
return
print("*************************************************")
print("*** First command failure occurred here. ***")
print("*** Will now try to clean up working state. ***")
print("*************************************************")
fail("")
def cmd_output(cmd, *args, **kwargs):
if isinstance(cmd, basestring):
cmd = cmd.split()
return subprocess.check_output(cmd, *args, stderr=subprocess.STDOUT, **kwargs)
def replace(path, pattern, replacement):
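"""Rewrite the file at 'path', replacing every line that starts with 'pattern' with 'replacement'."""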
updated = []
with open(path, 'r') as f:
for line in f:
updated.append((replacement + '\n') if line.startswith(pattern) else line)
with open(path, 'w') as f:
for line in updated:
f.write(line)
def user_ok(msg):
ok = raw_input(msg)
return ok.lower() == 'y'
def sftp_mkdir(dir):
basedir, dirname = os.path.split(dir)
if not basedir:
basedir = "."
try:
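# The leading '-' before mkdir tells sftp batch mode to keep going if the command fails (e.g. the directory already exists).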
cmd_str = """
cd %s
-mkdir %s
""" % (basedir, dirname)
cmd("Creating '%s' in '%s' in your Apache home directory if it does not exist (errors are ok if the directory already exists)" % (dirname, basedir), "sftp -b - %[email protected]" % apache_id, stdin=cmd_str, allow_failure=True)
except subprocess.CalledProcessError:
# This is ok. The command fails if the directory already exists
pass
def get_pref(prefs, name, request_fn):
"Get a preference from existing preference dictionary or invoke a function that can collect it from the user"
val = prefs.get(name)
if not val:
val = request_fn()
prefs[name] = val
return val
# Load saved preferences
prefs = {}
if os.path.exists(PREFS_FILE):
with open(PREFS_FILE, 'r') as prefs_fp:
prefs = json.load(prefs_fp)
if not user_ok("""Requirements:
1. Updated docs to reference the new release version where appropriate.
2. JDK7 and JDK8 compilers and libraries
3. Your Apache ID, already configured with SSH keys on id.apache.org and SSH keys available in this shell session
4. All issues in the target release resolved with valid resolutions (if not, this script will report the problematic JIRAs)
5. A GPG key used for signing the release. This key should have been added to public Apache servers and the KEYS file on the Kafka site
6. Standard toolset installed -- git, gpg, gradle, sftp, etc.
7. ~/.gradle/gradle.properties configured with the signing properties described in the release process wiki, i.e.
mavenUrl=https://repository.apache.org/service/local/staging/deploy/maven2
mavenUsername=your-apache-id
mavenPassword=your-apache-passwd
signing.keyId=your-gpgkeyId
signing.password=your-gpg-passphrase
signing.secretKeyRingFile=/Users/your-id/.gnupg/secring.gpg (if you are using GPG 2.1 and beyond, then this file will no longer exist, and you have to manually create it from the new private key directory with "gpg --export-secret-keys -o ~/.gnupg/secring.gpg")
8. ~/.m2/settings.xml configured for pgp signing and uploading to apache release maven, i.e.,
<server>
<id>apache.releases.https</id>
<username>your-apache-id</username>
<password>your-apache-passwd</password>
</server>
<server>
<id>your-gpgkeyId</id>
<passphrase>your-gpg-passphrase</passphrase>
</server>
<profile>
<id>gpg-signing</id>
<properties>
<gpg.keyname>your-gpgkeyId</gpg.keyname>
<gpg.passphraseServerId>your-gpgkeyId</gpg.passphraseServerId>
</properties>
</profile>
9. You may also need to update some gnupg configs:
~/.gnupg/gpg-agent.conf
allow-loopback-pinentry
~/.gnupg/gpg.conf
use-agent
pinentry-mode loopback
echo RELOADAGENT | gpg-connect-agent
If any of these are missing, see https://cwiki.apache.org/confluence/display/KAFKA/Release+Process for instructions on setting them up.
Some of these may be used from these previous settings loaded from %s:
%s
Do you have all of these set up? (y/n): """ % (PREFS_FILE, json.dumps(prefs, indent=2))):
fail("Please try again once you have all the prerequisites ready.")
starting_branch = cmd_output('git rev-parse --abbrev-ref HEAD')
cmd("Verifying that you have no unstaged git changes", 'git diff --exit-code --quiet')
cmd("Verifying that you have no staged git changes", 'git diff --cached --exit-code --quiet')
release_version = raw_input("Release version (without any RC info, e.g. 0.10.2.0): ")
try:
release_version_parts = release_version.split('.')
if len(release_version_parts) != 4:
fail("Invalid release version, should have 4 version number components")
# Validate each part is a number
[int(x) for x in release_version_parts]
except ValueError:
fail("Invalid release version, should be a dotted version number")
rc = raw_input("Release candidate number: ")
dev_branch = '.'.join(release_version_parts[:3])
docs_version = ''.join(release_version_parts[:3])
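# e.g. for release 0.10.2.0 the development branch is "0.10.2" and the docs version is "0102"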
# Validate that the release doesn't already exist
cmd("Fetching tags from upstream", 'git fetch --tags %s' % PUSH_REMOTE_NAME)
tags = cmd_output('git tag').split()
if release_version in tags:
fail("The specified version has already been tagged and released.")
# TODO promotion
if not rc:
fail("Automatic Promotion is not yet supported.")
# Find the latest RC and make sure they want to promote that one
rc_tag = sorted([t for t in tags if t.startswith(release_version + '-rc')])[-1]
if not user_ok("Found %s as latest RC for this release. Is this correct? (y/n): "):
fail("This script couldn't determine which RC tag to promote, you'll need to fix up the RC tags and re-run the script.")
sys.exit(0)
# Prereq checks
apache_id = get_pref(prefs, 'apache_id', lambda: raw_input("Enter your apache username: "))
jdk7_java_home = get_pref(prefs, 'jdk7', lambda: raw_input("Enter the path for JAVA_HOME for a JDK7 compiler (blank to use default JAVA_HOME): "))
jdk7_env = dict(os.environ) if jdk7_java_home.strip() else None
if jdk7_env is not None: jdk7_env['JAVA_HOME'] = jdk7_java_home
if "1.7.0" not in cmd_output("java -version", env=jdk7_env):
fail("You must be able to build artifacts with JDK7 for Scala 2.10 and 2.11 artifacts")
jdk8_java_home = get_pref(prefs, 'jdk8', lambda: raw_input("Enter the path for JAVA_HOME for a JDK8 compiler (blank to use default JAVA_HOME): "))
jdk8_env = dict(os.environ) if jdk8_java_home.strip() else None
if jdk8_env is not None: jdk8_env['JAVA_HOME'] = jdk8_java_home
if "1.8.0" not in cmd_output("java -version", env=jdk8_env):
fail("You must be able to build artifacts with JDK8 for Scala 2.12 artifacts")
def select_gpg_key():
print("Here are the available GPG keys:")
available_keys = cmd_output("gpg --list-secret-keys")
print(available_keys)
key_name = raw_input("Which user name (enter the user name without email address): ")
if key_name not in available_keys:
fail("Couldn't find the requested key.")
return key_name
key_name = get_pref(prefs, 'gpg-key', select_gpg_key)
gpg_passphrase = get_pref(prefs, 'gpg-pass', lambda: getpass("Passphrase for this GPG key: "))
# Do a quick validation so we can fail fast if the password is incorrect
with tempfile.NamedTemporaryFile() as gpg_test_tempfile:
gpg_test_tempfile.write("abcdefg")
cmd("Testing GPG key & passphrase", ["gpg", "--batch", "--pinentry-mode", "loopback", "--passphrase-fd", "0", "-u", key_name, "--armor", "--output", gpg_test_tempfile.name + ".asc", "--detach-sig", gpg_test_tempfile.name], stdin=gpg_passphrase)
# Save preferences
print("Saving preferences to %s" % PREFS_FILE)
with open(PREFS_FILE, 'w') as prefs_fp:
prefs = json.dump(prefs, prefs_fp)
# Generate RC
try:
int(rc)
except ValueError:
fail("Invalid release candidate number: %s" % rc)
rc_tag = release_version + '-rc' + rc
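# e.g. 0.10.2.0-rc1 when the release candidate number is 1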
delete_gitrefs = True # Since we are about to start creating new git refs, enable cleanup function on failure to try to delete them
cmd("Checking out current development branch", "git checkout -b %s %s" % (release_version, PUSH_REMOTE_NAME + "/" + dev_branch))
print("Updating version numbers")
replace("gradle.properties", "version", "version=%s" % release_version)
replace("tests/kafkatest/__init__.py", "__version__", "__version__ = '%s'" % release_version)
cmd("update streams quickstart pom", ["sed", "-i", ".orig"," s/-SNAPSHOT//", "streams/quickstart/pom.xml"])
cmd("update streams quickstart java pom", ["sed", "-i", ".orig", "s/-SNAPSHOT//", "streams/quickstart/java/pom.xml"])
cmd("update streams quickstart java pom", ["sed", "-i", ".orig", "s/-SNAPSHOT//", "streams/quickstart/java/src/main/resources/archetype-resources/pom.xml"])
cmd("remove backup pom.xml", "rm streams/quickstart/pom.xml.orig")
cmd("remove backup java pom.xml", "rm streams/quickstart/java/pom.xml.orig")
cmd("remove backup java pom.xml", "rm streams/quickstart/java/src/main/resources/archetype-resources/pom.xml.orig")
# Command in explicit list due to messages with spaces
cmd("Commiting version number updates", ["git", "commit", "-a", "-m", "Bump version to %s" % release_version])
# Command in explicit list due to messages with spaces
cmd("Tagging release candidate %s" % rc_tag, ["git", "tag", "-a", rc_tag, "-m", rc_tag])
rc_githash = cmd_output("git show-ref --hash " + rc_tag)
cmd("Switching back to your starting branch", "git checkout %s" % starting_branch)
# Note that we don't use tempfile here because mkdtemp causes problems with sftp and being able to determine the absolute path to a file.
# Instead we rely on a fixed path; if it already exists from a previous attempt, we fail and ask the user to clean it up before retrying.
work_dir = os.path.join(REPO_HOME, ".release_work_dir")
if os.path.exists(work_dir):
fail("A previous attempt at a release left dirty state in the work directory. Clean up %s before proceeding. (This attempt will try to cleanup, simply retrying may be sufficient now...)" % work_dir)
os.makedirs(work_dir)
print("Temporary build working director:", work_dir)
kafka_dir = os.path.join(work_dir, 'kafka')
streams_quickstart_dir = os.path.join(kafka_dir, 'streams/quickstart')
print("Streams quickstart dir", streams_quickstart_dir)
cmd("Creating staging area for release artifacts", "mkdir kafka-" + rc_tag, cwd=work_dir)
artifacts_dir = os.path.join(work_dir, "kafka-" + rc_tag)
cmd("Cloning clean copy of repo", "git clone %s kafka" % REPO_HOME, cwd=work_dir)
cmd("Checking out RC tag", "git checkout -b %s %s" % (release_version, rc_tag), cwd=kafka_dir)
current_year = datetime.datetime.now().year
cmd("Verifying the correct year in NOTICE", "grep %s NOTICE" % current_year, cwd=kafka_dir)
with open(os.path.join(artifacts_dir, "RELEASE_NOTES.html"), 'w') as f:
print("Generating release notes")
try:
subprocess.check_call(["./release_notes.py", release_version], stdout=f)
except subprocess.CalledProcessError as e:
print_output(e.output)
print("*************************************************")
print("*** First command failure occurred here. ***")
print("*** Will now try to clean up working state. ***")
print("*************************************************")
fail("")
params = { 'release_version': release_version,
'rc_tag': rc_tag,
'artifacts_dir': artifacts_dir
}
cmd("Creating source archive", "git archive --format tar.gz --prefix kafka-%(release_version)s-src/ -o %(artifacts_dir)s/kafka-%(release_version)s-src.tgz %(rc_tag)s" % params)
cmd("Building artifacts", "gradle", cwd=kafka_dir, env=jdk7_env)
cmd("Building artifacts", "./gradlew clean releaseTarGzAll aggregatedJavadoc", cwd=kafka_dir, env=jdk7_env)
# This should be removed when Java7 is dropped (cf. KAFKA-4421)
cmd("Building artifacts for Scala 2.12", "./gradlew releaseTarGz -PscalaVersion=2.12", cwd=kafka_dir, env=jdk8_env)
cmd("Copying artifacts", "cp %s/core/build/distributions/* %s" % (kafka_dir, artifacts_dir), shell=True)
cmd("Copying artifacts", "cp -R %s/build/docs/javadoc %s" % (kafka_dir, artifacts_dir))
for filename in os.listdir(artifacts_dir):
full_path = os.path.join(artifacts_dir, filename)
if not os.path.isfile(full_path):
continue
# Commands in explicit list due to key_name possibly containing spaces
cmd("Signing " + full_path, ["gpg", "--batch", "--passphrase-fd", "0", "-u", key_name, "--armor", "--output", full_path + ".asc", "--detach-sig", full_path], stdin=gpg_passphrase)
cmd("Verifying " + full_path, ["gpg", "--verify", full_path + ".asc", full_path])
# Note that for verification, we need to make sure only the filename is used with --print-md because the command line
# argument for the file is included in the output and verification uses a simple diff that will break if an absolute path
# is used.
dir, fname = os.path.split(full_path)
cmd("Generating MD5 for " + full_path, "gpg --print-md md5 %s > %s.md5" % (fname, fname), shell=True, cwd=dir)
cmd("Generating SHA1 for " + full_path, "gpg --print-md sha1 %s > %s.sha1" % (fname, fname), shell=True, cwd=dir)
cmd("Generating SHA512 for " + full_path, "gpg --print-md sha512 %s > %s.sha512" % (fname, fname), shell=True, cwd=dir)
cmd("Listing artifacts to be uploaded:", "ls -R %s" % artifacts_dir)
if not user_ok("Going to upload the artifacts in %s, listed above, to your Apache home directory. Ok (y/n)?): " % artifacts_dir):
fail("Quitting")
sftp_mkdir("public_html")
kafka_output_dir = "kafka-" + rc_tag
sftp_mkdir(os.path.join("public_html", kafka_output_dir))
public_release_dir = os.path.join("public_html", kafka_output_dir)
# The sftp -r option doesn't seem to work as would be expected, at least with the version shipping on OS X. To work around this we process all the files and directories manually...
sftp_cmds = ""
for root, dirs, files in os.walk(artifacts_dir):
assert root.startswith(artifacts_dir)
for dir in dirs:
sftp_mkdir(os.path.join("public_html", kafka_output_dir, root[len(artifacts_dir)+1:], dir))
for file in files:
local_path = os.path.join(root, file)
remote_path = os.path.join("public_html", kafka_output_dir, root[len(artifacts_dir)+1:], file)
sftp_cmds = """
put %s %s
""" % (local_path, remote_path)
cmd("Uploading artifacts in %s to your Apache home directory" % root, "sftp -b - %[email protected]" % apache_id, stdin=sftp_cmds)
with open(os.path.expanduser("~/.gradle/gradle.properties")) as f:
contents = f.read()
if not user_ok("Going to build and upload mvn artifacts based on these settings:\n" + contents + '\nOK (y/n)?: '):
fail("Retry again later")
cmd("Building and uploading archives", "./gradlew uploadArchivesAll", cwd=kafka_dir, env=jdk7_env)
cmd("Building and uploading archives", "./gradlew uploadCoreArchives_2_12 -PscalaVersion=2.12", cwd=kafka_dir, env=jdk8_env)
cmd("Building and uploading archives", "mvn deploy -Pgpg-signing", cwd=streams_quickstart_dir, env=jdk7_env)
release_notification_props = { 'release_version': release_version,
'rc': rc,
'rc_tag': rc_tag,
'rc_githash': rc_githash,
'dev_branch': dev_branch,
'docs_version': docs_version,
'apache_id': apache_id,
}
# TODO: Many of these suggested validation steps could be automated and would help pre-validate a lot of the stuff voters test
print("""
*******************************************************************************************************************************************************
Ok. We've built and staged everything for the %(rc_tag)s.
Now you should sanity check it before proceeding. All subsequent steps start making RC data public.
Some suggested steps:
* Grab the source archive and make sure it compiles: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz
* Grab one of the binary distros and run the quickstarts against them: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka_2.11-%(release_version)s.tgz
* Extract and verify one of the site docs jars: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka_2.11-%(release_version)s-site-docs.tgz
* Build a sample against jars in the staging repo: (TODO: Can we get a temporary URL before "closing" the staged artifacts?)
* Validate GPG signatures on at least one file:
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.asc &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.md5 &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.sha1 &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.sha512 &&
gpg --verify kafka-%(release_version)s-src.tgz.asc kafka-%(release_version)s-src.tgz &&
gpg --print-md md5 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.md5 &&
gpg --print-md sha1 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.sha1 &&
gpg --print-md sha512 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.sha512 &&
rm kafka-%(release_version)s-src.tgz* &&
echo "OK" || echo "Failed"
* Validate the javadocs look ok. They are at http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/javadoc/
*******************************************************************************************************************************************************
""" % release_notification_props)
if not user_ok("Have you sufficiently verified the release artifacts (y/n)?: "):
fail("Ok, giving up")
print("Next, we need to get the Maven artifacts we published into the staging repository.")
# TODO: Can we get this closed via a REST API since we already need to collect credentials for this repo?
print("Go to https://repository.apache.org/#stagingRepositories and hit 'Close' for the new repository that was created by uploading artifacts.")
if not user_ok("Have you successfully deployed the artifacts (y/n)?: "):
fail("Ok, giving up")
if not user_ok("Ok to push RC tag %s (y/n)?: " % rc_tag):
fail("Ok, giving up")
cmd("Pushing RC tag", "git push %s %s" % (PUSH_REMOTE_NAME, rc_tag))
# Move back to starting branch and clean out the temporary release branch (e.g. 0.10.2.0) we used to generate everything
cmd("Resetting repository working state", "git reset --hard HEAD && git checkout %s" % starting_branch, shell=True)
cmd("Deleting git branches %s" % release_version, "git branch -D %s" % release_version, shell=True)
email_contents = """
To: dev@kafka.apache.org, users@kafka.apache.org, kafka-clients@googlegroups.com
Subject: [VOTE] %(release_version)s RC%(rc)s
Hello Kafka users, developers and client-developers,
This is the first candidate for release of Apache Kafka %(release_version)s.
<DESCRIPTION OF MAJOR CHANGES, INCLUDE INDICATION OF MAJOR/MINOR RELEASE>
Release notes for the %(release_version)s release:
http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/RELEASE_NOTES.html
*** Please download, test and vote by <VOTING DEADLINE, e.g. Monday, March 28, 9am PT>
Kafka's KEYS file containing PGP keys we use to sign the release:
http://kafka.apache.org/KEYS
* Release artifacts to be voted upon (source and binary):
http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/
* Maven artifacts to be voted upon:
https://repository.apache.org/content/groups/staging/
* Javadoc:
http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/javadoc/
* Tag to be voted upon (off %(dev_branch)s branch) is the %(release_version)s tag:
https://github.com/apache/kafka/releases/tag/%(rc_tag)s
* Documentation:
http://kafka.apache.org/%(docs_version)s/documentation.html
* Protocol:
http://kafka.apache.org/%(docs_version)s/protocol.html
* Successful Jenkins builds for the %(dev_branch)s branch:
Unit/integration tests: https://builds.apache.org/job/kafka-%(dev_branch)s-jdk7/<BUILD NUMBER>/
System tests: https://jenkins.confluent.io/job/system-test-kafka/job/%(dev_branch)s/<BUILD_NUMBER>/
/**************************************
Thanks,
<YOU>
""" % release_notification_props
print()
print()
print("*****************************************************************")
print()
print(email_contents)
print()
print("*****************************************************************")
print()
print("All artifacts should now be fully staged. Use the above template to send the announcement for the RC to the mailing list.")
print("IMPORTANT: Note that there are still some substitutions that need to be made in the template:")
print(" - Describe major changes in this release")
print(" - Deadline for voting, which should be at least 3 days after you send out the email")
print(" - Jenkins build numbers for successful unit & system test builds")
print(" - Fill in your name in the signature")
print(" - Finally, validate all the links before shipping!")
print("Note that all substitutions are annotated with <> around them.")
| [] | [] | ["PUSH_REMOTE_NAME", "%s_HOME\" % CAPITALIZED_PROJECT_NAM"] | [] | ["PUSH_REMOTE_NAME", "%s_HOME\" % CAPITALIZED_PROJECT_NAM"] | python | 2 | 0 | |
FW/edk2/BaseTools/Source/Python/AutoGen/AutoGen.py
## @file
# Generate AutoGen.h, AutoGen.c and *.depex files
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2018, Hewlett Packard Enterprise Development, L.P.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
## Import Modules
#
from __future__ import print_function
from __future__ import absolute_import
import Common.LongFilePathOs as os
import re
import os.path as path
import copy
import uuid
from . import GenC
from . import GenMake
from . import GenDepex
from io import BytesIO
from .StrGather import *
from .BuildEngine import BuildRule
from Common.LongFilePathSupport import CopyLongFilePath
from Common.BuildToolError import *
from Common.DataType import *
from Common.Misc import *
from Common.StringUtils import *
import Common.GlobalData as GlobalData
from GenFds.FdfParser import *
from CommonDataClass.CommonClass import SkuInfoClass
from Workspace.BuildClassObject import *
from GenPatchPcdTable.GenPatchPcdTable import parsePcdInfoFromMapFile
import Common.VpdInfoFile as VpdInfoFile
from .GenPcdDb import CreatePcdDatabaseCode
from Workspace.MetaFileCommentParser import UsageList
from Workspace.WorkspaceCommon import GetModuleLibInstances
from Common.MultipleWorkspace import MultipleWorkspace as mws
from . import InfSectionParser
import datetime
import hashlib
from .GenVar import VariableMgr, var_info
from collections import OrderedDict
from collections import defaultdict
from Workspace.WorkspaceCommon import OrderedListDict
## Regular expression for splitting Dependency Expression string into tokens
gDepexTokenPattern = re.compile("(\(|\)|\w+| \S+\.inf)")
## Regular expression for match: PCD(xxxx.yyy)
gPCDAsGuidPattern = re.compile(r"^PCD\(.+\..+\)$")
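# Illustrative note (not from the original source): gDepexTokenPattern tokenizes a
# dependency expression into parentheses, words and .inf references, e.g.
#   gDepexTokenPattern.findall("(TRUE AND FALSE)") -> ['(', 'TRUE', 'AND', 'FALSE', ')']
# while gPCDAsGuidPattern only accepts the full "PCD(TokenSpaceGuid.PcdName)" form
# used when an FFS NameGuid is given as a PCD reference (see _CheckDuplicateInFV below).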
#
# Regular expression for finding include directories; the difference between MSFT and INTEL/GCC/RVCT
# is that the former uses /I while the latter use -I to specify include directories
#
gBuildOptIncludePatternMsft = re.compile(r"(?:.*?)/I[ \t]*([^ ]*)", re.MULTILINE | re.DOTALL)
gBuildOptIncludePatternOther = re.compile(r"(?:.*?)-I[ \t]*([^ ]*)", re.MULTILINE | re.DOTALL)
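# Illustrative note (not from the original source): these patterns pull include paths
# out of a compiler command line, e.g.
#   gBuildOptIncludePatternMsft.findall("/nologo /IC:\\Foo /IC:\\Bar")  -> ['C:\\Foo', 'C:\\Bar']
#   gBuildOptIncludePatternOther.findall("-O2 -I/opt/inc -I/usr/inc")   -> ['/opt/inc', '/usr/inc']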
#
# Match name = variable
#
gEfiVarStoreNamePattern = re.compile("\s*name\s*=\s*(\w+)")
#
# The format of the guid in an efivarstore statement is as follows and must be correct:
# guid = {0xA04A27f4, 0xDF00, 0x4D42, {0xB5, 0x52, 0x39, 0x51, 0x13, 0x02, 0x11, 0x3D}}
#
gEfiVarStoreGuidPattern = re.compile("\s*guid\s*=\s*({.*?{.*?}\s*})")
## Mapping Makefile type
gMakeTypeMap = {"MSFT":"nmake", "GCC":"gmake"}
## Build rule configuration file
gDefaultBuildRuleFile = 'build_rule.txt'
## Tools definition configuration file
gDefaultToolsDefFile = 'tools_def.txt'
## Build rule default version
AutoGenReqBuildRuleVerNum = "0.1"
## default file name for AutoGen
gAutoGenCodeFileName = "AutoGen.c"
gAutoGenHeaderFileName = "AutoGen.h"
gAutoGenStringFileName = "%(module_name)sStrDefs.h"
gAutoGenStringFormFileName = "%(module_name)sStrDefs.hpk"
gAutoGenDepexFileName = "%(module_name)s.depex"
gAutoGenImageDefFileName = "%(module_name)sImgDefs.h"
gAutoGenIdfFileName = "%(module_name)sIdf.hpk"
gInfSpecVersion = "0x00010017"
#
# Template string to generate an As-Built INF
#
gAsBuiltInfHeaderString = TemplateString("""${header_comments}
# DO NOT EDIT
# FILE auto-generated
[Defines]
INF_VERSION = ${module_inf_version}
BASE_NAME = ${module_name}
FILE_GUID = ${module_guid}
MODULE_TYPE = ${module_module_type}${BEGIN}
VERSION_STRING = ${module_version_string}${END}${BEGIN}
PCD_IS_DRIVER = ${pcd_is_driver_string}${END}${BEGIN}
UEFI_SPECIFICATION_VERSION = ${module_uefi_specification_version}${END}${BEGIN}
PI_SPECIFICATION_VERSION = ${module_pi_specification_version}${END}${BEGIN}
ENTRY_POINT = ${module_entry_point}${END}${BEGIN}
UNLOAD_IMAGE = ${module_unload_image}${END}${BEGIN}
CONSTRUCTOR = ${module_constructor}${END}${BEGIN}
DESTRUCTOR = ${module_destructor}${END}${BEGIN}
SHADOW = ${module_shadow}${END}${BEGIN}
PCI_VENDOR_ID = ${module_pci_vendor_id}${END}${BEGIN}
PCI_DEVICE_ID = ${module_pci_device_id}${END}${BEGIN}
PCI_CLASS_CODE = ${module_pci_class_code}${END}${BEGIN}
PCI_REVISION = ${module_pci_revision}${END}${BEGIN}
BUILD_NUMBER = ${module_build_number}${END}${BEGIN}
SPEC = ${module_spec}${END}${BEGIN}
UEFI_HII_RESOURCE_SECTION = ${module_uefi_hii_resource_section}${END}${BEGIN}
MODULE_UNI_FILE = ${module_uni_file}${END}
[Packages.${module_arch}]${BEGIN}
${package_item}${END}
[Binaries.${module_arch}]${BEGIN}
${binary_item}${END}
[PatchPcd.${module_arch}]${BEGIN}
${patchablepcd_item}
${END}
[Protocols.${module_arch}]${BEGIN}
${protocol_item}
${END}
[Ppis.${module_arch}]${BEGIN}
${ppi_item}
${END}
[Guids.${module_arch}]${BEGIN}
${guid_item}
${END}
[PcdEx.${module_arch}]${BEGIN}
${pcd_item}
${END}
[LibraryClasses.${module_arch}]
## @LIB_INSTANCES${BEGIN}
# ${libraryclasses_item}${END}
${depexsection_item}
${userextension_tianocore_item}
${tail_comments}
[BuildOptions.${module_arch}]
## @AsBuilt${BEGIN}
## ${flags_item}${END}
""")
## Base class for AutoGen
#
# This class just implements the cache mechanism of AutoGen objects.
#
class AutoGen(object):
# database to maintain the objects in each child class
__ObjectCache = {} # (BuildTarget, ToolChain, ARCH, platform file): AutoGen object
## Factory method
#
# @param Class class object of real AutoGen class
# (WorkspaceAutoGen, ModuleAutoGen or PlatformAutoGen)
# @param Workspace Workspace directory or WorkspaceAutoGen object
# @param MetaFile The path of meta file
# @param Target Build target
# @param Toolchain Tool chain name
# @param Arch Target arch
# @param *args The specific class related parameters
# @param **kwargs The specific class related dict parameters
#
def __new__(cls, Workspace, MetaFile, Target, Toolchain, Arch, *args, **kwargs):
# check if the object has been created
Key = (Target, Toolchain, Arch, MetaFile)
try:
# if it exists, just return it directly
return cls.__ObjectCache[Key]
except:
# it didn't exist; create it, cache it, then return it
cls.__ObjectCache[Key] = super(AutoGen, cls).__new__(cls)
return cls.__ObjectCache[Key]
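# Note added for clarity (not part of the original source): because of the cache above,
# constructing the same AutoGen subclass twice with an identical
# (Target, Toolchain, Arch, MetaFile) key returns the very same object; that is why the
# subclasses below guard their __init__ with a self._Init flag so the expensive
# _InitWorker() only runs once per cached instance.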
def __init__ (self, Workspace, MetaFile, Target, Toolchain, Arch, *args, **kwargs):
super(AutoGen, self).__init__(self, Workspace, MetaFile, Target, Toolchain, Arch, *args, **kwargs)
## hash() operator
#
# The file path of platform file will be used to represent hash value of this object
#
# @retval int Hash value of the file path of platform file
#
def __hash__(self):
return hash(self.MetaFile)
## str() operator
#
# The file path of platform file will be used to represent this object
#
# @retval string String of platform file path
#
def __str__(self):
return str(self.MetaFile)
## "==" operator
def __eq__(self, Other):
return Other and self.MetaFile == Other
## Workspace AutoGen class
#
# This class is used mainly to control the whole platform build for different
# architecture. This class will generate top level makefile.
#
class WorkspaceAutoGen(AutoGen):
# call super().__init__ then call the worker function with different parameter count
def __init__(self, Workspace, MetaFile, Target, Toolchain, Arch, *args, **kwargs):
try:
self._Init
except:
super(WorkspaceAutoGen, self).__init__(Workspace, MetaFile, Target, Toolchain, Arch, *args, **kwargs)
self._InitWorker(Workspace, MetaFile, Target, Toolchain, Arch, *args, **kwargs)
self._Init = True
## Initialize WorkspaceAutoGen
#
# @param WorkspaceDir Root directory of workspace
# @param ActivePlatform Meta-file of active platform
# @param Target Build target
# @param Toolchain Tool chain name
# @param ArchList List of architecture of current build
# @param MetaFileDb Database containing meta-files
# @param BuildConfig Configuration of build
# @param ToolDefinition Tool chain definitions
# @param FlashDefinitionFile File of flash definition
# @param Fds FD list to be generated
# @param Fvs FV list to be generated
# @param Caps Capsule list to be generated
# @param SkuId SKU id from command line
#
def _InitWorker(self, WorkspaceDir, ActivePlatform, Target, Toolchain, ArchList, MetaFileDb,
BuildConfig, ToolDefinition, FlashDefinitionFile='', Fds=None, Fvs=None, Caps=None, SkuId='', UniFlag=None,
Progress=None, BuildModule=None):
self.BuildDatabase = MetaFileDb
self.MetaFile = ActivePlatform
self.WorkspaceDir = WorkspaceDir
self.Platform = self.BuildDatabase[self.MetaFile, TAB_ARCH_COMMON, Target, Toolchain]
GlobalData.gActivePlatform = self.Platform
self.BuildTarget = Target
self.ToolChain = Toolchain
self.ArchList = ArchList
self.SkuId = SkuId
self.UniFlag = UniFlag
self.TargetTxt = BuildConfig
self.ToolDef = ToolDefinition
self.FdfFile = FlashDefinitionFile
self.FdTargetList = Fds if Fds else []
self.FvTargetList = Fvs if Fvs else []
self.CapTargetList = Caps if Caps else []
self.AutoGenObjectList = []
self._BuildDir = None
self._FvDir = None
self._MakeFileDir = None
self._BuildCommand = None
self._GuidDict = {}
# there are many relative directory operations, so ...
os.chdir(self.WorkspaceDir)
#
# Merge Arch
#
if not self.ArchList:
ArchList = set(self.Platform.SupArchList)
else:
ArchList = set(self.ArchList) & set(self.Platform.SupArchList)
if not ArchList:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "Invalid ARCH specified. [Valid ARCH: %s]" % (" ".join(self.Platform.SupArchList)))
elif self.ArchList and len(ArchList) != len(self.ArchList):
SkippedArchList = set(self.ArchList).symmetric_difference(set(self.Platform.SupArchList))
EdkLogger.verbose("\nArch [%s] is ignored because the platform supports [%s] only!"
% (" ".join(SkippedArchList), " ".join(self.Platform.SupArchList)))
self.ArchList = tuple(ArchList)
# Validate build target
if self.BuildTarget not in self.Platform.BuildTargets:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData="Build target [%s] is not supported by the platform. [Valid target: %s]"
% (self.BuildTarget, " ".join(self.Platform.BuildTargets)))
# parse FDF file to get PCDs in it, if any
if not self.FdfFile:
self.FdfFile = self.Platform.FlashDefinition
EdkLogger.info("")
if self.ArchList:
EdkLogger.info('%-16s = %s' % ("Architecture(s)", ' '.join(self.ArchList)))
EdkLogger.info('%-16s = %s' % ("Build target", self.BuildTarget))
EdkLogger.info('%-16s = %s' % ("Toolchain", self.ToolChain))
EdkLogger.info('\n%-24s = %s' % ("Active Platform", self.Platform))
if BuildModule:
EdkLogger.info('%-24s = %s' % ("Active Module", BuildModule))
if self.FdfFile:
EdkLogger.info('%-24s = %s' % ("Flash Image Definition", self.FdfFile))
EdkLogger.verbose("\nFLASH_DEFINITION = %s" % self.FdfFile)
if Progress:
Progress.Start("\nProcessing meta-data")
if self.FdfFile:
#
# Mark now build in AutoGen Phase
#
GlobalData.gAutoGenPhase = True
Fdf = FdfParser(self.FdfFile.Path)
Fdf.ParseFile()
GlobalData.gFdfParser = Fdf
GlobalData.gAutoGenPhase = False
PcdSet = Fdf.Profile.PcdDict
if Fdf.CurrentFdName and Fdf.CurrentFdName in Fdf.Profile.FdDict:
FdDict = Fdf.Profile.FdDict[Fdf.CurrentFdName]
for FdRegion in FdDict.RegionList:
if str(FdRegion.RegionType) == 'FILE' and self.Platform.VpdToolGuid in str(FdRegion.RegionDataList):
if int(FdRegion.Offset) % 8 != 0:
EdkLogger.error("build", FORMAT_INVALID, 'The VPD Base Address %s must be 8-byte aligned.' % (FdRegion.Offset))
ModuleList = Fdf.Profile.InfList
self.FdfProfile = Fdf.Profile
for fvname in self.FvTargetList:
if fvname.upper() not in self.FdfProfile.FvDict:
EdkLogger.error("build", OPTION_VALUE_INVALID,
"No such an FV in FDF file: %s" % fvname)
# A DSC file may use FILE_GUID to override a module; in that case Platform.Modules uses FILE_GUID + module.inf as the key,
# but the path (self.MetaFile.Path) is the real path
for key in self.FdfProfile.InfDict:
if key == 'ArchTBD':
MetaFile_cache = defaultdict(set)
for Arch in self.ArchList:
Current_Platform_cache = self.BuildDatabase[self.MetaFile, Arch, Target, Toolchain]
for Pkey in Current_Platform_cache.Modules:
MetaFile_cache[Arch].add(Current_Platform_cache.Modules[Pkey].MetaFile)
for Inf in self.FdfProfile.InfDict[key]:
ModuleFile = PathClass(NormPath(Inf), GlobalData.gWorkspace, Arch)
for Arch in self.ArchList:
if ModuleFile in MetaFile_cache[Arch]:
break
else:
ModuleData = self.BuildDatabase[ModuleFile, Arch, Target, Toolchain]
if not ModuleData.IsBinaryModule:
EdkLogger.error('build', PARSER_ERROR, "Module %s NOT found in DSC file; Is it really a binary module?" % ModuleFile)
else:
for Arch in self.ArchList:
if Arch == key:
Platform = self.BuildDatabase[self.MetaFile, Arch, Target, Toolchain]
MetaFileList = set()
for Pkey in Platform.Modules:
MetaFileList.add(Platform.Modules[Pkey].MetaFile)
for Inf in self.FdfProfile.InfDict[key]:
ModuleFile = PathClass(NormPath(Inf), GlobalData.gWorkspace, Arch)
if ModuleFile in MetaFileList:
continue
ModuleData = self.BuildDatabase[ModuleFile, Arch, Target, Toolchain]
if not ModuleData.IsBinaryModule:
EdkLogger.error('build', PARSER_ERROR, "Module %s NOT found in DSC file; Is it really a binary module?" % ModuleFile)
else:
PcdSet = {}
ModuleList = []
self.FdfProfile = None
if self.FdTargetList:
EdkLogger.info("No flash definition file found. FD [%s] will be ignored." % " ".join(self.FdTargetList))
self.FdTargetList = []
if self.FvTargetList:
EdkLogger.info("No flash definition file found. FV [%s] will be ignored." % " ".join(self.FvTargetList))
self.FvTargetList = []
if self.CapTargetList:
EdkLogger.info("No flash definition file found. Capsule [%s] will be ignored." % " ".join(self.CapTargetList))
self.CapTargetList = []
# apply SKU and inject PCDs from Flash Definition file
for Arch in self.ArchList:
Platform = self.BuildDatabase[self.MetaFile, Arch, Target, Toolchain]
PlatformPcds = Platform.Pcds
self._GuidDict = Platform._GuidDict
SourcePcdDict = {TAB_PCDS_DYNAMIC_EX:set(), TAB_PCDS_PATCHABLE_IN_MODULE:set(),TAB_PCDS_DYNAMIC:set(),TAB_PCDS_FIXED_AT_BUILD:set()}
BinaryPcdDict = {TAB_PCDS_DYNAMIC_EX:set(), TAB_PCDS_PATCHABLE_IN_MODULE:set()}
SourcePcdDict_Keys = SourcePcdDict.keys()
BinaryPcdDict_Keys = BinaryPcdDict.keys()
# generate the SourcePcdDict and BinaryPcdDict
PGen = PlatformAutoGen(self, self.MetaFile, Target, Toolchain, Arch)
for BuildData in PGen.BuildDatabase._CACHE_.values():
if BuildData.Arch != Arch:
continue
if BuildData.MetaFile.Ext == '.inf':
for key in BuildData.Pcds:
if BuildData.Pcds[key].Pending:
if key in Platform.Pcds:
PcdInPlatform = Platform.Pcds[key]
if PcdInPlatform.Type:
BuildData.Pcds[key].Type = PcdInPlatform.Type
BuildData.Pcds[key].Pending = False
if BuildData.MetaFile in Platform.Modules:
PlatformModule = Platform.Modules[str(BuildData.MetaFile)]
if key in PlatformModule.Pcds:
PcdInPlatform = PlatformModule.Pcds[key]
if PcdInPlatform.Type:
BuildData.Pcds[key].Type = PcdInPlatform.Type
BuildData.Pcds[key].Pending = False
else:
#PCD used in a library: if the PCD type is still Pending, take the type from the referencing module
if BuildData.Pcds[key].Pending:
MGen = ModuleAutoGen(self, BuildData.MetaFile, Target, Toolchain, Arch, self.MetaFile)
if MGen and MGen.IsLibrary:
if MGen in PGen.LibraryAutoGenList:
ReferenceModules = MGen._ReferenceModules
for ReferenceModule in ReferenceModules:
if ReferenceModule.MetaFile in Platform.Modules:
RefPlatformModule = Platform.Modules[str(ReferenceModule.MetaFile)]
if key in RefPlatformModule.Pcds:
PcdInReferenceModule = RefPlatformModule.Pcds[key]
if PcdInReferenceModule.Type:
BuildData.Pcds[key].Type = PcdInReferenceModule.Type
BuildData.Pcds[key].Pending = False
break
if TAB_PCDS_DYNAMIC_EX in BuildData.Pcds[key].Type:
if BuildData.IsBinaryModule:
BinaryPcdDict[TAB_PCDS_DYNAMIC_EX].add((BuildData.Pcds[key].TokenCName, BuildData.Pcds[key].TokenSpaceGuidCName))
else:
SourcePcdDict[TAB_PCDS_DYNAMIC_EX].add((BuildData.Pcds[key].TokenCName, BuildData.Pcds[key].TokenSpaceGuidCName))
elif TAB_PCDS_PATCHABLE_IN_MODULE in BuildData.Pcds[key].Type:
if BuildData.MetaFile.Ext == '.inf':
if BuildData.IsBinaryModule:
BinaryPcdDict[TAB_PCDS_PATCHABLE_IN_MODULE].add((BuildData.Pcds[key].TokenCName, BuildData.Pcds[key].TokenSpaceGuidCName))
else:
SourcePcdDict[TAB_PCDS_PATCHABLE_IN_MODULE].add((BuildData.Pcds[key].TokenCName, BuildData.Pcds[key].TokenSpaceGuidCName))
elif TAB_PCDS_DYNAMIC in BuildData.Pcds[key].Type:
SourcePcdDict[TAB_PCDS_DYNAMIC].add((BuildData.Pcds[key].TokenCName, BuildData.Pcds[key].TokenSpaceGuidCName))
elif TAB_PCDS_FIXED_AT_BUILD in BuildData.Pcds[key].Type:
SourcePcdDict[TAB_PCDS_FIXED_AT_BUILD].add((BuildData.Pcds[key].TokenCName, BuildData.Pcds[key].TokenSpaceGuidCName))
else:
pass
#
# A PCD can only use one type for all source modules
#
for i in SourcePcdDict_Keys:
for j in SourcePcdDict_Keys:
if i != j:
Intersections = SourcePcdDict[i].intersection(SourcePcdDict[j])
if len(Intersections) > 0:
EdkLogger.error(
'build',
FORMAT_INVALID,
"Building modules from source INFs, following PCD use %s and %s access method. It must be corrected to use only one access method." % (i, j),
ExtraData='\n\t'.join(str(P[1]+'.'+P[0]) for P in Intersections)
)
#
# intersect the BinaryPcd sets to find mixed PCDs
#
for i in BinaryPcdDict_Keys:
for j in BinaryPcdDict_Keys:
if i != j:
Intersections = BinaryPcdDict[i].intersection(BinaryPcdDict[j])
for item in Intersections:
NewPcd1 = (item[0] + '_' + i, item[1])
NewPcd2 = (item[0] + '_' + j, item[1])
if item not in GlobalData.MixedPcd:
GlobalData.MixedPcd[item] = [NewPcd1, NewPcd2]
else:
if NewPcd1 not in GlobalData.MixedPcd[item]:
GlobalData.MixedPcd[item].append(NewPcd1)
if NewPcd2 not in GlobalData.MixedPcd[item]:
GlobalData.MixedPcd[item].append(NewPcd2)
#
# intersect the SourcePcd and BinaryPcd sets to find mixed PCDs
#
for i in SourcePcdDict_Keys:
for j in BinaryPcdDict_Keys:
if i != j:
Intersections = SourcePcdDict[i].intersection(BinaryPcdDict[j])
for item in Intersections:
NewPcd1 = (item[0] + '_' + i, item[1])
NewPcd2 = (item[0] + '_' + j, item[1])
if item not in GlobalData.MixedPcd:
GlobalData.MixedPcd[item] = [NewPcd1, NewPcd2]
else:
if NewPcd1 not in GlobalData.MixedPcd[item]:
GlobalData.MixedPcd[item].append(NewPcd1)
if NewPcd2 not in GlobalData.MixedPcd[item]:
GlobalData.MixedPcd[item].append(NewPcd2)
for BuildData in PGen.BuildDatabase._CACHE_.values():
if BuildData.Arch != Arch:
continue
for key in BuildData.Pcds:
for SinglePcd in GlobalData.MixedPcd:
if (BuildData.Pcds[key].TokenCName, BuildData.Pcds[key].TokenSpaceGuidCName) == SinglePcd:
for item in GlobalData.MixedPcd[SinglePcd]:
Pcd_Type = item[0].split('_')[-1]
if (Pcd_Type == BuildData.Pcds[key].Type) or (Pcd_Type == TAB_PCDS_DYNAMIC_EX and BuildData.Pcds[key].Type in PCD_DYNAMIC_EX_TYPE_SET) or \
(Pcd_Type == TAB_PCDS_DYNAMIC and BuildData.Pcds[key].Type in PCD_DYNAMIC_TYPE_SET):
Value = BuildData.Pcds[key]
Value.TokenCName = BuildData.Pcds[key].TokenCName + '_' + Pcd_Type
if len(key) == 2:
newkey = (Value.TokenCName, key[1])
elif len(key) == 3:
newkey = (Value.TokenCName, key[1], key[2])
del BuildData.Pcds[key]
BuildData.Pcds[newkey] = Value
break
break
# handle the mixed pcd in FDF file
for key in PcdSet:
if key in GlobalData.MixedPcd:
Value = PcdSet[key]
del PcdSet[key]
for item in GlobalData.MixedPcd[key]:
PcdSet[item] = Value
#Collect package set information from INF of FDF
PkgSet = set()
for Inf in ModuleList:
ModuleFile = PathClass(NormPath(Inf), GlobalData.gWorkspace, Arch)
if ModuleFile in Platform.Modules:
continue
ModuleData = self.BuildDatabase[ModuleFile, Arch, Target, Toolchain]
PkgSet.update(ModuleData.Packages)
Pkgs = list(PkgSet) + list(PGen.PackageList)
DecPcds = set()
DecPcdsKey = set()
for Pkg in Pkgs:
for Pcd in Pkg.Pcds:
DecPcds.add((Pcd[0], Pcd[1]))
DecPcdsKey.add((Pcd[0], Pcd[1], Pcd[2]))
Platform.SkuName = self.SkuId
for Name, Guid,Fileds in PcdSet:
if (Name, Guid) not in DecPcds:
EdkLogger.error(
'build',
PARSER_ERROR,
"PCD (%s.%s) used in FDF is not declared in DEC files." % (Guid, Name),
File = self.FdfProfile.PcdFileLineDict[Name, Guid][0],
Line = self.FdfProfile.PcdFileLineDict[Name, Guid][1]
)
else:
# Check whether a Dynamic or DynamicEx PCD is used in the FDF file. If so, break the build and give an error message.
if (Name, Guid, TAB_PCDS_FIXED_AT_BUILD) in DecPcdsKey \
or (Name, Guid, TAB_PCDS_PATCHABLE_IN_MODULE) in DecPcdsKey \
or (Name, Guid, TAB_PCDS_FEATURE_FLAG) in DecPcdsKey:
continue
elif (Name, Guid, TAB_PCDS_DYNAMIC) in DecPcdsKey or (Name, Guid, TAB_PCDS_DYNAMIC_EX) in DecPcdsKey:
EdkLogger.error(
'build',
PARSER_ERROR,
"Using Dynamic or DynamicEx type of PCD [%s.%s] in FDF file is not allowed." % (Guid, Name),
File = self.FdfProfile.PcdFileLineDict[Name, Guid][0],
Line = self.FdfProfile.PcdFileLineDict[Name, Guid][1]
)
Pa = PlatformAutoGen(self, self.MetaFile, Target, Toolchain, Arch)
#
# Explicitly collect platform's dynamic PCDs
#
Pa.CollectPlatformDynamicPcds()
Pa.CollectFixedAtBuildPcds()
self.AutoGenObjectList.append(Pa)
#
# Generate Package level hash value
#
GlobalData.gPackageHash[Arch] = {}
if GlobalData.gUseHashCache:
for Pkg in Pkgs:
self._GenPkgLevelHash(Pkg)
#
# Check PCDs token value conflict in each DEC file.
#
self._CheckAllPcdsTokenValueConflict()
#
# Check PCD type and definition between DSC and DEC
#
self._CheckPcdDefineAndType()
# if self.FdfFile:
# self._CheckDuplicateInFV(Fdf)
#
# Create BuildOptions Macro & PCD metafile, also add the Active Platform and FDF file.
#
content = 'gCommandLineDefines: '
content += str(GlobalData.gCommandLineDefines)
content += os.linesep
content += 'BuildOptionPcd: '
content += str(GlobalData.BuildOptionPcd)
content += os.linesep
content += 'Active Platform: '
content += str(self.Platform)
content += os.linesep
if self.FdfFile:
content += 'Flash Image Definition: '
content += str(self.FdfFile)
content += os.linesep
SaveFileOnChange(os.path.join(self.BuildDir, 'BuildOptions'), content, False)
#
# Create PcdToken Number file for Dynamic/DynamicEx Pcd.
#
PcdTokenNumber = 'PcdTokenNumber: '
if Pa.PcdTokenNumber:
if Pa.DynamicPcdList:
for Pcd in Pa.DynamicPcdList:
PcdTokenNumber += os.linesep
PcdTokenNumber += str((Pcd.TokenCName, Pcd.TokenSpaceGuidCName))
PcdTokenNumber += ' : '
PcdTokenNumber += str(Pa.PcdTokenNumber[Pcd.TokenCName, Pcd.TokenSpaceGuidCName])
SaveFileOnChange(os.path.join(self.BuildDir, 'PcdTokenNumber'), PcdTokenNumber, False)
#
# Get set of workspace metafiles
#
AllWorkSpaceMetaFiles = self._GetMetaFiles(Target, Toolchain, Arch)
#
# Retrieve latest modified time of all metafiles
#
SrcTimeStamp = 0
for f in AllWorkSpaceMetaFiles:
if os.stat(f)[8] > SrcTimeStamp:
SrcTimeStamp = os.stat(f)[8]
self._SrcTimeStamp = SrcTimeStamp
if GlobalData.gUseHashCache:
m = hashlib.md5()
for files in AllWorkSpaceMetaFiles:
if files.endswith('.dec'):
continue
f = open(files, 'r')
Content = f.read()
f.close()
m.update(Content)
SaveFileOnChange(os.path.join(self.BuildDir, 'AutoGen.hash'), m.hexdigest(), True)
GlobalData.gPlatformHash = m.hexdigest()
#
# Write metafile list to build directory
#
AutoGenFilePath = os.path.join(self.BuildDir, 'AutoGen')
if os.path.exists (AutoGenFilePath):
os.remove(AutoGenFilePath)
if not os.path.exists(self.BuildDir):
os.makedirs(self.BuildDir)
with open(os.path.join(self.BuildDir, 'AutoGen'), 'w+') as file:
for f in AllWorkSpaceMetaFiles:
print(f, file=file)
return True
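## _GenPkgLevelHash() method (descriptive comment added for clarity, not in the original)
#
# Computes an MD5 over the package DEC file plus every file under the package's
# declared include directories, writes it to <PackageName>.hash under the build
# directory, and records it in GlobalData.gPackageHash so hash-cache builds can
# detect package-level changes.
#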
def _GenPkgLevelHash(self, Pkg):
if Pkg.PackageName in GlobalData.gPackageHash[Pkg.Arch]:
return
PkgDir = os.path.join(self.BuildDir, Pkg.Arch, Pkg.PackageName)
CreateDirectory(PkgDir)
HashFile = os.path.join(PkgDir, Pkg.PackageName + '.hash')
m = hashlib.md5()
# Get .dec file's hash value
f = open(Pkg.MetaFile.Path, 'r')
Content = f.read()
f.close()
m.update(Content)
# Get include files hash value
if Pkg.Includes:
for inc in sorted(Pkg.Includes, key=lambda x: str(x)):
for Root, Dirs, Files in os.walk(str(inc)):
for File in sorted(Files):
File_Path = os.path.join(Root, File)
f = open(File_Path, 'r')
Content = f.read()
f.close()
m.update(Content)
SaveFileOnChange(HashFile, m.hexdigest(), True)
GlobalData.gPackageHash[Pkg.Arch][Pkg.PackageName] = m.hexdigest()
def _GetMetaFiles(self, Target, Toolchain, Arch):
AllWorkSpaceMetaFiles = set()
#
# add fdf
#
if self.FdfFile:
AllWorkSpaceMetaFiles.add (self.FdfFile.Path)
for f in GlobalData.gFdfParser.GetAllIncludedFile():
AllWorkSpaceMetaFiles.add (f.FileName)
#
# add dsc
#
AllWorkSpaceMetaFiles.add(self.MetaFile.Path)
#
# add build_rule.txt & tools_def.txt
#
AllWorkSpaceMetaFiles.add(os.path.join(GlobalData.gConfDirectory, gDefaultBuildRuleFile))
AllWorkSpaceMetaFiles.add(os.path.join(GlobalData.gConfDirectory, gDefaultToolsDefFile))
# add BuildOption metafile
#
AllWorkSpaceMetaFiles.add(os.path.join(self.BuildDir, 'BuildOptions'))
# add PcdToken Number file for Dynamic/DynamicEx Pcd
#
AllWorkSpaceMetaFiles.add(os.path.join(self.BuildDir, 'PcdTokenNumber'))
for Arch in self.ArchList:
#
# add dec
#
for Package in PlatformAutoGen(self, self.MetaFile, Target, Toolchain, Arch).PackageList:
AllWorkSpaceMetaFiles.add(Package.MetaFile.Path)
#
# add included dsc
#
for filePath in self.BuildDatabase[self.MetaFile, Arch, Target, Toolchain]._RawData.IncludedFiles:
AllWorkSpaceMetaFiles.add(filePath.Path)
return AllWorkSpaceMetaFiles
## _CheckDuplicateInFV() method
#
# Check whether duplicate modules/files exist in an FV section.
# The check is based on the file GUID.
#
def _CheckDuplicateInFV(self, Fdf):
for Fv in Fdf.Profile.FvDict:
_GuidDict = {}
for FfsFile in Fdf.Profile.FvDict[Fv].FfsList:
if FfsFile.InfFileName and FfsFile.NameGuid is None:
#
# Get INF file GUID
#
InfFoundFlag = False
for Pa in self.AutoGenObjectList:
if InfFoundFlag:
break
for Module in Pa.ModuleAutoGenList:
if path.normpath(Module.MetaFile.File) == path.normpath(FfsFile.InfFileName):
InfFoundFlag = True
if Module.Guid.upper() not in _GuidDict:
_GuidDict[Module.Guid.upper()] = FfsFile
break
else:
EdkLogger.error("build",
FORMAT_INVALID,
"Duplicate GUID found for these lines: Line %d: %s and Line %d: %s. GUID: %s" % (FfsFile.CurrentLineNum,
FfsFile.CurrentLineContent,
_GuidDict[Module.Guid.upper()].CurrentLineNum,
_GuidDict[Module.Guid.upper()].CurrentLineContent,
Module.Guid.upper()),
ExtraData=self.FdfFile)
#
# Some INF files have no entry in the DSC file.
#
if not InfFoundFlag:
if FfsFile.InfFileName.find('$') == -1:
InfPath = NormPath(FfsFile.InfFileName)
if not os.path.exists(InfPath):
EdkLogger.error('build', GENFDS_ERROR, "Non-existent module %s!" % (FfsFile.InfFileName))
PathClassObj = PathClass(FfsFile.InfFileName, self.WorkspaceDir)
#
# Here we just need to get FILE_GUID from the INF file, using 'COMMON' as the ARCH attribute, and the
# BuildObject from any one of AutoGenObjectList is enough.
#
InfObj = self.AutoGenObjectList[0].BuildDatabase.WorkspaceDb.BuildObject[PathClassObj, TAB_ARCH_COMMON, self.BuildTarget, self.ToolChain]
if InfObj.Guid.upper() not in _GuidDict:
_GuidDict[InfObj.Guid.upper()] = FfsFile
else:
EdkLogger.error("build",
FORMAT_INVALID,
"Duplicate GUID found for these lines: Line %d: %s and Line %d: %s. GUID: %s" % (FfsFile.CurrentLineNum,
FfsFile.CurrentLineContent,
_GuidDict[InfObj.Guid.upper()].CurrentLineNum,
_GuidDict[InfObj.Guid.upper()].CurrentLineContent,
InfObj.Guid.upper()),
ExtraData=self.FdfFile)
InfFoundFlag = False
if FfsFile.NameGuid is not None:
#
# If the NameGuid references a PCD name.
# The style must match: PCD(xxxx.yyy)
#
if gPCDAsGuidPattern.match(FfsFile.NameGuid):
#
# Replace the PCD value.
#
_PcdName = FfsFile.NameGuid.lstrip("PCD(").rstrip(")")
PcdFoundFlag = False
for Pa in self.AutoGenObjectList:
if not PcdFoundFlag:
for PcdItem in Pa.AllPcdList:
if (PcdItem.TokenSpaceGuidCName + "." + PcdItem.TokenCName) == _PcdName:
#
# First convert from CFormatGuid to GUID string
#
_PcdGuidString = GuidStructureStringToGuidString(PcdItem.DefaultValue)
if not _PcdGuidString:
#
# Then try Byte array.
#
_PcdGuidString = GuidStructureByteArrayToGuidString(PcdItem.DefaultValue)
if not _PcdGuidString:
#
# Not Byte array or CFormat GUID, raise error.
#
EdkLogger.error("build",
FORMAT_INVALID,
"The format of PCD value is incorrect. PCD: %s , Value: %s\n" % (_PcdName, PcdItem.DefaultValue),
ExtraData=self.FdfFile)
if _PcdGuidString.upper() not in _GuidDict:
_GuidDict[_PcdGuidString.upper()] = FfsFile
PcdFoundFlag = True
break
else:
EdkLogger.error("build",
FORMAT_INVALID,
"Duplicate GUID found for these lines: Line %d: %s and Line %d: %s. GUID: %s" % (FfsFile.CurrentLineNum,
FfsFile.CurrentLineContent,
_GuidDict[_PcdGuidString.upper()].CurrentLineNum,
_GuidDict[_PcdGuidString.upper()].CurrentLineContent,
FfsFile.NameGuid.upper()),
ExtraData=self.FdfFile)
if FfsFile.NameGuid.upper() not in _GuidDict:
_GuidDict[FfsFile.NameGuid.upper()] = FfsFile
else:
#
# Two raw file GUID conflict.
#
EdkLogger.error("build",
FORMAT_INVALID,
"Duplicate GUID found for these lines: Line %d: %s and Line %d: %s. GUID: %s" % (FfsFile.CurrentLineNum,
FfsFile.CurrentLineContent,
_GuidDict[FfsFile.NameGuid.upper()].CurrentLineNum,
_GuidDict[FfsFile.NameGuid.upper()].CurrentLineContent,
FfsFile.NameGuid.upper()),
ExtraData=self.FdfFile)
def _CheckPcdDefineAndType(self):
PcdTypeSet = {TAB_PCDS_FIXED_AT_BUILD,
TAB_PCDS_PATCHABLE_IN_MODULE,
TAB_PCDS_FEATURE_FLAG,
TAB_PCDS_DYNAMIC,
TAB_PCDS_DYNAMIC_EX}
# This dict stores PCDs which are not used by any module with the specified arches
UnusedPcd = OrderedDict()
for Pa in self.AutoGenObjectList:
# Key of DSC's Pcds dictionary is PcdCName, TokenSpaceGuid
for Pcd in Pa.Platform.Pcds:
PcdType = Pa.Platform.Pcds[Pcd].Type
# If no PCD type, this PCD comes from FDF
if not PcdType:
continue
# Try to remove Hii and Vpd suffix
if PcdType.startswith(TAB_PCDS_DYNAMIC_EX):
PcdType = TAB_PCDS_DYNAMIC_EX
elif PcdType.startswith(TAB_PCDS_DYNAMIC):
PcdType = TAB_PCDS_DYNAMIC
for Package in Pa.PackageList:
# Key of DEC's Pcds dictionary is PcdCName, TokenSpaceGuid, PcdType
if (Pcd[0], Pcd[1], PcdType) in Package.Pcds:
break
for Type in PcdTypeSet:
if (Pcd[0], Pcd[1], Type) in Package.Pcds:
EdkLogger.error(
'build',
FORMAT_INVALID,
"Type [%s] of PCD [%s.%s] in DSC file doesn't match the type [%s] defined in DEC file." \
% (Pa.Platform.Pcds[Pcd].Type, Pcd[1], Pcd[0], Type),
ExtraData=None
)
return
else:
UnusedPcd.setdefault(Pcd, []).append(Pa.Arch)
for Pcd in UnusedPcd:
EdkLogger.warn(
'build',
"The PCD was not specified by any INF module in the platform for the given architecture.\n"
"\tPCD: [%s.%s]\n\tPlatform: [%s]\n\tArch: %s"
% (Pcd[1], Pcd[0], os.path.basename(str(self.MetaFile)), str(UnusedPcd[Pcd])),
ExtraData=None
)
def __repr__(self):
return "%s [%s]" % (self.MetaFile, ", ".join(self.ArchList))
## Return the directory to store FV files
def _GetFvDir(self):
if self._FvDir is None:
self._FvDir = path.join(self.BuildDir, TAB_FV_DIRECTORY)
return self._FvDir
## Return the directory to store all intermediate and final files built
def _GetBuildDir(self):
if self._BuildDir is None:
return self.AutoGenObjectList[0].BuildDir
## Return the build output directory platform specifies
def _GetOutputDir(self):
return self.Platform.OutputDirectory
## Return platform name
def _GetName(self):
return self.Platform.PlatformName
## Return meta-file GUID
def _GetGuid(self):
return self.Platform.Guid
## Return platform version
def _GetVersion(self):
return self.Platform.Version
## Return paths of tools
def _GetToolDefinition(self):
return self.AutoGenObjectList[0].ToolDefinition
## Return directory of platform makefile
#
# @retval string Makefile directory
#
def _GetMakeFileDir(self):
if self._MakeFileDir is None:
self._MakeFileDir = self.BuildDir
return self._MakeFileDir
## Return build command string
#
# @retval string Build command string
#
def _GetBuildCommand(self):
if self._BuildCommand is None:
# BuildCommand should be all the same. So just get one from platform AutoGen
self._BuildCommand = self.AutoGenObjectList[0].BuildCommand
return self._BuildCommand
## Check the PCDs token value conflict in each DEC file.
#
# Breaks the build and raises an error message when two PCDs conflict.
#
# @return None
#
def _CheckAllPcdsTokenValueConflict(self):
for Pa in self.AutoGenObjectList:
for Package in Pa.PackageList:
PcdList = Package.Pcds.values()
PcdList.sort(lambda x, y: cmp(int(x.TokenValue, 0), int(y.TokenValue, 0)))
Count = 0
while (Count < len(PcdList) - 1) :
Item = PcdList[Count]
ItemNext = PcdList[Count + 1]
#
# Make sure the TokenValue is unique within the same token space
#
if (int(Item.TokenValue, 0) == int(ItemNext.TokenValue, 0)):
SameTokenValuePcdList = []
SameTokenValuePcdList.append(Item)
SameTokenValuePcdList.append(ItemNext)
RemainPcdListLength = len(PcdList) - Count - 2
for ValueSameCount in range(RemainPcdListLength):
if int(PcdList[len(PcdList) - RemainPcdListLength + ValueSameCount].TokenValue, 0) == int(Item.TokenValue, 0):
SameTokenValuePcdList.append(PcdList[len(PcdList) - RemainPcdListLength + ValueSameCount])
else:
break;
#
# Sort same token value PCD list with TokenGuid and TokenCName
#
SameTokenValuePcdList.sort(lambda x, y: cmp("%s.%s" % (x.TokenSpaceGuidCName, x.TokenCName), "%s.%s" % (y.TokenSpaceGuidCName, y.TokenCName)))
SameTokenValuePcdListCount = 0
while (SameTokenValuePcdListCount < len(SameTokenValuePcdList) - 1):
Flag = False
TemListItem = SameTokenValuePcdList[SameTokenValuePcdListCount]
TemListItemNext = SameTokenValuePcdList[SameTokenValuePcdListCount + 1]
if (TemListItem.TokenSpaceGuidCName == TemListItemNext.TokenSpaceGuidCName) and (TemListItem.TokenCName != TemListItemNext.TokenCName):
for PcdItem in GlobalData.MixedPcd:
if (TemListItem.TokenCName, TemListItem.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdItem] or \
(TemListItemNext.TokenCName, TemListItemNext.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdItem]:
Flag = True
if not Flag:
EdkLogger.error(
'build',
FORMAT_INVALID,
"The TokenValue [%s] of PCD [%s.%s] is conflict with: [%s.%s] in %s"\
% (TemListItem.TokenValue, TemListItem.TokenSpaceGuidCName, TemListItem.TokenCName, TemListItemNext.TokenSpaceGuidCName, TemListItemNext.TokenCName, Package),
ExtraData=None
)
SameTokenValuePcdListCount += 1
Count += SameTokenValuePcdListCount
Count += 1
PcdList = Package.Pcds.values()
PcdList.sort(lambda x, y: cmp("%s.%s" % (x.TokenSpaceGuidCName, x.TokenCName), "%s.%s" % (y.TokenSpaceGuidCName, y.TokenCName)))
Count = 0
while (Count < len(PcdList) - 1) :
Item = PcdList[Count]
ItemNext = PcdList[Count + 1]
#
# Check that PCDs with the same TokenSpaceGuidCName.TokenCName have the same token value as well.
#
if (Item.TokenSpaceGuidCName == ItemNext.TokenSpaceGuidCName) and (Item.TokenCName == ItemNext.TokenCName) and (int(Item.TokenValue, 0) != int(ItemNext.TokenValue, 0)):
EdkLogger.error(
'build',
FORMAT_INVALID,
"The TokenValue [%s] of PCD [%s.%s] in %s defined in two places should be same as well."\
% (Item.TokenValue, Item.TokenSpaceGuidCName, Item.TokenCName, Package),
ExtraData=None
)
Count += 1
## Generate fds command
def _GenFdsCommand(self):
return (GenMake.TopLevelMakefile(self)._TEMPLATE_.Replace(GenMake.TopLevelMakefile(self)._TemplateDict)).strip()
## Create makefile for the platform and modules in it
#
# @param CreateDepsMakeFile Flag indicating if the makefile for
# modules will be created as well
#
def CreateMakeFile(self, CreateDepsMakeFile=False):
if not CreateDepsMakeFile:
return
for Pa in self.AutoGenObjectList:
Pa.CreateMakeFile(True)
## Create autogen code for platform and modules
#
# Since there's no autogen code for platform, this method will do nothing
# if CreateModuleCodeFile is set to False.
#
# @param CreateDepsCodeFile Flag indicating if creating module's
# autogen code file or not
#
def CreateCodeFile(self, CreateDepsCodeFile=False):
if not CreateDepsCodeFile:
return
for Pa in self.AutoGenObjectList:
Pa.CreateCodeFile(True)
## Create AsBuilt INF file the platform
#
def CreateAsBuiltInf(self):
return
Name = property(_GetName)
Guid = property(_GetGuid)
Version = property(_GetVersion)
OutputDir = property(_GetOutputDir)
ToolDefinition = property(_GetToolDefinition) # toolcode : tool path
BuildDir = property(_GetBuildDir)
FvDir = property(_GetFvDir)
MakeFileDir = property(_GetMakeFileDir)
BuildCommand = property(_GetBuildCommand)
GenFdsCommand = property(_GenFdsCommand)
## AutoGen class for platform
#
# PlatformAutoGen class will process the original information in platform
# file in order to generate makefile for platform.
#
class PlatformAutoGen(AutoGen):
# call super().__init__ then call the worker function with different parameter count
def __init__(self, Workspace, MetaFile, Target, Toolchain, Arch, *args, **kwargs):
try:
self._Init
except:
super(PlatformAutoGen, self).__init__(self, Workspace, MetaFile, Target, Toolchain, Arch, *args, **kwargs)
self._InitWorker(Workspace, MetaFile, Target, Toolchain, Arch)
self._Init = True
#
# Used to store all PCDs for both PEI and DXE phase, in order to generate
# correct PCD database
#
_DynaPcdList_ = []
_NonDynaPcdList_ = []
_PlatformPcds = {}
#
# The priority list used when overriding build options
#
PrioList = {"0x11111" : 16, # TARGET_TOOLCHAIN_ARCH_COMMANDTYPE_ATTRIBUTE (Highest)
"0x01111" : 15, # ******_TOOLCHAIN_ARCH_COMMANDTYPE_ATTRIBUTE
"0x10111" : 14, # TARGET_*********_ARCH_COMMANDTYPE_ATTRIBUTE
"0x00111" : 13, # ******_*********_ARCH_COMMANDTYPE_ATTRIBUTE
"0x11011" : 12, # TARGET_TOOLCHAIN_****_COMMANDTYPE_ATTRIBUTE
"0x01011" : 11, # ******_TOOLCHAIN_****_COMMANDTYPE_ATTRIBUTE
"0x10011" : 10, # TARGET_*********_****_COMMANDTYPE_ATTRIBUTE
"0x00011" : 9, # ******_*********_****_COMMANDTYPE_ATTRIBUTE
"0x11101" : 8, # TARGET_TOOLCHAIN_ARCH_***********_ATTRIBUTE
"0x01101" : 7, # ******_TOOLCHAIN_ARCH_***********_ATTRIBUTE
"0x10101" : 6, # TARGET_*********_ARCH_***********_ATTRIBUTE
"0x00101" : 5, # ******_*********_ARCH_***********_ATTRIBUTE
"0x11001" : 4, # TARGET_TOOLCHAIN_****_***********_ATTRIBUTE
"0x01001" : 3, # ******_TOOLCHAIN_****_***********_ATTRIBUTE
"0x10001" : 2, # TARGET_*********_****_***********_ATTRIBUTE
"0x00001" : 1} # ******_*********_****_***********_ATTRIBUTE (Lowest)
## Initialize PlatformAutoGen
#
#
# @param Workspace WorkspaceAutoGen object
# @param PlatformFile Platform file (DSC file)
# @param Target Build target (DEBUG, RELEASE)
# @param Toolchain Name of tool chain
# @param Arch arch of the platform supports
#
def _InitWorker(self, Workspace, PlatformFile, Target, Toolchain, Arch):
EdkLogger.debug(EdkLogger.DEBUG_9, "AutoGen platform [%s] [%s]" % (PlatformFile, Arch))
GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (PlatformFile, Arch, Toolchain, Target)
self.MetaFile = PlatformFile
self.Workspace = Workspace
self.WorkspaceDir = Workspace.WorkspaceDir
self.ToolChain = Toolchain
self.BuildTarget = Target
self.Arch = Arch
self.SourceDir = PlatformFile.SubDir
self.SourceOverrideDir = None
self.FdTargetList = self.Workspace.FdTargetList
self.FvTargetList = self.Workspace.FvTargetList
self.AllPcdList = []
# get the original module/package/platform objects
self.BuildDatabase = Workspace.BuildDatabase
self.DscBuildDataObj = Workspace.Platform
self._GuidDict = Workspace._GuidDict
# flag indicating if the makefile/C-code file has been created or not
self.IsMakeFileCreated = False
self.IsCodeFileCreated = False
self._Platform = None
self._Name = None
self._Guid = None
self._Version = None
self._BuildRule = None
self._SourceDir = None
self._BuildDir = None
self._OutputDir = None
self._FvDir = None
self._MakeFileDir = None
self._FdfFile = None
self._PcdTokenNumber = None # (TokenCName, TokenSpaceGuidCName) : GeneratedTokenNumber
self._DynamicPcdList = None # [(TokenCName1, TokenSpaceGuidCName1), (TokenCName2, TokenSpaceGuidCName2), ...]
self._NonDynamicPcdList = None # [(TokenCName1, TokenSpaceGuidCName1), (TokenCName2, TokenSpaceGuidCName2), ...]
self._NonDynamicPcdDict = {}
self._ToolDefinitions = None
self._ToolDefFile = None # toolcode : tool path
self._ToolChainFamily = None
self._BuildRuleFamily = None
self._BuildOption = None # toolcode : option
self._EdkBuildOption = None # edktoolcode : option
self._EdkIIBuildOption = None # edkiitoolcode : option
self._PackageList = None
self._ModuleAutoGenList = None
self._LibraryAutoGenList = None
self._BuildCommand = None
self._AsBuildInfList = []
self._AsBuildModuleList = []
self.VariableInfo = None
if GlobalData.gFdfParser is not None:
self._AsBuildInfList = GlobalData.gFdfParser.Profile.InfList
for Inf in self._AsBuildInfList:
InfClass = PathClass(NormPath(Inf), GlobalData.gWorkspace, self.Arch)
M = self.BuildDatabase[InfClass, self.Arch, self.BuildTarget, self.ToolChain]
if not M.IsSupportedArch:
continue
self._AsBuildModuleList.append(InfClass)
# get library/modules for build
self.LibraryBuildDirectoryList = []
self.ModuleBuildDirectoryList = []
return True
def __repr__(self):
return "%s [%s]" % (self.MetaFile, self.Arch)
## Create autogen code for platform and modules
#
# Since there's no autogen code for platform, this method will do nothing
# if CreateModuleCodeFile is set to False.
#
# @param CreateModuleCodeFile Flag indicating if creating module's
# autogen code file or not
#
def CreateCodeFile(self, CreateModuleCodeFile=False):
# only modules have code to be generated, so do nothing if CreateModuleCodeFile is False
if self.IsCodeFileCreated or not CreateModuleCodeFile:
return
for Ma in self.ModuleAutoGenList:
Ma.CreateCodeFile(True)
# don't do this twice
self.IsCodeFileCreated = True
## Generate Fds Command
def _GenFdsCommand(self):
return self.Workspace.GenFdsCommand
## Create makefile for the platform and modules in it
#
# @param CreateModuleMakeFile Flag indicating if the makefile for
# modules will be created as well
#
def CreateMakeFile(self, CreateModuleMakeFile=False, FfsCommand = {}):
if CreateModuleMakeFile:
for ModuleFile in self.Platform.Modules:
Ma = ModuleAutoGen(self.Workspace, ModuleFile, self.BuildTarget,
self.ToolChain, self.Arch, self.MetaFile)
if (ModuleFile.File, self.Arch) in FfsCommand:
Ma.CreateMakeFile(True, FfsCommand[ModuleFile.File, self.Arch])
else:
Ma.CreateMakeFile(True)
#Ma.CreateAsBuiltInf()
# no need to create makefile for the platform more than once
if self.IsMakeFileCreated:
return
# create library/module build dirs for platform
Makefile = GenMake.PlatformMakefile(self)
self.LibraryBuildDirectoryList = Makefile.GetLibraryBuildDirectoryList()
self.ModuleBuildDirectoryList = Makefile.GetModuleBuildDirectoryList()
self.IsMakeFileCreated = True
## Deal with Shared FixedAtBuild Pcds
#
def CollectFixedAtBuildPcds(self):
for LibAuto in self.LibraryAutoGenList:
FixedAtBuildPcds = {}
ShareFixedAtBuildPcdsSameValue = {}
for Module in LibAuto._ReferenceModules:
for Pcd in set(Module.FixedAtBuildPcds + LibAuto.FixedAtBuildPcds):
DefaultValue = Pcd.DefaultValue
# Cover the case: a DSC component overrides the Pcd value and the Pcd is only used in one library
if Pcd in Module.LibraryPcdList:
Index = Module.LibraryPcdList.index(Pcd)
DefaultValue = Module.LibraryPcdList[Index].DefaultValue
key = ".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName))
if key not in FixedAtBuildPcds:
ShareFixedAtBuildPcdsSameValue[key] = True
FixedAtBuildPcds[key] = DefaultValue
else:
if FixedAtBuildPcds[key] != DefaultValue:
ShareFixedAtBuildPcdsSameValue[key] = False
for Pcd in LibAuto.FixedAtBuildPcds:
key = ".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName))
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) not in self.NonDynamicPcdDict:
continue
else:
DscPcd = self.NonDynamicPcdDict[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName)]
if DscPcd.Type != TAB_PCDS_FIXED_AT_BUILD:
continue
if key in ShareFixedAtBuildPcdsSameValue and ShareFixedAtBuildPcdsSameValue[key]:
LibAuto.ConstPcd[key] = FixedAtBuildPcds[key]
def CollectVariables(self, DynamicPcdSet):
VpdRegionSize = 0
VpdRegionBase = 0
if self.Workspace.FdfFile:
FdDict = self.Workspace.FdfProfile.FdDict[GlobalData.gFdfParser.CurrentFdName]
for FdRegion in FdDict.RegionList:
for item in FdRegion.RegionDataList:
if self.Platform.VpdToolGuid.strip() and self.Platform.VpdToolGuid in item:
VpdRegionSize = FdRegion.Size
VpdRegionBase = FdRegion.Offset
break
VariableInfo = VariableMgr(self.DscBuildDataObj._GetDefaultStores(), self.DscBuildDataObj._GetSkuIds())
VariableInfo.SetVpdRegionMaxSize(VpdRegionSize)
VariableInfo.SetVpdRegionOffset(VpdRegionBase)
Index = 0
for Pcd in DynamicPcdSet:
pcdname = ".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName))
for SkuName in Pcd.SkuInfoList:
Sku = Pcd.SkuInfoList[SkuName]
SkuId = Sku.SkuId
if SkuId is None or SkuId == '':
continue
if len(Sku.VariableName) > 0:
VariableGuidStructure = Sku.VariableGuidValue
VariableGuid = GuidStructureStringToGuidString(VariableGuidStructure)
for StorageName in Sku.DefaultStoreDict:
VariableInfo.append_variable(var_info(Index, pcdname, StorageName, SkuName, StringToArray(Sku.VariableName), VariableGuid, Sku.VariableOffset, Sku.VariableAttribute, Sku.HiiDefaultValue, Sku.DefaultStoreDict[StorageName], Pcd.DatumType))
Index += 1
return VariableInfo
def UpdateNVStoreMaxSize(self, OrgVpdFile):
if self.VariableInfo:
VpdMapFilePath = os.path.join(self.BuildDir, TAB_FV_DIRECTORY, "%s.map" % self.Platform.VpdToolGuid)
PcdNvStoreDfBuffer = [item for item in self._DynamicPcdList if item.TokenCName == "PcdNvStoreDefaultValueBuffer" and item.TokenSpaceGuidCName == "gEfiMdeModulePkgTokenSpaceGuid"]
if PcdNvStoreDfBuffer:
if os.path.exists(VpdMapFilePath):
OrgVpdFile.Read(VpdMapFilePath)
PcdItems = OrgVpdFile.GetOffset(PcdNvStoreDfBuffer[0])
NvStoreOffset = PcdItems.values()[0].strip() if PcdItems else '0'
else:
EdkLogger.error("build", FILE_READ_FAILURE, "Can not find VPD map file %s to fix up VPD offset." % VpdMapFilePath)
NvStoreOffset = int(NvStoreOffset, 16) if NvStoreOffset.upper().startswith("0X") else int(NvStoreOffset)
default_skuobj = PcdNvStoreDfBuffer[0].SkuInfoList.get(TAB_DEFAULT)
maxsize = self.VariableInfo.VpdRegionSize - NvStoreOffset if self.VariableInfo.VpdRegionSize else len(default_skuobj.DefaultValue.split(","))
var_data = self.VariableInfo.PatchNVStoreDefaultMaxSize(maxsize)
if var_data and default_skuobj:
default_skuobj.DefaultValue = var_data
PcdNvStoreDfBuffer[0].DefaultValue = var_data
PcdNvStoreDfBuffer[0].SkuInfoList.clear()
PcdNvStoreDfBuffer[0].SkuInfoList[TAB_DEFAULT] = default_skuobj
PcdNvStoreDfBuffer[0].MaxDatumSize = str(len(default_skuobj.DefaultValue.split(",")))
return OrgVpdFile
## Collect dynamic PCDs
#
# Gather dynamic PCDs list from each module and their settings from platform
# This interface should be invoked explicitly when platform action is created.
#
def CollectPlatformDynamicPcds(self):
for key in self.Platform.Pcds:
for SinglePcd in GlobalData.MixedPcd:
if (self.Platform.Pcds[key].TokenCName, self.Platform.Pcds[key].TokenSpaceGuidCName) == SinglePcd:
for item in GlobalData.MixedPcd[SinglePcd]:
Pcd_Type = item[0].split('_')[-1]
if (Pcd_Type == self.Platform.Pcds[key].Type) or (Pcd_Type == TAB_PCDS_DYNAMIC_EX and self.Platform.Pcds[key].Type in PCD_DYNAMIC_EX_TYPE_SET) or \
(Pcd_Type == TAB_PCDS_DYNAMIC and self.Platform.Pcds[key].Type in PCD_DYNAMIC_TYPE_SET):
Value = self.Platform.Pcds[key]
Value.TokenCName = self.Platform.Pcds[key].TokenCName + '_' + Pcd_Type
if len(key) == 2:
newkey = (Value.TokenCName, key[1])
elif len(key) == 3:
newkey = (Value.TokenCName, key[1], key[2])
del self.Platform.Pcds[key]
self.Platform.Pcds[newkey] = Value
break
break
# for gathering error information
NoDatumTypePcdList = set()
FdfModuleList = []
for InfName in self._AsBuildInfList:
InfName = mws.join(self.WorkspaceDir, InfName)
FdfModuleList.append(os.path.normpath(InfName))
for F in self.Platform.Modules.keys():
M = ModuleAutoGen(self.Workspace, F, self.BuildTarget, self.ToolChain, self.Arch, self.MetaFile)
#GuidValue.update(M.Guids)
self.Platform.Modules[F].M = M
for PcdFromModule in M.ModulePcdList + M.LibraryPcdList:
# make sure that the "VOID*" kind of datum has MaxDatumSize set
if PcdFromModule.DatumType == TAB_VOID and not PcdFromModule.MaxDatumSize:
NoDatumTypePcdList.add("%s.%s [%s]" % (PcdFromModule.TokenSpaceGuidCName, PcdFromModule.TokenCName, F))
# Check the PCD from Binary INF or Source INF
if M.IsBinaryModule == True:
PcdFromModule.IsFromBinaryInf = True
# Check the PCD from DSC or not
PcdFromModule.IsFromDsc = (PcdFromModule.TokenCName, PcdFromModule.TokenSpaceGuidCName) in self.Platform.Pcds
if PcdFromModule.Type in PCD_DYNAMIC_TYPE_SET or PcdFromModule.Type in PCD_DYNAMIC_EX_TYPE_SET:
if F.Path not in FdfModuleList:
# If one of the source-built modules listed in the DSC is not listed
# in the FDF modules, and the INF lists a PCD that can only use the PcdsDynamic
# access method (it is only listed in the DEC file that declares the
# PCD as PcdsDynamic), then the build tool will report a warning message to
# notify the platform integrator that they are attempting to build a module that must
# be included in a flash image in order to be functional. These Dynamic
# PCDs will not be added into the database unless they are used by other
# modules that are included in the FDF file.
if PcdFromModule.Type in PCD_DYNAMIC_TYPE_SET and \
PcdFromModule.IsFromBinaryInf == False:
# Print a warning message to let the developer make a determination.
continue
# If one of the source-built modules listed in the DSC is not listed in
# the FDF modules, and the INF lists a PCD that can only use the PcdsDynamicEx
# access method (it is only listed in the DEC file that declares the
# PCD as PcdsDynamicEx), then DO NOT break the build; DO NOT add the
# PCD to the Platform's PCD Database.
if PcdFromModule.Type in PCD_DYNAMIC_EX_TYPE_SET:
continue
#
# If a dynamic PCD is used by both a PEIM/PEI module and a DXE module,
# it should be stored in the PEI PCD database. If a dynamic PCD is only
# used by DXE modules, it should be stored in the DXE PCD database.
# The default Phase is DXE.
#
if M.ModuleType in SUP_MODULE_SET_PEI:
PcdFromModule.Phase = "PEI"
if PcdFromModule not in self._DynaPcdList_:
self._DynaPcdList_.append(PcdFromModule)
elif PcdFromModule.Phase == 'PEI':
# overwrite any existing identical PCD if the Phase is PEI
Index = self._DynaPcdList_.index(PcdFromModule)
self._DynaPcdList_[Index] = PcdFromModule
elif PcdFromModule not in self._NonDynaPcdList_:
self._NonDynaPcdList_.append(PcdFromModule)
elif PcdFromModule in self._NonDynaPcdList_ and PcdFromModule.IsFromBinaryInf == True:
Index = self._NonDynaPcdList_.index(PcdFromModule)
if self._NonDynaPcdList_[Index].IsFromBinaryInf == False:
#The PCD from Binary INF will override the same one from source INF
self._NonDynaPcdList_.remove (self._NonDynaPcdList_[Index])
PcdFromModule.Pending = False
self._NonDynaPcdList_.append (PcdFromModule)
DscModuleSet = {os.path.normpath(ModuleInf.Path) for ModuleInf in self.Platform.Modules}
# add the PCD from modules that listed in FDF but not in DSC to Database
for InfName in FdfModuleList:
if InfName not in DscModuleSet:
InfClass = PathClass(InfName)
M = self.BuildDatabase[InfClass, self.Arch, self.BuildTarget, self.ToolChain]
# If a module INF is in the FDF but not in the current arch's DSC module list, it must be a module (either binary or source)
# for a different Arch. PCDs in source modules for different Archs were already added above, so skip the source module here.
# For a binary module in the current arch, we need to list its PCDs in the database.
# For binary module, if in current arch, we need to list the PCDs into database.
if not M.IsSupportedArch:
continue
# Override the module PCD setting by platform setting
ModulePcdList = self.ApplyPcdSetting(M, M.Pcds)
for PcdFromModule in ModulePcdList:
PcdFromModule.IsFromBinaryInf = True
PcdFromModule.IsFromDsc = False
# Only allow the DynamicEx and Patchable PCD in AsBuild INF
if PcdFromModule.Type not in PCD_DYNAMIC_EX_TYPE_SET and PcdFromModule.Type not in TAB_PCDS_PATCHABLE_IN_MODULE:
EdkLogger.error("build", AUTOGEN_ERROR, "PCD setting error",
File=self.MetaFile,
ExtraData="\n\tExisted %s PCD %s in:\n\t\t%s\n"
% (PcdFromModule.Type, PcdFromModule.TokenCName, InfName))
# make sure that the "VOID*" kind of datum has MaxDatumSize set
if PcdFromModule.DatumType == TAB_VOID and not PcdFromModule.MaxDatumSize:
NoDatumTypePcdList.add("%s.%s [%s]" % (PcdFromModule.TokenSpaceGuidCName, PcdFromModule.TokenCName, InfName))
if M.ModuleType in SUP_MODULE_SET_PEI:
PcdFromModule.Phase = "PEI"
if PcdFromModule not in self._DynaPcdList_ and PcdFromModule.Type in PCD_DYNAMIC_EX_TYPE_SET:
self._DynaPcdList_.append(PcdFromModule)
elif PcdFromModule not in self._NonDynaPcdList_ and PcdFromModule.Type in TAB_PCDS_PATCHABLE_IN_MODULE:
self._NonDynaPcdList_.append(PcdFromModule)
if PcdFromModule in self._DynaPcdList_ and PcdFromModule.Phase == 'PEI' and PcdFromModule.Type in PCD_DYNAMIC_EX_TYPE_SET:
# Overwrite the phase of any existing identical PCD if the Phase is PEI.
# This handles the case where a dynamic PCD is used by a PEIM/PEI
# module and a DXE module at the same time.
# Overwrite the type of the PCDs in the source INF with the type from the As-Built
# INF file, i.e. DynamicEx.
Index = self._DynaPcdList_.index(PcdFromModule)
self._DynaPcdList_[Index].Phase = PcdFromModule.Phase
self._DynaPcdList_[Index].Type = PcdFromModule.Type
for PcdFromModule in self._NonDynaPcdList_:
# If a PCD is not listed in the DSC file, but all binary INF files used by
# this platform (that use this PCD) list the PCD in a [PatchPcds]
# section, AND all source INF files used by this platform's build
# that use the PCD list it in either a [Pcds] or [PatchPcds]
# section, then the tools must NOT add the PCD to the Platform's PCD
# Database; the build must assign the access method for this PCD as
# PcdsPatchableInModule.
if PcdFromModule not in self._DynaPcdList_:
continue
Index = self._DynaPcdList_.index(PcdFromModule)
if PcdFromModule.IsFromDsc == False and \
PcdFromModule.Type in TAB_PCDS_PATCHABLE_IN_MODULE and \
PcdFromModule.IsFromBinaryInf == True and \
self._DynaPcdList_[Index].IsFromBinaryInf == False:
Index = self._DynaPcdList_.index(PcdFromModule)
self._DynaPcdList_.remove (self._DynaPcdList_[Index])
# print out error information and break the build if any error is found
if len(NoDatumTypePcdList) > 0:
NoDatumTypePcdListString = "\n\t\t".join(NoDatumTypePcdList)
EdkLogger.error("build", AUTOGEN_ERROR, "PCD setting error",
File=self.MetaFile,
ExtraData="\n\tPCD(s) without MaxDatumSize:\n\t\t%s\n"
% NoDatumTypePcdListString)
self._NonDynamicPcdList = self._NonDynaPcdList_
self._DynamicPcdList = self._DynaPcdList_
#
# Sort the dynamic PCD list so that:
# 1) If a PCD's datum type is VOID* and its value is a unicode string starting with L, the PCD item is
#    placed at the head of the dynamic list
# 2) If a PCD is of HII type, the PCD item is placed after the unicode-type PCDs
#
# The reason for sorting is to make sure the unicode strings are double-byte aligned in the string table.
#
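# Illustrative sketch (not from the source, only an example of the intended
# ordering): given dynamic PCDs with a default of L"Setup", a {0x1,0x2} byte
# array, and an HII variable-backed PCD, the final list would look like
#   [<L"Setup" PCD>, <HII PCD>, <{0x1,0x2} PCD>]
# i.e. unicode-string PCDs first, HII PCDs next, everything else last.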
UnicodePcdArray = set()
HiiPcdArray = set()
OtherPcdArray = set()
VpdPcdDict = {}
VpdFile = VpdInfoFile.VpdInfoFile()
NeedProcessVpdMapFile = False
for pcd in self.Platform.Pcds:
if pcd not in self._PlatformPcds:
self._PlatformPcds[pcd] = self.Platform.Pcds[pcd]
for item in self._PlatformPcds:
if self._PlatformPcds[item].DatumType and self._PlatformPcds[item].DatumType not in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64, TAB_VOID, "BOOLEAN"]:
self._PlatformPcds[item].DatumType = TAB_VOID
if (self.Workspace.ArchList[-1] == self.Arch):
for Pcd in self._DynamicPcdList:
# just pick one value to determine whether it is a unicode string type
Sku = list(Pcd.SkuInfoList.values())[0]
Sku.VpdOffset = Sku.VpdOffset.strip()
if Pcd.DatumType not in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64, TAB_VOID, "BOOLEAN"]:
Pcd.DatumType = TAB_VOID
# if a PCD whose datum value is a unicode string is found, insert it to the left side of UnicodeIndex
# if an HII type PCD is found, insert it to the right of UnicodeIndex
if Pcd.Type in [TAB_PCDS_DYNAMIC_VPD, TAB_PCDS_DYNAMIC_EX_VPD]:
VpdPcdDict[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName)] = Pcd
# Collect DynamicHii PCD values and assign them to the DynamicExVpd PCD gEfiMdeModulePkgTokenSpaceGuid.PcdNvStoreDefaultValueBuffer
PcdNvStoreDfBuffer = VpdPcdDict.get(("PcdNvStoreDefaultValueBuffer", "gEfiMdeModulePkgTokenSpaceGuid"))
if PcdNvStoreDfBuffer:
self.VariableInfo = self.CollectVariables(self._DynamicPcdList)
vardump = self.VariableInfo.dump()
if vardump:
PcdNvStoreDfBuffer.DefaultValue = vardump
for skuname in PcdNvStoreDfBuffer.SkuInfoList:
PcdNvStoreDfBuffer.SkuInfoList[skuname].DefaultValue = vardump
PcdNvStoreDfBuffer.MaxDatumSize = str(len(vardump.split(",")))
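# Editorial note (hedged): the dump produced by CollectVariables appears to be
# a comma-separated byte string such as "{0x01,0x00,...}", so MaxDatumSize is
# set to the number of comma-separated bytes it contains.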
PlatformPcds = sorted(self._PlatformPcds.keys())
#
# Add VPD type PCD into VpdFile and determine whether the VPD PCD need to be fixed up.
#
VpdSkuMap = {}
for PcdKey in PlatformPcds:
Pcd = self._PlatformPcds[PcdKey]
if Pcd.Type in [TAB_PCDS_DYNAMIC_VPD, TAB_PCDS_DYNAMIC_EX_VPD] and \
PcdKey in VpdPcdDict:
Pcd = VpdPcdDict[PcdKey]
SkuValueMap = {}
DefaultSku = Pcd.SkuInfoList.get(TAB_DEFAULT)
if DefaultSku:
PcdValue = DefaultSku.DefaultValue
if PcdValue not in SkuValueMap:
SkuValueMap[PcdValue] = []
VpdFile.Add(Pcd, TAB_DEFAULT, DefaultSku.VpdOffset)
SkuValueMap[PcdValue].append(DefaultSku)
for (SkuName, Sku) in Pcd.SkuInfoList.items():
Sku.VpdOffset = Sku.VpdOffset.strip()
PcdValue = Sku.DefaultValue
if PcdValue == "":
PcdValue = Pcd.DefaultValue
if Sku.VpdOffset != '*':
if PcdValue.startswith("{"):
Alignment = 8
elif PcdValue.startswith("L"):
Alignment = 2
else:
Alignment = 1
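# Alignment summary (illustrative, based on the branches above): byte-array
# values such as {0x01,0x02} are placed on an 8-byte boundary, unicode
# strings such as L"Name" on a 2-byte boundary, and everything else
# (e.g. "abc" or plain integers) on a 1-byte boundary.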
try:
VpdOffset = int(Sku.VpdOffset)
except:
try:
VpdOffset = int(Sku.VpdOffset, 16)
except:
EdkLogger.error("build", FORMAT_INVALID, "Invalid offset value %s for PCD %s.%s." % (Sku.VpdOffset, Pcd.TokenSpaceGuidCName, Pcd.TokenCName))
if VpdOffset % Alignment != 0:
if PcdValue.startswith("{"):
EdkLogger.warn("build", "The offset value of PCD %s.%s is not 8-byte aligned!" %(Pcd.TokenSpaceGuidCName, Pcd.TokenCName), File=self.MetaFile)
else:
EdkLogger.error("build", FORMAT_INVALID, 'The offset value of PCD %s.%s should be %s-byte aligned.' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, Alignment))
if PcdValue not in SkuValueMap:
SkuValueMap[PcdValue] = []
VpdFile.Add(Pcd, SkuName, Sku.VpdOffset)
SkuValueMap[PcdValue].append(Sku)
# if the offset of a VPD PCD is *, then it needs to be fixed up by the third-party tool.
if not NeedProcessVpdMapFile and Sku.VpdOffset == "*":
NeedProcessVpdMapFile = True
if self.Platform.VpdToolGuid is None or self.Platform.VpdToolGuid == '':
EdkLogger.error("Build", FILE_NOT_FOUND, \
"Fail to find third-party BPDG tool to process VPD PCDs. BPDG Guid tool need to be defined in tools_def.txt and VPD_TOOL_GUID need to be provided in DSC file.")
VpdSkuMap[PcdKey] = SkuValueMap
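# Editorial note (hedged): at this point VpdSkuMap appears to have the shape
#   { PcdKey : { PcdValue : [SkuInfo, SkuInfo, ...] } }
# grouping the SKUs of each VPD PCD by the value they share, so that SKUs with
# identical values can later be fixed up to the same offset.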
#
# Fix up PCDs defined in the VPD PCD section that are never referenced by any module.
# An example is a PCD used for signatures.
#
for DscPcd in PlatformPcds:
DscPcdEntry = self._PlatformPcds[DscPcd]
if DscPcdEntry.Type in [TAB_PCDS_DYNAMIC_VPD, TAB_PCDS_DYNAMIC_EX_VPD]:
if not (self.Platform.VpdToolGuid is None or self.Platform.VpdToolGuid == ''):
FoundFlag = False
for VpdPcd in VpdFile._VpdArray:
# This PCD has been referenced by a module
if (VpdPcd.TokenSpaceGuidCName == DscPcdEntry.TokenSpaceGuidCName) and \
(VpdPcd.TokenCName == DscPcdEntry.TokenCName):
FoundFlag = True
# Not found; it should be a signature PCD
if not FoundFlag :
# just pick one value to determine whether it is a unicode string type
SkuValueMap = {}
SkuObjList = list(DscPcdEntry.SkuInfoList.items())
DefaultSku = DscPcdEntry.SkuInfoList.get(TAB_DEFAULT)
if DefaultSku:
defaultindex = SkuObjList.index((TAB_DEFAULT, DefaultSku))
SkuObjList[0], SkuObjList[defaultindex] = SkuObjList[defaultindex], SkuObjList[0]
for (SkuName, Sku) in SkuObjList:
Sku.VpdOffset = Sku.VpdOffset.strip()
# Need to iterate the DEC PCD information to get the value & datum type
for eachDec in self.PackageList:
for DecPcd in eachDec.Pcds:
DecPcdEntry = eachDec.Pcds[DecPcd]
if (DecPcdEntry.TokenSpaceGuidCName == DscPcdEntry.TokenSpaceGuidCName) and \
(DecPcdEntry.TokenCName == DscPcdEntry.TokenCName):
# Print a warning message to let the developer make a determination.
EdkLogger.warn("build", "Unreferenced vpd pcd used!",
File=self.MetaFile, \
ExtraData = "PCD: %s.%s used in the DSC file %s is unreferenced." \
%(DscPcdEntry.TokenSpaceGuidCName, DscPcdEntry.TokenCName, self.Platform.MetaFile.Path))
DscPcdEntry.DatumType = DecPcdEntry.DatumType
DscPcdEntry.DefaultValue = DecPcdEntry.DefaultValue
DscPcdEntry.TokenValue = DecPcdEntry.TokenValue
DscPcdEntry.TokenSpaceGuidValue = eachDec.Guids[DecPcdEntry.TokenSpaceGuidCName]
# Only fix the value when no value is provided in the DSC file.
if not Sku.DefaultValue:
DscPcdEntry.SkuInfoList[list(DscPcdEntry.SkuInfoList.keys())[0]].DefaultValue = DecPcdEntry.DefaultValue
if DscPcdEntry not in self._DynamicPcdList:
self._DynamicPcdList.append(DscPcdEntry)
Sku.VpdOffset = Sku.VpdOffset.strip()
PcdValue = Sku.DefaultValue
if PcdValue == "":
PcdValue = DscPcdEntry.DefaultValue
if Sku.VpdOffset != '*':
if PcdValue.startswith("{"):
Alignment = 8
elif PcdValue.startswith("L"):
Alignment = 2
else:
Alignment = 1
try:
VpdOffset = int(Sku.VpdOffset)
except:
try:
VpdOffset = int(Sku.VpdOffset, 16)
except:
EdkLogger.error("build", FORMAT_INVALID, "Invalid offset value %s for PCD %s.%s." % (Sku.VpdOffset, DscPcdEntry.TokenSpaceGuidCName, DscPcdEntry.TokenCName))
if VpdOffset % Alignment != 0:
if PcdValue.startswith("{"):
EdkLogger.warn("build", "The offset value of PCD %s.%s is not 8-byte aligned!" %(DscPcdEntry.TokenSpaceGuidCName, DscPcdEntry.TokenCName), File=self.MetaFile)
else:
EdkLogger.error("build", FORMAT_INVALID, 'The offset value of PCD %s.%s should be %s-byte aligned.' % (DscPcdEntry.TokenSpaceGuidCName, DscPcdEntry.TokenCName, Alignment))
if PcdValue not in SkuValueMap:
SkuValueMap[PcdValue] = []
VpdFile.Add(DscPcdEntry, SkuName, Sku.VpdOffset)
SkuValueMap[PcdValue].append(Sku)
if not NeedProcessVpdMapFile and Sku.VpdOffset == "*":
NeedProcessVpdMapFile = True
if DscPcdEntry.DatumType == TAB_VOID and PcdValue.startswith("L"):
UnicodePcdArray.add(DscPcdEntry)
elif len(Sku.VariableName) > 0:
HiiPcdArray.add(DscPcdEntry)
else:
OtherPcdArray.add(DscPcdEntry)
# if the offset of a VPD PCD is *, then it needs to be fixed up by the third-party tool.
VpdSkuMap[DscPcd] = SkuValueMap
if (self.Platform.FlashDefinition is None or self.Platform.FlashDefinition == '') and \
VpdFile.GetCount() != 0:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE,
"Fail to get FLASH_DEFINITION definition in DSC file %s which is required when DSC contains VPD PCD." % str(self.Platform.MetaFile))
if VpdFile.GetCount() != 0:
self.FixVpdOffset(VpdFile)
self.FixVpdOffset(self.UpdateNVStoreMaxSize(VpdFile))
# Process VPD map file generated by third party BPDG tool
if NeedProcessVpdMapFile:
VpdMapFilePath = os.path.join(self.BuildDir, TAB_FV_DIRECTORY, "%s.map" % self.Platform.VpdToolGuid)
if os.path.exists(VpdMapFilePath):
VpdFile.Read(VpdMapFilePath)
# Fixup "*" offset
for pcd in VpdSkuMap:
vpdinfo = VpdFile.GetVpdInfo(pcd)
if vpdinfo is None:
# no VPD info was generated for this PCD; nothing to fix up
continue
for pcdvalue in VpdSkuMap[pcd]:
for sku in VpdSkuMap[pcd][pcdvalue]:
for item in vpdinfo:
if item[2] == pcdvalue:
sku.VpdOffset = item[1]
else:
EdkLogger.error("build", FILE_READ_FAILURE, "Can not find VPD map file %s to fix up VPD offset." % VpdMapFilePath)
# Clear and rebuild the DynamicPcdList the last time this function is entered
for Pcd in self._DynamicPcdList:
# just pick one value to determine whether it is a unicode string type
Sku = list(Pcd.SkuInfoList.values())[0]
Sku.VpdOffset = Sku.VpdOffset.strip()
if Pcd.DatumType not in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64, TAB_VOID, "BOOLEAN"]:
Pcd.DatumType = TAB_VOID
PcdValue = Sku.DefaultValue
if Pcd.DatumType == TAB_VOID and PcdValue.startswith("L"):
# if a PCD whose datum value is a unicode string is found, insert it to the left side of UnicodeIndex
UnicodePcdArray.add(Pcd)
elif len(Sku.VariableName) > 0:
# if an HII type PCD is found, insert it to the right of UnicodeIndex
HiiPcdArray.add(Pcd)
else:
OtherPcdArray.add(Pcd)
del self._DynamicPcdList[:]
self._DynamicPcdList.extend(list(UnicodePcdArray))
self._DynamicPcdList.extend(list(HiiPcdArray))
self._DynamicPcdList.extend(list(OtherPcdArray))
allskuset = [(SkuName, Sku.SkuId) for pcd in self._DynamicPcdList for (SkuName, Sku) in pcd.SkuInfoList.items()]
for pcd in self._DynamicPcdList:
if len(pcd.SkuInfoList) == 1:
for (SkuName, SkuId) in allskuset:
if (isinstance(SkuId, str) and eval(SkuId) == 0) or SkuId == 0:
continue
pcd.SkuInfoList[SkuName] = copy.deepcopy(pcd.SkuInfoList[TAB_DEFAULT])
pcd.SkuInfoList[SkuName].SkuId = SkuId
self.AllPcdList = self._NonDynamicPcdList + self._DynamicPcdList
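## Fix up VPD offsets with the third-party BPDG tool
#
# Minimal sketch of the flow below (assuming the BPDG tool consumes the
# generated text file): write <VpdToolGuid>.txt under the FV directory, look
# up the tool whose GUID attribute in tools_def.txt matches VPD_TOOL_GUID,
# then invoke it on that file.
#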
def FixVpdOffset(self, VpdFile ):
FvPath = os.path.join(self.BuildDir, TAB_FV_DIRECTORY)
if not os.path.exists(FvPath):
try:
os.makedirs(FvPath)
except:
EdkLogger.error("build", FILE_WRITE_FAILURE, "Fail to create FV folder under %s" % self.BuildDir)
VpdFilePath = os.path.join(FvPath, "%s.txt" % self.Platform.VpdToolGuid)
if VpdFile.Write(VpdFilePath):
# retrieve the BPDG tool's path from tools_def.txt according to the VPD_TOOL_GUID defined in the DSC file.
BPDGToolName = None
for ToolDef in self.ToolDefinition.values():
if TAB_GUID in ToolDef and ToolDef[TAB_GUID] == self.Platform.VpdToolGuid:
if "PATH" not in ToolDef:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "PATH attribute was not provided for BPDG guid tool %s in tools_def.txt" % self.Platform.VpdToolGuid)
BPDGToolName = ToolDef["PATH"]
break
# Call third party GUID BPDG tool.
if BPDGToolName is not None:
VpdInfoFile.CallExtenalBPDGTool(BPDGToolName, VpdFilePath)
else:
EdkLogger.error("Build", FILE_NOT_FOUND, "Fail to find third-party BPDG tool to process VPD PCDs. BPDG Guid tool need to be defined in tools_def.txt and VPD_TOOL_GUID need to be provided in DSC file.")
## Return the platform build data object
def _GetPlatform(self):
if self._Platform is None:
self._Platform = self.BuildDatabase[self.MetaFile, self.Arch, self.BuildTarget, self.ToolChain]
return self._Platform
## Return platform name
def _GetName(self):
return self.Platform.PlatformName
## Return the meta file GUID
def _GetGuid(self):
return self.Platform.Guid
## Return the platform version
def _GetVersion(self):
return self.Platform.Version
## Return the FDF file name
def _GetFdfFile(self):
if self._FdfFile is None:
if self.Workspace.FdfFile != "":
self._FdfFile= mws.join(self.WorkspaceDir, self.Workspace.FdfFile)
else:
self._FdfFile = ''
return self._FdfFile
## Return the build output directory the platform specifies
def _GetOutputDir(self):
return self.Platform.OutputDirectory
## Return the directory to store all intermediate and final files built
def _GetBuildDir(self):
if self._BuildDir is None:
if os.path.isabs(self.OutputDir):
self._BuildDir = path.join(
path.abspath(self.OutputDir),
self.BuildTarget + "_" + self.ToolChain,
)
else:
self._BuildDir = path.join(
self.WorkspaceDir,
self.OutputDir,
self.BuildTarget + "_" + self.ToolChain,
)
GlobalData.gBuildDirectory = self._BuildDir
return self._BuildDir
## Return directory of platform makefile
#
# @retval string Makefile directory
#
def _GetMakeFileDir(self):
if self._MakeFileDir is None:
self._MakeFileDir = path.join(self.BuildDir, self.Arch)
return self._MakeFileDir
## Return build command string
#
# @retval string Build command string
#
def _GetBuildCommand(self):
if self._BuildCommand is None:
self._BuildCommand = []
if "MAKE" in self.ToolDefinition and "PATH" in self.ToolDefinition["MAKE"]:
self._BuildCommand += SplitOption(self.ToolDefinition["MAKE"]["PATH"])
if "FLAGS" in self.ToolDefinition["MAKE"]:
NewOption = self.ToolDefinition["MAKE"]["FLAGS"].strip()
if NewOption != '':
self._BuildCommand += SplitOption(NewOption)
if "MAKE" in self.EdkIIBuildOption:
if "FLAGS" in self.EdkIIBuildOption["MAKE"]:
Flags = self.EdkIIBuildOption["MAKE"]["FLAGS"]
if Flags.startswith('='):
self._BuildCommand = [self._BuildCommand[0]] + [Flags[1:]]
else:
self._BuildCommand.append(Flags)
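# Editorial note (illustrative, hypothetical values): a FLAGS value beginning
# with '=' appears to replace the flags from tools_def.txt entirely, keeping
# only the make executable (e.g. "=-j8" yields ['make', '-j8']), while a plain
# value such as "-j8" is appended to the existing command.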
return self._BuildCommand
## Get tool chain definition
#
# Get each tool definition for the given tool chain from tools_def.txt and the platform
#
def _GetToolDefinition(self):
if self._ToolDefinitions is None:
ToolDefinition = self.Workspace.ToolDef.ToolsDefTxtDictionary
if TAB_TOD_DEFINES_COMMAND_TYPE not in self.Workspace.ToolDef.ToolsDefTxtDatabase:
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE, "No tools found in configuration",
ExtraData="[%s]" % self.MetaFile)
self._ToolDefinitions = {}
DllPathList = set()
for Def in ToolDefinition:
Target, Tag, Arch, Tool, Attr = Def.split("_")
if Target != self.BuildTarget or Tag != self.ToolChain or Arch != self.Arch:
continue
Value = ToolDefinition[Def]
# don't record the DLL
if Attr == "DLL":
DllPathList.add(Value)
continue
if Tool not in self._ToolDefinitions:
self._ToolDefinitions[Tool] = {}
self._ToolDefinitions[Tool][Attr] = Value
ToolsDef = ''
if GlobalData.gOptions.SilentMode and "MAKE" in self._ToolDefinitions:
if "FLAGS" not in self._ToolDefinitions["MAKE"]:
self._ToolDefinitions["MAKE"]["FLAGS"] = ""
self._ToolDefinitions["MAKE"]["FLAGS"] += " -s"
MakeFlags = ''
for Tool in self._ToolDefinitions:
for Attr in self._ToolDefinitions[Tool]:
Value = self._ToolDefinitions[Tool][Attr]
if Tool in self.BuildOption and Attr in self.BuildOption[Tool]:
# check if override is indicated
if self.BuildOption[Tool][Attr].startswith('='):
Value = self.BuildOption[Tool][Attr][1:]
else:
if Attr != 'PATH':
Value += " " + self.BuildOption[Tool][Attr]
else:
Value = self.BuildOption[Tool][Attr]
if Attr == "PATH":
# Don't put MAKE definition in the file
if Tool != "MAKE":
ToolsDef += "%s = %s\n" % (Tool, Value)
elif Attr != "DLL":
# Don't put MAKE definition in the file
if Tool == "MAKE":
if Attr == "FLAGS":
MakeFlags = Value
else:
ToolsDef += "%s_%s = %s\n" % (Tool, Attr, Value)
ToolsDef += "\n"
SaveFileOnChange(self.ToolDefinitionFile, ToolsDef)
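# Editorial note (illustrative, based on the formatting above): the generated
# TOOLS_DEF.<Arch> file contains lines such as
#   CC = /path/to/gcc
#   CC_FLAGS = -Os -Wall
# where PATH attributes use "TOOL = value" and other attributes use
# "TOOL_ATTR = value"; MAKE settings are intentionally left out of the file.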
for DllPath in DllPathList:
os.environ["PATH"] = DllPath + os.pathsep + os.environ["PATH"]
os.environ["MAKE_FLAGS"] = MakeFlags
return self._ToolDefinitions
## Return the paths of tools
def _GetToolDefFile(self):
if self._ToolDefFile is None:
self._ToolDefFile = os.path.join(self.MakeFileDir, "TOOLS_DEF." + self.Arch)
return self._ToolDefFile
## Retrieve the toolchain family of given toolchain tag. Default to 'MSFT'.
def _GetToolChainFamily(self):
if self._ToolChainFamily is None:
ToolDefinition = self.Workspace.ToolDef.ToolsDefTxtDatabase
if TAB_TOD_DEFINES_FAMILY not in ToolDefinition \
or self.ToolChain not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][self.ToolChain]:
EdkLogger.verbose("No tool chain family found in configuration for %s. Default to MSFT." \
% self.ToolChain)
self._ToolChainFamily = "MSFT"
else:
self._ToolChainFamily = ToolDefinition[TAB_TOD_DEFINES_FAMILY][self.ToolChain]
return self._ToolChainFamily
def _GetBuildRuleFamily(self):
if self._BuildRuleFamily is None:
ToolDefinition = self.Workspace.ToolDef.ToolsDefTxtDatabase
if TAB_TOD_DEFINES_BUILDRULEFAMILY not in ToolDefinition \
or self.ToolChain not in ToolDefinition[TAB_TOD_DEFINES_BUILDRULEFAMILY] \
or not ToolDefinition[TAB_TOD_DEFINES_BUILDRULEFAMILY][self.ToolChain]:
EdkLogger.verbose("No tool chain family found in configuration for %s. Default to MSFT." \
% self.ToolChain)
self._BuildRuleFamily = "MSFT"
else:
self._BuildRuleFamily = ToolDefinition[TAB_TOD_DEFINES_BUILDRULEFAMILY][self.ToolChain]
return self._BuildRuleFamily
## Return the build options specific for all modules in this platform
def _GetBuildOptions(self):
if self._BuildOption is None:
self._BuildOption = self._ExpandBuildOption(self.Platform.BuildOptions)
return self._BuildOption
## Return the build options specific for EDK modules in this platform
def _GetEdkBuildOptions(self):
if self._EdkBuildOption is None:
self._EdkBuildOption = self._ExpandBuildOption(self.Platform.BuildOptions, EDK_NAME)
return self._EdkBuildOption
## Return the build options specific for EDKII modules in this platform
def _GetEdkIIBuildOptions(self):
if self._EdkIIBuildOption is None:
self._EdkIIBuildOption = self._ExpandBuildOption(self.Platform.BuildOptions, EDKII_NAME)
return self._EdkIIBuildOption
## Parse build_rule.txt in Conf Directory.
#
# @retval BuildRule object
#
def _GetBuildRule(self):
if self._BuildRule is None:
BuildRuleFile = None
if TAB_TAT_DEFINES_BUILD_RULE_CONF in self.Workspace.TargetTxt.TargetTxtDictionary:
BuildRuleFile = self.Workspace.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_BUILD_RULE_CONF]
if not BuildRuleFile:
BuildRuleFile = gDefaultBuildRuleFile
self._BuildRule = BuildRule(BuildRuleFile)
if self._BuildRule._FileVersion == "":
self._BuildRule._FileVersion = AutoGenReqBuildRuleVerNum
else:
if self._BuildRule._FileVersion < AutoGenReqBuildRuleVerNum :
# If the build rule's version is less than the version number required by the tools, halt the build.
EdkLogger.error("build", AUTOGEN_ERROR,
ExtraData="The version number [%s] of build_rule.txt is less than the version number required by the AutoGen.(the minimum required version number is [%s])"\
% (self._BuildRule._FileVersion, AutoGenReqBuildRuleVerNum))
return self._BuildRule
## Summarize the packages used by modules in this platform
def _GetPackageList(self):
if self._PackageList is None:
self._PackageList = set()
for La in self.LibraryAutoGenList:
self._PackageList.update(La.DependentPackageList)
for Ma in self.ModuleAutoGenList:
self._PackageList.update(Ma.DependentPackageList)
# Collect package set information from INFs listed in the FDF
PkgSet = set()
for ModuleFile in self._AsBuildModuleList:
if ModuleFile in self.Platform.Modules:
continue
ModuleData = self.BuildDatabase[ModuleFile, self.Arch, self.BuildTarget, self.ToolChain]
PkgSet.update(ModuleData.Packages)
self._PackageList = list(self._PackageList) + list (PkgSet)
return self._PackageList
def _GetNonDynamicPcdDict(self):
if self._NonDynamicPcdDict:
return self._NonDynamicPcdDict
for Pcd in self.NonDynamicPcdList:
self._NonDynamicPcdDict[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName)] = Pcd
return self._NonDynamicPcdDict
## Get list of non-dynamic PCDs
def _GetNonDynamicPcdList(self):
if self._NonDynamicPcdList is None:
self.CollectPlatformDynamicPcds()
return self._NonDynamicPcdList
## Get list of dynamic PCDs
def _GetDynamicPcdList(self):
if self._DynamicPcdList is None:
self.CollectPlatformDynamicPcds()
return self._DynamicPcdList
## Generate Token Numbers for all PCDs
def _GetPcdTokenNumbers(self):
if self._PcdTokenNumber is None:
self._PcdTokenNumber = OrderedDict()
TokenNumber = 1
#
# Make the Dynamic and DynamicEx PCDs use separate TokenNumber ranges.
# Such as:
#
# Dynamic PCD:
# TokenNumber 0 ~ 10
# DynamicEx PCD:
# TokenNumber 11 ~ 20
#
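# Illustrative numbering (hypothetical PCDs): with two PEI Dynamic PCDs, one
# PEI DynamicEx PCD, one DXE Dynamic PCD and one non-dynamic PCD, the loops
# below assign token numbers 1,2 (PEI Dynamic), 3 (PEI DynamicEx),
# 4 (DXE Dynamic), then 5 for the non-dynamic PCD.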
for Pcd in self.DynamicPcdList:
if Pcd.Phase == "PEI" and Pcd.Type in PCD_DYNAMIC_TYPE_SET:
EdkLogger.debug(EdkLogger.DEBUG_5, "%s %s (%s) -> %d" % (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Pcd.Phase, TokenNumber))
self._PcdTokenNumber[Pcd.TokenCName, Pcd.TokenSpaceGuidCName] = TokenNumber
TokenNumber += 1
for Pcd in self.DynamicPcdList:
if Pcd.Phase == "PEI" and Pcd.Type in PCD_DYNAMIC_EX_TYPE_SET:
EdkLogger.debug(EdkLogger.DEBUG_5, "%s %s (%s) -> %d" % (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Pcd.Phase, TokenNumber))
self._PcdTokenNumber[Pcd.TokenCName, Pcd.TokenSpaceGuidCName] = TokenNumber
TokenNumber += 1
for Pcd in self.DynamicPcdList:
if Pcd.Phase == "DXE" and Pcd.Type in PCD_DYNAMIC_TYPE_SET:
EdkLogger.debug(EdkLogger.DEBUG_5, "%s %s (%s) -> %d" % (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Pcd.Phase, TokenNumber))
self._PcdTokenNumber[Pcd.TokenCName, Pcd.TokenSpaceGuidCName] = TokenNumber
TokenNumber += 1
for Pcd in self.DynamicPcdList:
if Pcd.Phase == "DXE" and Pcd.Type in PCD_DYNAMIC_EX_TYPE_SET:
EdkLogger.debug(EdkLogger.DEBUG_5, "%s %s (%s) -> %d" % (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Pcd.Phase, TokenNumber))
self._PcdTokenNumber[Pcd.TokenCName, Pcd.TokenSpaceGuidCName] = TokenNumber
TokenNumber += 1
for Pcd in self.NonDynamicPcdList:
self._PcdTokenNumber[Pcd.TokenCName, Pcd.TokenSpaceGuidCName] = TokenNumber
TokenNumber += 1
return self._PcdTokenNumber
## Summarize ModuleAutoGen objects of all modules/libraries to be built for this platform
def _GetAutoGenObjectList(self):
self._ModuleAutoGenList = []
self._LibraryAutoGenList = []
for ModuleFile in self.Platform.Modules:
Ma = ModuleAutoGen(
self.Workspace,
ModuleFile,
self.BuildTarget,
self.ToolChain,
self.Arch,
self.MetaFile
)
if Ma not in self._ModuleAutoGenList:
self._ModuleAutoGenList.append(Ma)
for La in Ma.LibraryAutoGenList:
if La not in self._LibraryAutoGenList:
self._LibraryAutoGenList.append(La)
if Ma not in La._ReferenceModules:
La._ReferenceModules.append(Ma)
## Summarize ModuleAutoGen objects of all modules to be built for this platform
def _GetModuleAutoGenList(self):
if self._ModuleAutoGenList is None:
self._GetAutoGenObjectList()
return self._ModuleAutoGenList
## Summarize ModuleAutoGen objects of all libraries to be built for this platform
def _GetLibraryAutoGenList(self):
if self._LibraryAutoGenList is None:
self._GetAutoGenObjectList()
return self._LibraryAutoGenList
## Test if a module is supported by the platform
#
# An error will be raised directly if the module or its arch is not supported
# by the platform or current configuration
#
def ValidModule(self, Module):
return Module in self.Platform.Modules or Module in self.Platform.LibraryInstances \
or Module in self._AsBuildModuleList
## Resolve the library classes in a module to library instances
#
# This method will not only resolve library classes but also sort the library
# instances according to their dependency relationships.
#
# @param Module The module from which the library classes will be resolved
#
# @retval library_list List of library instances sorted
#
def ApplyLibraryInstance(self, Module):
# Cover the case where a binary INF file is listed in the FDF file but not the DSC file; return an empty list directly
if str(Module) not in self.Platform.Modules:
return []
return GetModuleLibInstances(Module,
self.Platform,
self.BuildDatabase,
self.Arch,
self.BuildTarget,
self.ToolChain,
self.MetaFile,
EdkLogger)
## Override PCD setting (type, value, ...)
#
# @param ToPcd The PCD to be overridden
# @param FromPcd The PCD to override from
#
def _OverridePcd(self, ToPcd, FromPcd, Module="", Msg="", Library=""):
#
# in case there are PCDs coming from the FDF file, which have no type given.
# at this point, ToPcd.Type has the type found from the dependent
# package
#
TokenCName = ToPcd.TokenCName
for PcdItem in GlobalData.MixedPcd:
if (ToPcd.TokenCName, ToPcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdItem]:
TokenCName = PcdItem[0]
break
if FromPcd is not None:
if ToPcd.Pending and FromPcd.Type:
ToPcd.Type = FromPcd.Type
elif ToPcd.Type and FromPcd.Type\
and ToPcd.Type != FromPcd.Type and ToPcd.Type in FromPcd.Type:
if ToPcd.Type.strip() == TAB_PCDS_DYNAMIC_EX:
ToPcd.Type = FromPcd.Type
elif ToPcd.Type and FromPcd.Type \
and ToPcd.Type != FromPcd.Type:
if Library:
Module = str(Module) + " 's library file (" + str(Library) + ")"
EdkLogger.error("build", OPTION_CONFLICT, "Mismatched PCD type",
ExtraData="%s.%s is used as [%s] in module %s, but as [%s] in %s."\
% (ToPcd.TokenSpaceGuidCName, TokenCName,
ToPcd.Type, Module, FromPcd.Type, Msg),
File=self.MetaFile)
if FromPcd.MaxDatumSize:
ToPcd.MaxDatumSize = FromPcd.MaxDatumSize
ToPcd.MaxSizeUserSet = FromPcd.MaxDatumSize
if FromPcd.DefaultValue:
ToPcd.DefaultValue = FromPcd.DefaultValue
if FromPcd.TokenValue:
ToPcd.TokenValue = FromPcd.TokenValue
if FromPcd.DatumType:
ToPcd.DatumType = FromPcd.DatumType
if FromPcd.SkuInfoList:
ToPcd.SkuInfoList = FromPcd.SkuInfoList
# Add Flexible PCD format parse
if ToPcd.DefaultValue:
try:
ToPcd.DefaultValue = ValueExpressionEx(ToPcd.DefaultValue, ToPcd.DatumType, self._GuidDict)(True)
except BadExpression as Value:
EdkLogger.error('Parser', FORMAT_INVALID, 'PCD [%s.%s] Value "%s", %s' %(ToPcd.TokenSpaceGuidCName, ToPcd.TokenCName, ToPcd.DefaultValue, Value),
File=self.MetaFile)
# check the validation of datum
IsValid, Cause = CheckPcdDatum(ToPcd.DatumType, ToPcd.DefaultValue)
if not IsValid:
EdkLogger.error('build', FORMAT_INVALID, Cause, File=self.MetaFile,
ExtraData="%s.%s" % (ToPcd.TokenSpaceGuidCName, TokenCName))
ToPcd.validateranges = FromPcd.validateranges
ToPcd.validlists = FromPcd.validlists
ToPcd.expressions = FromPcd.expressions
if FromPcd is not None and ToPcd.DatumType == TAB_VOID and not ToPcd.MaxDatumSize:
EdkLogger.debug(EdkLogger.DEBUG_9, "No MaxDatumSize specified for PCD %s.%s" \
% (ToPcd.TokenSpaceGuidCName, TokenCName))
Value = ToPcd.DefaultValue
if not Value:
ToPcd.MaxDatumSize = '1'
elif Value[0] == 'L':
ToPcd.MaxDatumSize = str((len(Value) - 2) * 2)
elif Value[0] == '{':
ToPcd.MaxDatumSize = str(len(Value.split(',')))
else:
ToPcd.MaxDatumSize = str(len(Value) - 1)
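# Illustrative sizes (hypothetical values): L"abc" -> (6 - 2) * 2 = 8 bytes
# (UTF-16 chars plus terminator), {0x1,0x2,0x3} -> 3 bytes (one per element),
# "abc" -> 5 - 1 = 4 bytes (ASCII chars plus terminator).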
# apply the default SKU for dynamic PCDs if the specified one is not available
if (ToPcd.Type in PCD_DYNAMIC_TYPE_SET or ToPcd.Type in PCD_DYNAMIC_EX_TYPE_SET) \
and not ToPcd.SkuInfoList:
if self.Platform.SkuName in self.Platform.SkuIds:
SkuName = self.Platform.SkuName
else:
SkuName = TAB_DEFAULT
ToPcd.SkuInfoList = {
SkuName : SkuInfoClass(SkuName, self.Platform.SkuIds[SkuName][0], '', '', '', '', '', ToPcd.DefaultValue)
}
## Apply PCD settings defined in the platform to a module
#
# @param Module The module whose PCD settings will be overridden
#
# @retval PCD_list The list of PCDs with settings from the platform
#
def ApplyPcdSetting(self, Module, Pcds, Library=""):
# for each PCD in module
for Name, Guid in Pcds:
PcdInModule = Pcds[Name, Guid]
# find out the PCD setting in platform
if (Name, Guid) in self.Platform.Pcds:
PcdInPlatform = self.Platform.Pcds[Name, Guid]
else:
PcdInPlatform = None
# then override the settings if any
self._OverridePcd(PcdInModule, PcdInPlatform, Module, Msg="DSC PCD sections", Library=Library)
# resolve the VariableGuid value
for SkuId in PcdInModule.SkuInfoList:
Sku = PcdInModule.SkuInfoList[SkuId]
if Sku.VariableGuid == '': continue
Sku.VariableGuidValue = GuidValue(Sku.VariableGuid, self.PackageList, self.MetaFile.Path)
if Sku.VariableGuidValue is None:
PackageList = "\n\t".join(str(P) for P in self.PackageList)
EdkLogger.error(
'build',
RESOURCE_NOT_AVAILABLE,
"Value of GUID [%s] is not found in" % Sku.VariableGuid,
ExtraData=PackageList + "\n\t(used with %s.%s from module %s)" \
% (Guid, Name, str(Module)),
File=self.MetaFile
)
# override PCD settings with module specific setting
if Module in self.Platform.Modules:
PlatformModule = self.Platform.Modules[str(Module)]
for Key in PlatformModule.Pcds:
Flag = False
if Key in Pcds:
ToPcd = Pcds[Key]
Flag = True
elif Key in GlobalData.MixedPcd:
for PcdItem in GlobalData.MixedPcd[Key]:
if PcdItem in Pcds:
ToPcd = Pcds[PcdItem]
Flag = True
break
if Flag:
self._OverridePcd(ToPcd, PlatformModule.Pcds[Key], Module, Msg="DSC Components Module scoped PCD section", Library=Library)
# use PCD value to calculate the MaxDatumSize when it is not specified
for Name, Guid in Pcds:
Pcd = Pcds[Name, Guid]
if Pcd.DatumType == TAB_VOID and not Pcd.MaxDatumSize:
Pcd.MaxSizeUserSet = None
Value = Pcd.DefaultValue
if not Value:
Pcd.MaxDatumSize = '1'
elif Value[0] == 'L':
Pcd.MaxDatumSize = str((len(Value) - 2) * 2)
elif Value[0] == '{':
Pcd.MaxDatumSize = str(len(Value.split(',')))
else:
Pcd.MaxDatumSize = str(len(Value) - 1)
return Pcds.values()
## Resolve library names to library modules
#
# (for Edk.x modules)
#
# @param Module The module from which the library names will be resolved
#
# @retval library_list The list of library modules
#
def ResolveLibraryReference(self, Module):
EdkLogger.verbose("")
EdkLogger.verbose("Library instances of module [%s] [%s]:" % (str(Module), self.Arch))
LibraryConsumerList = [Module]
# "CompilerStub" is a must for Edk modules
if Module.Libraries:
Module.Libraries.append("CompilerStub")
LibraryList = []
while len(LibraryConsumerList) > 0:
M = LibraryConsumerList.pop()
for LibraryName in M.Libraries:
Library = self.Platform.LibraryClasses[LibraryName, ':dummy:']
if Library is None:
for Key in self.Platform.LibraryClasses.data:
if LibraryName.upper() == Key.upper():
Library = self.Platform.LibraryClasses[Key, ':dummy:']
break
if Library is None:
EdkLogger.warn("build", "Library [%s] is not found" % LibraryName, File=str(M),
ExtraData="\t%s [%s]" % (str(Module), self.Arch))
continue
if Library not in LibraryList:
LibraryList.append(Library)
LibraryConsumerList.append(Library)
EdkLogger.verbose("\t" + LibraryName + " : " + str(Library) + ' ' + str(type(Library)))
return LibraryList
## Calculate the priority value of the build option
#
# @param Key Build option definition containing: TARGET_TOOLCHAIN_ARCH_COMMANDTYPE_ATTRIBUTE
#
# @retval Value Priority value based on the priority list.
#
def CalculatePriorityValue(self, Key):
Target, ToolChain, Arch, CommandType, Attr = Key.split('_')
PriorityValue = 0x11111
if Target == "*":
PriorityValue &= 0x01111
if ToolChain == "*":
PriorityValue &= 0x10111
if Arch == "*":
PriorityValue &= 0x11011
if CommandType == "*":
PriorityValue &= 0x11101
if Attr == "*":
PriorityValue &= 0x11110
return self.PrioList["0x%0.5x" % PriorityValue]
## Expand * in build option key
#
# @param Options Options to be expanded
#
# @retval options Options expanded
#
def _ExpandBuildOption(self, Options, ModuleStyle=None):
BuildOptions = {}
FamilyMatch = False
FamilyIsNull = True
OverrideList = {}
#
# Construct a list containing the build options which need to be overridden.
#
for Key in Options:
#
# Key[0] -- tool family
# Key[1] -- TARGET_TOOLCHAIN_ARCH_COMMANDTYPE_ATTRIBUTE
#
if (Key[0] == self.BuildRuleFamily and
(ModuleStyle is None or len(Key) < 3 or (len(Key) > 2 and Key[2] == ModuleStyle))):
Target, ToolChain, Arch, CommandType, Attr = Key[1].split('_')
if (Target == self.BuildTarget or Target == "*") and\
(ToolChain == self.ToolChain or ToolChain == "*") and\
(Arch == self.Arch or Arch == "*") and\
Options[Key].startswith("="):
if OverrideList.get(Key[1]) is not None:
OverrideList.pop(Key[1])
OverrideList[Key[1]] = Options[Key]
#
# Use the highest priority value.
#
if (len(OverrideList) >= 2):
KeyList = list(OverrideList.keys())
for Index in range(len(KeyList)):
NowKey = KeyList[Index]
Target1, ToolChain1, Arch1, CommandType1, Attr1 = NowKey.split("_")
for Index1 in range(len(KeyList) - Index - 1):
NextKey = KeyList[Index1 + Index + 1]
#
# Compare two keys; if one is included by the other, choose the higher priority one
#
Target2, ToolChain2, Arch2, CommandType2, Attr2 = NextKey.split("_")
if (Target1 == Target2 or Target1 == "*" or Target2 == "*") and\
(ToolChain1 == ToolChain2 or ToolChain1 == "*" or ToolChain2 == "*") and\
(Arch1 == Arch2 or Arch1 == "*" or Arch2 == "*") and\
(CommandType1 == CommandType2 or CommandType1 == "*" or CommandType2 == "*") and\
(Attr1 == Attr2 or Attr1 == "*" or Attr2 == "*"):
if self.CalculatePriorityValue(NowKey) > self.CalculatePriorityValue(NextKey):
if Options.get((self.BuildRuleFamily, NextKey)) is not None:
Options.pop((self.BuildRuleFamily, NextKey))
else:
if Options.get((self.BuildRuleFamily, NowKey)) is not None:
Options.pop((self.BuildRuleFamily, NowKey))
for Key in Options:
if ModuleStyle is not None and len (Key) > 2:
# Check whether the module style is EDK or EDKII.
# Only append build options for modules of the matched style.
if ModuleStyle == EDK_NAME and Key[2] != EDK_NAME:
continue
elif ModuleStyle == EDKII_NAME and Key[2] != EDKII_NAME:
continue
Family = Key[0]
Target, Tag, Arch, Tool, Attr = Key[1].split("_")
# if tool chain family doesn't match, skip it
if Tool in self.ToolDefinition and Family != "":
FamilyIsNull = False
if self.ToolDefinition[Tool].get(TAB_TOD_DEFINES_BUILDRULEFAMILY, "") != "":
if Family != self.ToolDefinition[Tool][TAB_TOD_DEFINES_BUILDRULEFAMILY]:
continue
elif Family != self.ToolDefinition[Tool][TAB_TOD_DEFINES_FAMILY]:
continue
FamilyMatch = True
# expand any wildcard
if Target == "*" or Target == self.BuildTarget:
if Tag == "*" or Tag == self.ToolChain:
if Arch == "*" or Arch == self.Arch:
if Tool not in BuildOptions:
BuildOptions[Tool] = {}
if Attr != "FLAGS" or Attr not in BuildOptions[Tool] or Options[Key].startswith('='):
BuildOptions[Tool][Attr] = Options[Key]
else:
# append options for the same tool except PATH
if Attr != 'PATH':
BuildOptions[Tool][Attr] += " " + Options[Key]
else:
BuildOptions[Tool][Attr] = Options[Key]
# The build option family has been checked and needn't be checked again.
if FamilyMatch or FamilyIsNull:
return BuildOptions
for Key in Options:
if ModuleStyle is not None and len (Key) > 2:
# Check whether the module style is EDK or EDKII.
# Only append build options for modules of the matched style.
if ModuleStyle == EDK_NAME and Key[2] != EDK_NAME:
continue
elif ModuleStyle == EDKII_NAME and Key[2] != EDKII_NAME:
continue
Family = Key[0]
Target, Tag, Arch, Tool, Attr = Key[1].split("_")
# if tool chain family doesn't match, skip it
if Tool not in self.ToolDefinition or Family == "":
continue
# option has been added before
if Family != self.ToolDefinition[Tool][TAB_TOD_DEFINES_FAMILY]:
continue
# expand any wildcard
if Target == "*" or Target == self.BuildTarget:
if Tag == "*" or Tag == self.ToolChain:
if Arch == "*" or Arch == self.Arch:
if Tool not in BuildOptions:
BuildOptions[Tool] = {}
if Attr != "FLAGS" or Attr not in BuildOptions[Tool] or Options[Key].startswith('='):
BuildOptions[Tool][Attr] = Options[Key]
else:
# append options for the same tool except PATH
if Attr != 'PATH':
BuildOptions[Tool][Attr] += " " + Options[Key]
else:
BuildOptions[Tool][Attr] = Options[Key]
return BuildOptions
## Append build options in platform to a module
#
# @param Module The module to which the build options will be appended
#
# @retval options The options appended with build options in platform
#
def ApplyBuildOption(self, Module):
# Get the different options for the different style module
if Module.AutoGenVersion < 0x00010005:
PlatformOptions = self.EdkBuildOption
ModuleTypeOptions = self.Platform.GetBuildOptionsByModuleType(EDK_NAME, Module.ModuleType)
else:
PlatformOptions = self.EdkIIBuildOption
ModuleTypeOptions = self.Platform.GetBuildOptionsByModuleType(EDKII_NAME, Module.ModuleType)
ModuleTypeOptions = self._ExpandBuildOption(ModuleTypeOptions)
ModuleOptions = self._ExpandBuildOption(Module.BuildOptions)
if Module in self.Platform.Modules:
PlatformModule = self.Platform.Modules[str(Module)]
PlatformModuleOptions = self._ExpandBuildOption(PlatformModule.BuildOptions)
else:
PlatformModuleOptions = {}
BuildRuleOrder = None
for Options in [self.ToolDefinition, ModuleOptions, PlatformOptions, ModuleTypeOptions, PlatformModuleOptions]:
for Tool in Options:
for Attr in Options[Tool]:
if Attr == TAB_TOD_DEFINES_BUILDRULEORDER:
BuildRuleOrder = Options[Tool][Attr]
AllTools = set(list(ModuleOptions.keys()) + list(PlatformOptions.keys()) +
list(PlatformModuleOptions.keys()) + list(ModuleTypeOptions.keys()) +
list(self.ToolDefinition.keys()))
BuildOptions = defaultdict(lambda: defaultdict(str))
for Tool in AllTools:
for Options in [self.ToolDefinition, ModuleOptions, PlatformOptions, ModuleTypeOptions, PlatformModuleOptions]:
if Tool not in Options:
continue
for Attr in Options[Tool]:
#
# Do not generate it in Makefile
#
if Attr == TAB_TOD_DEFINES_BUILDRULEORDER:
continue
Value = Options[Tool][Attr]
# check if override is indicated
if Value.startswith('='):
BuildOptions[Tool][Attr] = mws.handleWsMacro(Value[1:])
else:
if Attr != 'PATH':
BuildOptions[Tool][Attr] += " " + mws.handleWsMacro(Value)
else:
BuildOptions[Tool][Attr] = mws.handleWsMacro(Value)
if Module.AutoGenVersion < 0x00010005 and self.Workspace.UniFlag is not None:
#
# Override UNI flag only for EDK module.
#
BuildOptions['BUILD']['FLAGS'] = self.Workspace.UniFlag
return BuildOptions, BuildRuleOrder
Platform = property(_GetPlatform)
Name = property(_GetName)
Guid = property(_GetGuid)
Version = property(_GetVersion)
OutputDir = property(_GetOutputDir)
BuildDir = property(_GetBuildDir)
MakeFileDir = property(_GetMakeFileDir)
FdfFile = property(_GetFdfFile)
PcdTokenNumber = property(_GetPcdTokenNumbers) # (TokenCName, TokenSpaceGuidCName) : GeneratedTokenNumber
DynamicPcdList = property(_GetDynamicPcdList) # [(TokenCName1, TokenSpaceGuidCName1), (TokenCName2, TokenSpaceGuidCName2), ...]
NonDynamicPcdList = property(_GetNonDynamicPcdList) # [(TokenCName1, TokenSpaceGuidCName1), (TokenCName2, TokenSpaceGuidCName2), ...]
NonDynamicPcdDict = property(_GetNonDynamicPcdDict)
PackageList = property(_GetPackageList)
ToolDefinition = property(_GetToolDefinition) # toolcode : tool path
ToolDefinitionFile = property(_GetToolDefFile) # toolcode : lib path
ToolChainFamily = property(_GetToolChainFamily)
BuildRuleFamily = property(_GetBuildRuleFamily)
BuildOption = property(_GetBuildOptions) # toolcode : option
EdkBuildOption = property(_GetEdkBuildOptions) # edktoolcode : option
EdkIIBuildOption = property(_GetEdkIIBuildOptions) # edkiitoolcode : option
BuildCommand = property(_GetBuildCommand)
BuildRule = property(_GetBuildRule)
ModuleAutoGenList = property(_GetModuleAutoGenList)
LibraryAutoGenList = property(_GetLibraryAutoGenList)
GenFdsCommand = property(_GenFdsCommand)
#
# extend lists contained in a dictionary with lists stored in another dictionary
# if CopyToDict is not derived from defaultdict(list) then this may raise an exception
#
def ExtendCopyDictionaryLists(CopyToDict, CopyFromDict):
for Key in CopyFromDict:
CopyToDict[Key].extend(CopyFromDict[Key])
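# Hypothetical usage sketch (CopyToDict must already behave like a defaultdict(list)):
#   d = defaultdict(list)
#   d['a'].append(1)
#   ExtendCopyDictionaryLists(d, {'a': [2], 'b': [3]})
#   # d == {'a': [1, 2], 'b': [3]}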
## ModuleAutoGen class
#
# This class encapsulates the AutoGen behaviors for the build tools. In addition to
# the generation of AutoGen.h and AutoGen.c, it will generate the *.depex file according
# to the [depex] section in the module's INF file.
#
class ModuleAutoGen(AutoGen):
# call super().__init__ then call the worker function with different parameter count
def __init__(self, Workspace, MetaFile, Target, Toolchain, Arch, *args, **kwargs):
try:
self._Init
except:
super(ModuleAutoGen, self).__init__(Workspace, MetaFile, Target, Toolchain, Arch, *args, **kwargs)
self._InitWorker(Workspace, MetaFile, Target, Toolchain, Arch, *args)
self._Init = True
## Cache the timestamps of metafiles of every module in a class variable
#
TimeDict = {}
def __new__(cls, Workspace, MetaFile, Target, Toolchain, Arch, *args, **kwargs):
obj = super(ModuleAutoGen, cls).__new__(cls, Workspace, MetaFile, Target, Toolchain, Arch, *args, **kwargs)
# check if this module is employed by active platform
if not PlatformAutoGen(Workspace, args[0], Target, Toolchain, Arch).ValidModule(MetaFile):
EdkLogger.verbose("Module [%s] for [%s] is not employed by active platform\n" \
% (MetaFile, Arch))
return None
return obj
## Initialize ModuleAutoGen
#
# @param Workspace EdkIIWorkspaceBuild object
# @param ModuleFile The path of module file
# @param Target Build target (DEBUG, RELEASE)
# @param Toolchain Name of tool chain
# @param Arch The arch the module supports
# @param PlatformFile Platform meta-file
#
def _InitWorker(self, Workspace, ModuleFile, Target, Toolchain, Arch, PlatformFile):
EdkLogger.debug(EdkLogger.DEBUG_9, "AutoGen module [%s] [%s]" % (ModuleFile, Arch))
GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (ModuleFile, Arch, Toolchain, Target)
self.Workspace = Workspace
self.WorkspaceDir = Workspace.WorkspaceDir
self._GuidDict = Workspace._GuidDict
self.MetaFile = ModuleFile
self.PlatformInfo = PlatformAutoGen(Workspace, PlatformFile, Target, Toolchain, Arch)
self.SourceDir = self.MetaFile.SubDir
self.SourceDir = mws.relpath(self.SourceDir, self.WorkspaceDir)
self.SourceOverrideDir = None
# use the overridden path defined in the DSC file
if self.MetaFile.Key in GlobalData.gOverrideDir:
self.SourceOverrideDir = GlobalData.gOverrideDir[self.MetaFile.Key]
self.ToolChain = Toolchain
self.BuildTarget = Target
self.Arch = Arch
self.ToolChainFamily = self.PlatformInfo.ToolChainFamily
self.BuildRuleFamily = self.PlatformInfo.BuildRuleFamily
self.IsMakeFileCreated = False
self.IsCodeFileCreated = False
self.IsAsBuiltInfCreated = False
self.DepexGenerated = False
self.BuildDatabase = self.Workspace.BuildDatabase
self.BuildRuleOrder = None
self.BuildTime = 0
self._Module = None
self._Name = None
self._Guid = None
self._Version = None
self._ModuleType = None
self._ComponentType = None
self._PcdIsDriver = None
self._AutoGenVersion = None
self._LibraryFlag = None
self._CustomMakefile = None
self._Macro = None
self._BuildDir = None
self._OutputDir = None
self._FfsOutputDir = None
self._DebugDir = None
self._MakeFileDir = None
self._IncludePathList = None
self._AutoGenFileList = None
self._UnicodeFileList = None
self._VfrFileList = None
self._IdfFileList = None
self._SourceFileList = None
self._ObjectFileList = None
self._BinaryFileList = None
self._DependentPackageList = None
self._DependentLibraryList = None
self._LibraryAutoGenList = None
self._DerivedPackageList = None
self._ModulePcdList = None
self._LibraryPcdList = None
self._PcdComments = OrderedListDict()
self._GuidList = None
self._GuidsUsedByPcd = None
self._GuidComments = OrderedListDict()
self._ProtocolList = None
self._ProtocolComments = OrderedListDict()
self._PpiList = None
self._PpiComments = OrderedListDict()
self._DepexDict = None
self._DepexExpressionDict = None
self._BuildOption = None
self._BuildOptionIncPathList = None
self._BuildTargets = None
self._IntroBuildTargetList = None
self._FinalBuildTargetList = None
self._FileTypes = None
self._BuildRules = None
self._TimeStampPath = None
self.AutoGenDepSet = set()
## The modules that reference this library
# Only a library has this attribute
self._ReferenceModules = []
## Store the FixedAtBuild Pcds
#
self._FixedAtBuildPcds = []
self.ConstPcd = {}
## Store the VOID* type FixedAtBuild PCDs
#
self._FixedPcdVoidTypeDict = {}
def __repr__(self):
return "%s [%s]" % (self.MetaFile, self.Arch)
# Get FixedAtBuild Pcds of this Module
def _GetFixedAtBuildPcds(self):
if self._FixedAtBuildPcds:
return self._FixedAtBuildPcds
for Pcd in self.ModulePcdList:
if Pcd.Type != TAB_PCDS_FIXED_AT_BUILD:
continue
if Pcd not in self._FixedAtBuildPcds:
self._FixedAtBuildPcds.append(Pcd)
return self._FixedAtBuildPcds
def _GetFixedAtBuildVoidTypePcds(self):
if self._FixedPcdVoidTypeDict:
return self._FixedPcdVoidTypeDict
for Pcd in self.ModulePcdList:
if Pcd.Type == TAB_PCDS_FIXED_AT_BUILD and Pcd.DatumType == TAB_VOID:
if '{}.{}'.format(Pcd.TokenSpaceGuidCName, Pcd.TokenCName) not in self._FixedPcdVoidTypeDict:
self._FixedPcdVoidTypeDict['{}.{}'.format(Pcd.TokenSpaceGuidCName, Pcd.TokenCName)] = Pcd.DefaultValue
return self._FixedPcdVoidTypeDict
def _GetUniqueBaseName(self):
BaseName = self.Name
for Module in self.PlatformInfo.ModuleAutoGenList:
if Module.MetaFile == self.MetaFile:
continue
if Module.Name == self.Name:
if uuid.UUID(Module.Guid) == uuid.UUID(self.Guid):
EdkLogger.error("build", FILE_DUPLICATED, 'Modules have same BaseName and FILE_GUID:\n'
' %s\n %s' % (Module.MetaFile, self.MetaFile))
BaseName = '%s_%s' % (self.Name, self.Guid)
return BaseName
# Macros could be used in build_rule.txt (also Makefile)
def _GetMacros(self):
if self._Macro is None:
self._Macro = OrderedDict()
self._Macro["WORKSPACE" ] = self.WorkspaceDir
self._Macro["MODULE_NAME" ] = self.Name
self._Macro["MODULE_NAME_GUID" ] = self.UniqueBaseName
self._Macro["MODULE_GUID" ] = self.Guid
self._Macro["MODULE_VERSION" ] = self.Version
self._Macro["MODULE_TYPE" ] = self.ModuleType
self._Macro["MODULE_FILE" ] = str(self.MetaFile)
self._Macro["MODULE_FILE_BASE_NAME" ] = self.MetaFile.BaseName
self._Macro["MODULE_RELATIVE_DIR" ] = self.SourceDir
self._Macro["MODULE_DIR" ] = self.SourceDir
self._Macro["BASE_NAME" ] = self.Name
self._Macro["ARCH" ] = self.Arch
self._Macro["TOOLCHAIN" ] = self.ToolChain
self._Macro["TOOLCHAIN_TAG" ] = self.ToolChain
self._Macro["TOOL_CHAIN_TAG" ] = self.ToolChain
self._Macro["TARGET" ] = self.BuildTarget
self._Macro["BUILD_DIR" ] = self.PlatformInfo.BuildDir
self._Macro["BIN_DIR" ] = os.path.join(self.PlatformInfo.BuildDir, self.Arch)
self._Macro["LIB_DIR" ] = os.path.join(self.PlatformInfo.BuildDir, self.Arch)
self._Macro["MODULE_BUILD_DIR" ] = self.BuildDir
self._Macro["OUTPUT_DIR" ] = self.OutputDir
self._Macro["DEBUG_DIR" ] = self.DebugDir
self._Macro["DEST_DIR_OUTPUT" ] = self.OutputDir
self._Macro["DEST_DIR_DEBUG" ] = self.DebugDir
self._Macro["PLATFORM_NAME" ] = self.PlatformInfo.Name
self._Macro["PLATFORM_GUID" ] = self.PlatformInfo.Guid
self._Macro["PLATFORM_VERSION" ] = self.PlatformInfo.Version
self._Macro["PLATFORM_RELATIVE_DIR" ] = self.PlatformInfo.SourceDir
self._Macro["PLATFORM_DIR" ] = mws.join(self.WorkspaceDir, self.PlatformInfo.SourceDir)
self._Macro["PLATFORM_OUTPUT_DIR" ] = self.PlatformInfo.OutputDir
self._Macro["FFS_OUTPUT_DIR" ] = self.FfsOutputDir
return self._Macro
## Return the module build data object
def _GetModule(self):
if self._Module is None:
self._Module = self.Workspace.BuildDatabase[self.MetaFile, self.Arch, self.BuildTarget, self.ToolChain]
return self._Module
## Return the module name
def _GetBaseName(self):
return self.Module.BaseName
## Return the module DxsFile if it exists
def _GetDxsFile(self):
return self.Module.DxsFile
## Return the module SourceOverridePath
def _GetSourceOverridePath(self):
return self.Module.SourceOverridePath
## Return the module meta-file GUID
def _GetGuid(self):
#
# To build the same module more than once, the module path with FILE_GUID overridden has
# the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
# in the DSC. The overridden GUID can be retrieved from the file name
#
if os.path.basename(self.MetaFile.File) != os.path.basename(self.MetaFile.Path):
#
# Length of GUID is 36
#
return os.path.basename(self.MetaFile.Path)[:36]
return self.Module.Guid
## Return the module version
def _GetVersion(self):
return self.Module.Version
## Return the module type
def _GetModuleType(self):
return self.Module.ModuleType
## Return the component type (for Edk.x style of module)
def _GetComponentType(self):
return self.Module.ComponentType
## Return the build type
def _GetBuildType(self):
return self.Module.BuildType
## Return the PCD_IS_DRIVER setting
def _GetPcdIsDriver(self):
return self.Module.PcdIsDriver
## Return the autogen version, i.e. module meta-file version
def _GetAutoGenVersion(self):
return self.Module.AutoGenVersion
## Check if the module is library or not
def _IsLibrary(self):
if self._LibraryFlag is None:
self._LibraryFlag = True if self.Module.LibraryClass else False
return self._LibraryFlag
## Check if the module is binary module or not
def _IsBinaryModule(self):
return self.Module.IsBinaryModule
## Return the directory to store intermediate files of the module
def _GetBuildDir(self):
if self._BuildDir is None:
self._BuildDir = path.join(
self.PlatformInfo.BuildDir,
self.Arch,
self.SourceDir,
self.MetaFile.BaseName
)
CreateDirectory(self._BuildDir)
return self._BuildDir
## Return the directory to store the intermediate object files of the module
def _GetOutputDir(self):
if self._OutputDir is None:
self._OutputDir = path.join(self.BuildDir, "OUTPUT")
CreateDirectory(self._OutputDir)
return self._OutputDir
## Return the directory to store ffs file
def _GetFfsOutputDir(self):
if self._FfsOutputDir is None:
if GlobalData.gFdfParser is not None:
self._FfsOutputDir = path.join(self.PlatformInfo.BuildDir, TAB_FV_DIRECTORY, "Ffs", self.Guid + self.Name)
else:
self._FfsOutputDir = ''
return self._FfsOutputDir
## Return the directory to store auto-generated source files of the module
def _GetDebugDir(self):
if self._DebugDir is None:
self._DebugDir = path.join(self.BuildDir, "DEBUG")
CreateDirectory(self._DebugDir)
return self._DebugDir
## Return the path of custom file
def _GetCustomMakefile(self):
if self._CustomMakefile is None:
self._CustomMakefile = {}
for Type in self.Module.CustomMakefile:
if Type in gMakeTypeMap:
MakeType = gMakeTypeMap[Type]
else:
MakeType = 'nmake'
if self.SourceOverrideDir is not None:
File = os.path.join(self.SourceOverrideDir, self.Module.CustomMakefile[Type])
if not os.path.exists(File):
File = os.path.join(self.SourceDir, self.Module.CustomMakefile[Type])
else:
File = os.path.join(self.SourceDir, self.Module.CustomMakefile[Type])
self._CustomMakefile[MakeType] = File
return self._CustomMakefile
## Return the directory of the makefile
#
# @retval string The directory string of module's makefile
#
def _GetMakeFileDir(self):
return self.BuildDir
## Return build command string
#
# @retval string Build command string
#
def _GetBuildCommand(self):
return self.PlatformInfo.BuildCommand
## Get object list of all packages the module and its dependent libraries belong to
#
# @retval list The list of package object
#
def _GetDerivedPackageList(self):
PackageList = []
for M in [self.Module] + self.DependentLibraryList:
for Package in M.Packages:
if Package in PackageList:
continue
PackageList.append(Package)
return PackageList
## Get the depex string
#
# @return : a string containing all depex expressions.
def _GetDepexExpresionString(self):
DepexStr = ''
DepexList = []
## DPX_SOURCE IN Define section.
if self.Module.DxsFile:
return DepexStr
for M in [self.Module] + self.DependentLibraryList:
Filename = M.MetaFile.Path
InfObj = InfSectionParser.InfSectionParser(Filename)
DepexExpresionList = InfObj.GetDepexExpresionList()
for DepexExpresion in DepexExpresionList:
for key in DepexExpresion:
Arch, ModuleType = key
DepexExpr = [x for x in DepexExpresion[key] if not str(x).startswith('#')]
# the type of build module is USER_DEFINED.
# All different DEPEX section tags would be copied into the As Built INF file
# and there would be separate DEPEX section tags
if self.ModuleType.upper() == SUP_MODULE_USER_DEFINED:
if (Arch.upper() == self.Arch.upper()) and (ModuleType.upper() != TAB_ARCH_COMMON):
DepexList.append({(Arch, ModuleType): DepexExpr})
else:
if Arch.upper() == TAB_ARCH_COMMON or \
(Arch.upper() == self.Arch.upper() and \
ModuleType.upper() in [TAB_ARCH_COMMON, self.ModuleType.upper()]):
DepexList.append({(Arch, ModuleType): DepexExpr})
# the type of the build module is USER_DEFINED.
if self.ModuleType.upper() == SUP_MODULE_USER_DEFINED:
for Depex in DepexList:
for key in Depex:
DepexStr += '[Depex.%s.%s]\n' % key
DepexStr += '\n'.join('# '+ val for val in Depex[key])
DepexStr += '\n\n'
if not DepexStr:
return '[Depex.%s]\n' % self.Arch
return DepexStr
# the type of the build module is not USER_DEFINED.
Count = 0
for Depex in DepexList:
Count += 1
if DepexStr != '':
DepexStr += ' AND '
DepexStr += '('
for D in Depex.values():
DepexStr += ' '.join(val for val in D)
Index = DepexStr.find('END')
if Index > -1 and Index == len(DepexStr) - 3:
DepexStr = DepexStr[:-3]
DepexStr = DepexStr.strip()
DepexStr += ')'
if Count == 1:
DepexStr = DepexStr.lstrip('(').rstrip(')').strip()
if not DepexStr:
return '[Depex.%s]\n' % self.Arch
return '[Depex.%s]\n# ' % self.Arch + DepexStr
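# Illustrative output (hypothetical GUID names): for a non-USER_DEFINED module
# depending on two protocols the returned string would look like
#   [Depex.X64]
#   # gSomeProtocolGuid AND gOtherProtocolGuid
# whereas USER_DEFINED modules keep one [Depex.<Arch>.<ModuleType>] block per
# DEPEX section found.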
## Merge dependency expression
#
# @retval list The token list of the dependency expression after parsed
#
def _GetDepexTokenList(self):
if self._DepexDict is None:
self._DepexDict = {}
if self.DxsFile or self.IsLibrary or TAB_DEPENDENCY_EXPRESSION_FILE in self.FileTypes:
return self._DepexDict
self._DepexDict[self.ModuleType] = []
self._GetFixedAtBuildVoidTypePcds()
for ModuleType in self._DepexDict:
DepexList = self._DepexDict[ModuleType]
#
# Append depex from dependent libraries, if it is not a "BEFORE" or "AFTER" expression
#
for M in [self.Module] + self.DependentLibraryList:
Inherited = False
for D in M.Depex[self.Arch, ModuleType]:
if DepexList != []:
DepexList.append('AND')
DepexList.append('(')
# replace D with its value if D is a FixedAtBuild PCD
NewList = []
for item in D:
if '.' not in item:
NewList.append(item)
else:
if item not in self._FixedPcdVoidTypeDict:
EdkLogger.error("build", FORMAT_INVALID, "{} used in [Depex] section should be used as FixedAtBuild type and VOID* datum type in the module.".format(item))
else:
Value = self._FixedPcdVoidTypeDict[item]
if len(Value.split(',')) != 16:
EdkLogger.error("build", FORMAT_INVALID,
"{} used in [Depex] section should be used as FixedAtBuild type and VOID* datum type and 16 bytes in the module.".format(item))
NewList.append(Value)
DepexList.extend(NewList)
if DepexList[-1] == 'END': # no need for an END at this time
DepexList.pop()
DepexList.append(')')
Inherited = True
if Inherited:
EdkLogger.verbose("DEPEX[%s] (+%s) = %s" % (self.Name, M.BaseName, DepexList))
if 'BEFORE' in DepexList or 'AFTER' in DepexList:
break
if len(DepexList) > 0:
EdkLogger.verbose('')
return self._DepexDict
## Merge dependency expression
#
# @retval list The token list of the dependency expression after parsed
#
def _GetDepexExpressionTokenList(self):
if self._DepexExpressionDict is None:
self._DepexExpressionDict = {}
if self.DxsFile or self.IsLibrary or TAB_DEPENDENCY_EXPRESSION_FILE in self.FileTypes:
return self._DepexExpressionDict
self._DepexExpressionDict[self.ModuleType] = ''
for ModuleType in self._DepexExpressionDict:
DepexExpressionString = self._DepexExpressionDict[ModuleType]
#
# Append depex from dependent libraries, if it is not a "BEFORE" or "AFTER" expression
#
for M in [self.Module] + self.DependentLibraryList:
Inherited = False
for D in M.DepexExpression[self.Arch, ModuleType]:
if DepexExpressionString != '':
DepexExpressionString += ' AND '
DepexExpressionString += '('
DepexExpressionString += D
DepexExpressionString = DepexExpressionString.rstrip('END').strip()
DepexExpressionString += ')'
Inherited = True
if Inherited:
EdkLogger.verbose("DEPEX[%s] (+%s) = %s" % (self.Name, M.BaseName, DepexExpressionString))
if 'BEFORE' in DepexExpressionString or 'AFTER' in DepexExpressionString:
break
if len(DepexExpressionString) > 0:
EdkLogger.verbose('')
self._DepexExpressionDict[ModuleType] = DepexExpressionString
return self._DepexExpressionDict
# Get the TianoCore user extensions; they contain the dependent libraries.
# @retval: a list containing the TianoCore user extensions.
#
def _GetTianoCoreUserExtensionList(self):
TianoCoreUserExtentionList = []
for M in [self.Module] + self.DependentLibraryList:
Filename = M.MetaFile.Path
InfObj = InfSectionParser.InfSectionParser(Filename)
TianoCoreUserExtenList = InfObj.GetUserExtensionTianoCore()
for TianoCoreUserExtent in TianoCoreUserExtenList:
for Section in TianoCoreUserExtent:
ItemList = Section.split(TAB_SPLIT)
Arch = self.Arch
if len(ItemList) == 4:
Arch = ItemList[3]
if Arch.upper() == TAB_ARCH_COMMON or Arch.upper() == self.Arch.upper():
TianoCoreList = []
TianoCoreList.extend([TAB_SECTION_START + Section + TAB_SECTION_END])
TianoCoreList.extend(TianoCoreUserExtent[Section][:])
TianoCoreList.append('\n')
TianoCoreUserExtentionList.append(TianoCoreList)
return TianoCoreUserExtentionList
## Return the list of specification version required for the module
#
# @retval list The list of specification defined in module file
#
def _GetSpecification(self):
return self.Module.Specification
## Tool option for the module build
#
# @param PlatformInfo The object of PlatformBuildInfo
# @retval dict The dict containing valid options
#
def _GetModuleBuildOption(self):
if self._BuildOption is None:
self._BuildOption, self.BuildRuleOrder = self.PlatformInfo.ApplyBuildOption(self.Module)
if self.BuildRuleOrder:
self.BuildRuleOrder = ['.%s' % Ext for Ext in self.BuildRuleOrder.split()]
return self._BuildOption
## Get include path list from tool option for the module build
#
# @retval list The include path list
#
def _GetBuildOptionIncPathList(self):
if self._BuildOptionIncPathList is None:
#
# Regular expression for finding include directories; the difference between MSFT and INTEL/GCC/RVCT
# is that the former uses /I while the latter use -I to specify include directories
#
if self.PlatformInfo.ToolChainFamily in ('MSFT'):
BuildOptIncludeRegEx = gBuildOptIncludePatternMsft
elif self.PlatformInfo.ToolChainFamily in ('INTEL', 'GCC', 'RVCT'):
BuildOptIncludeRegEx = gBuildOptIncludePatternOther
else:
#
# New ToolChainFamily; don't know whether there is an option to specify include directories
#
self._BuildOptionIncPathList = []
return self._BuildOptionIncPathList
BuildOptionIncPathList = []
for Tool in ('CC', 'PP', 'VFRPP', 'ASLPP', 'ASLCC', 'APP', 'ASM'):
try:
FlagOption = self.BuildOption[Tool]['FLAGS']
except KeyError:
FlagOption = ''
if self.PlatformInfo.ToolChainFamily != 'RVCT':
IncPathList = [NormPath(Path, self.Macros) for Path in BuildOptIncludeRegEx.findall(FlagOption)]
else:
#
# RVCT may specify a list of directories separated by commas
#
IncPathList = []
for Path in BuildOptIncludeRegEx.findall(FlagOption):
PathList = GetSplitList(Path, TAB_COMMA_SPLIT)
IncPathList.extend(NormPath(PathEntry, self.Macros) for PathEntry in PathList)
#
# EDK II modules may only reference header files inside the packages they depend on or
# within the module's own directory tree. Report an error on violation.
#
if self.AutoGenVersion >= 0x00010005:
for Path in IncPathList:
if (Path not in self.IncludePathList) and (CommonPath([Path, self.MetaFile.Dir]) != self.MetaFile.Dir):
ErrMsg = "The include directory for the EDK II module in this line is invalid %s specified in %s FLAGS '%s'" % (Path, Tool, FlagOption)
EdkLogger.error("build",
PARAMETER_INVALID,
ExtraData=ErrMsg,
File=str(self.MetaFile))
BuildOptionIncPathList += IncPathList
self._BuildOptionIncPathList = BuildOptionIncPathList
return self._BuildOptionIncPathList
## Return a list of files which can be built from source
#
# What kind of files can be built is determined by build rules in
# $(CONF_DIRECTORY)/build_rule.txt and toolchain family.
#
def _GetSourceFileList(self):
if self._SourceFileList is None:
self._SourceFileList = []
ToolChainTagSet = {"", "*", self.ToolChain}
ToolChainFamilySet = {"", "*", self.ToolChainFamily, self.BuildRuleFamily}
for F in self.Module.Sources:
# match tool chain
if F.TagName not in ToolChainTagSet:
EdkLogger.debug(EdkLogger.DEBUG_9, "The toolchain [%s] for processing file [%s] is found, "
"but [%s] is currently used" % (F.TagName, str(F), self.ToolChain))
continue
# match tool chain family or build rule family
if F.ToolChainFamily not in ToolChainFamilySet:
EdkLogger.debug(
EdkLogger.DEBUG_0,
"The file [%s] must be built by tools of [%s], " \
"but current toolchain family is [%s], buildrule family is [%s]" \
% (str(F), F.ToolChainFamily, self.ToolChainFamily, self.BuildRuleFamily))
continue
# add the file path into search path list for file including
if F.Dir not in self.IncludePathList and self.AutoGenVersion >= 0x00010005:
self.IncludePathList.insert(0, F.Dir)
self._SourceFileList.append(F)
self._MatchBuildRuleOrder(self._SourceFileList)
for F in self._SourceFileList:
self._ApplyBuildRule(F, TAB_UNKNOWN_FILE)
return self._SourceFileList
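## Remove source files that are superseded by a higher-priority build rule
#
# When several sources share the same base name, only the file whose
# extension comes first in BuildRuleOrder is kept; the others are removed
# from the given file list.
#
# @param FileList The list of source files to filter in place
# @retval list The filtered file list
#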
def _MatchBuildRuleOrder(self, FileList):
Order_Dict = {}
self._GetModuleBuildOption()
for SingleFile in FileList:
if self.BuildRuleOrder and SingleFile.Ext in self.BuildRuleOrder and SingleFile.Ext in self.BuildRules:
key = SingleFile.Path.split(SingleFile.Ext)[0]
if key in Order_Dict:
Order_Dict[key].append(SingleFile.Ext)
else:
Order_Dict[key] = [SingleFile.Ext]
RemoveList = []
for F in Order_Dict:
if len(Order_Dict[F]) > 1:
Order_Dict[F].sort(key=lambda i: self.BuildRuleOrder.index(i))
for Ext in Order_Dict[F][1:]:
RemoveList.append(F + Ext)
for item in RemoveList:
FileList.remove(item)
return FileList
## Return the list of unicode files
def _GetUnicodeFileList(self):
if self._UnicodeFileList is None:
if TAB_UNICODE_FILE in self.FileTypes:
self._UnicodeFileList = self.FileTypes[TAB_UNICODE_FILE]
else:
self._UnicodeFileList = []
return self._UnicodeFileList
## Return the list of vfr files
def _GetVfrFileList(self):
if self._VfrFileList is None:
if TAB_VFR_FILE in self.FileTypes:
self._VfrFileList = self.FileTypes[TAB_VFR_FILE]
else:
self._VfrFileList = []
return self._VfrFileList
## Return the list of Image Definition files
def _GetIdfFileList(self):
if self._IdfFileList is None:
if TAB_IMAGE_FILE in self.FileTypes:
self._IdfFileList = self.FileTypes[TAB_IMAGE_FILE]
else:
self._IdfFileList = []
return self._IdfFileList
## Return a list of files which can be built from binary
#
# "Build" binary files are just to copy them to build directory.
#
# @retval list The list of files which can be built later
#
def _GetBinaryFiles(self):
if self._BinaryFileList is None:
self._BinaryFileList = []
for F in self.Module.Binaries:
if F.Target not in [TAB_ARCH_COMMON, '*'] and F.Target != self.BuildTarget:
continue
self._BinaryFileList.append(F)
self._ApplyBuildRule(F, F.Type)
return self._BinaryFileList
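## Get the build rules applicable to this module
#
# Rules come from the platform build rule database and are looked up first
# by build rule family and then by toolchain family; the resulting dict is
# keyed by both file type and source file extension.
#
# @retval dict The mapping from file type/extension to build rule object
#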
def _GetBuildRules(self):
if self._BuildRules is None:
BuildRules = {}
BuildRuleDatabase = self.PlatformInfo.BuildRule
for Type in BuildRuleDatabase.FileTypeList:
#first try getting build rule by BuildRuleFamily
RuleObject = BuildRuleDatabase[Type, self.BuildType, self.Arch, self.BuildRuleFamily]
if not RuleObject:
# build type is always module type, but ...
if self.ModuleType != self.BuildType:
RuleObject = BuildRuleDatabase[Type, self.ModuleType, self.Arch, self.BuildRuleFamily]
#second try getting build rule by ToolChainFamily
if not RuleObject:
RuleObject = BuildRuleDatabase[Type, self.BuildType, self.Arch, self.ToolChainFamily]
if not RuleObject:
# build type is always module type, but ...
if self.ModuleType != self.BuildType:
RuleObject = BuildRuleDatabase[Type, self.ModuleType, self.Arch, self.ToolChainFamily]
if not RuleObject:
continue
RuleObject = RuleObject.Instantiate(self.Macros)
BuildRules[Type] = RuleObject
for Ext in RuleObject.SourceFileExtList:
BuildRules[Ext] = RuleObject
self._BuildRules = BuildRules
return self._BuildRules
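## Apply build rules to a file and record the targets it produces
#
# The matching rule is applied repeatedly to the generated outputs until no
# rule matches, a library reaches its static library target, or a cyclic
# rule chain is detected.
#
# @param File The file to apply build rules to
# @param FileType The type of the given file
#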
def _ApplyBuildRule(self, File, FileType):
if self._BuildTargets is None:
self._IntroBuildTargetList = set()
self._FinalBuildTargetList = set()
self._BuildTargets = defaultdict(set)
self._FileTypes = defaultdict(set)
SubDirectory = os.path.join(self.OutputDir, File.SubDir)
if not os.path.exists(SubDirectory):
CreateDirectory(SubDirectory)
LastTarget = None
RuleChain = set()
SourceList = [File]
Index = 0
#
# Make sure to get build rule order value
#
self._GetModuleBuildOption()
while Index < len(SourceList):
Source = SourceList[Index]
Index = Index + 1
if Source != File:
CreateDirectory(Source.Dir)
if File.IsBinary and File == Source and self._BinaryFileList is not None and File in self._BinaryFileList:
# Skip all files that are not binary libraries
if not self.IsLibrary:
continue
RuleObject = self.BuildRules[TAB_DEFAULT_BINARY_FILE]
elif FileType in self.BuildRules:
RuleObject = self.BuildRules[FileType]
elif Source.Ext in self.BuildRules:
RuleObject = self.BuildRules[Source.Ext]
else:
# stop when no more rules apply
if LastTarget:
self._FinalBuildTargetList.add(LastTarget)
break
FileType = RuleObject.SourceFileType
self._FileTypes[FileType].add(Source)
# stop at STATIC_LIBRARY for library
if self.IsLibrary and FileType == TAB_STATIC_LIBRARY:
if LastTarget:
self._FinalBuildTargetList.add(LastTarget)
break
Target = RuleObject.Apply(Source, self.BuildRuleOrder)
if not Target:
if LastTarget:
self._FinalBuildTargetList.add(LastTarget)
break
elif not Target.Outputs:
# A target with no further outputs is a final build target
self._FinalBuildTargetList.add(Target)
self._BuildTargets[FileType].add(Target)
if not Source.IsBinary and Source == File:
self._IntroBuildTargetList.add(Target)
# to avoid cyclic rule
if FileType in RuleChain:
break
RuleChain.add(FileType)
SourceList.extend(Target.Outputs)
LastTarget = Target
FileType = TAB_UNKNOWN_FILE
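## Return the dict of build targets, keyed by file type
#
# Accessing SourceFileList and BinaryFileList triggers _ApplyBuildRule so
# that the target lists are populated before they are returned.
#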
def _GetTargets(self):
if self._BuildTargets is None:
self._IntroBuildTargetList = set()
self._FinalBuildTargetList = set()
self._BuildTargets = defaultdict(set)
self._FileTypes = defaultdict(set)
#TRICK: call _GetSourceFileList to apply build rule for source files
if self.SourceFileList:
pass
#TRICK: call _GetBinaryFileList to apply build rule for binary files
if self.BinaryFileList:
pass
return self._BuildTargets
def _GetIntroTargetList(self):
self._GetTargets()
return self._IntroBuildTargetList
def _GetFinalTargetList(self):
self._GetTargets()
return self._FinalBuildTargetList
def _GetFileTypes(self):
self._GetTargets()
return self._FileTypes
## Get the list of package object the module depends on
#
# @retval list The package object list
#
def _GetDependentPackageList(self):
return self.Module.Packages
## Return the list of auto-generated code file
#
# @retval list The list of auto-generated file
#
def _GetAutoGenFileList(self):
UniStringAutoGenC = True
IdfStringAutoGenC = True
UniStringBinBuffer = BytesIO()
IdfGenBinBuffer = BytesIO()
if self.BuildType == 'UEFI_HII':
UniStringAutoGenC = False
IdfStringAutoGenC = False
if self._AutoGenFileList is None:
self._AutoGenFileList = {}
AutoGenC = TemplateString()
AutoGenH = TemplateString()
StringH = TemplateString()
StringIdf = TemplateString()
GenC.CreateCode(self, AutoGenC, AutoGenH, StringH, UniStringAutoGenC, UniStringBinBuffer, StringIdf, IdfStringAutoGenC, IdfGenBinBuffer)
#
# AutoGen.c is generated if there are library classes in inf, or there are object files
#
if str(AutoGenC) != "" and (len(self.Module.LibraryClasses) > 0
or TAB_OBJECT_FILE in self.FileTypes):
AutoFile = PathClass(gAutoGenCodeFileName, self.DebugDir)
self._AutoGenFileList[AutoFile] = str(AutoGenC)
self._ApplyBuildRule(AutoFile, TAB_UNKNOWN_FILE)
if str(AutoGenH) != "":
AutoFile = PathClass(gAutoGenHeaderFileName, self.DebugDir)
self._AutoGenFileList[AutoFile] = str(AutoGenH)
self._ApplyBuildRule(AutoFile, TAB_UNKNOWN_FILE)
if str(StringH) != "":
AutoFile = PathClass(gAutoGenStringFileName % {"module_name":self.Name}, self.DebugDir)
self._AutoGenFileList[AutoFile] = str(StringH)
self._ApplyBuildRule(AutoFile, TAB_UNKNOWN_FILE)
if UniStringBinBuffer is not None and UniStringBinBuffer.getvalue() != "":
AutoFile = PathClass(gAutoGenStringFormFileName % {"module_name":self.Name}, self.OutputDir)
self._AutoGenFileList[AutoFile] = UniStringBinBuffer.getvalue()
AutoFile.IsBinary = True
self._ApplyBuildRule(AutoFile, TAB_UNKNOWN_FILE)
if UniStringBinBuffer is not None:
UniStringBinBuffer.close()
if str(StringIdf) != "":
AutoFile = PathClass(gAutoGenImageDefFileName % {"module_name":self.Name}, self.DebugDir)
self._AutoGenFileList[AutoFile] = str(StringIdf)
self._ApplyBuildRule(AutoFile, TAB_UNKNOWN_FILE)
if IdfGenBinBuffer is not None and IdfGenBinBuffer.getvalue() != "":
AutoFile = PathClass(gAutoGenIdfFileName % {"module_name":self.Name}, self.OutputDir)
self._AutoGenFileList[AutoFile] = IdfGenBinBuffer.getvalue()
AutoFile.IsBinary = True
self._ApplyBuildRule(AutoFile, TAB_UNKNOWN_FILE)
if IdfGenBinBuffer is not None:
IdfGenBinBuffer.close()
return self._AutoGenFileList
## Return the list of library modules explicitly or implicitly used by this module
def _GetLibraryList(self):
if self._DependentLibraryList is None:
# only merge library classes and PCD for non-library module
if self.IsLibrary:
self._DependentLibraryList = []
else:
if self.AutoGenVersion < 0x00010005:
self._DependentLibraryList = self.PlatformInfo.ResolveLibraryReference(self.Module)
else:
self._DependentLibraryList = self.PlatformInfo.ApplyLibraryInstance(self.Module)
return self._DependentLibraryList
## Get the list of PCDs from current module
#
# @retval list The list of PCD
#
def _GetModulePcdList(self):
if self._ModulePcdList is None:
# apply PCD settings from platform
self._ModulePcdList = self.PlatformInfo.ApplyPcdSetting(self.Module, self.Module.Pcds)
ExtendCopyDictionaryLists(self._PcdComments, self.Module.PcdComments)
return self._ModulePcdList
## Get the list of PCDs from dependent libraries
#
# @retval list The list of PCD
#
def _GetLibraryPcdList(self):
if self._LibraryPcdList is None:
Pcds = OrderedDict()
if not self.IsLibrary:
# get PCDs from dependent libraries
self._LibraryPcdList = []
for Library in self.DependentLibraryList:
PcdsInLibrary = OrderedDict()
ExtendCopyDictionaryLists(self._PcdComments, Library.PcdComments)
for Key in Library.Pcds:
# skip duplicated PCDs
if Key in self.Module.Pcds or Key in Pcds:
continue
Pcds[Key] = copy.copy(Library.Pcds[Key])
PcdsInLibrary[Key] = Pcds[Key]
self._LibraryPcdList.extend(self.PlatformInfo.ApplyPcdSetting(self.Module, PcdsInLibrary, Library=Library))
else:
self._LibraryPcdList = []
return self._LibraryPcdList
## Get the GUID value mapping
#
# @retval dict The mapping between GUID cname and its value
#
def _GetGuidList(self):
if self._GuidList is None:
self._GuidList = OrderedDict()
self._GuidList.update(self.Module.Guids)
for Library in self.DependentLibraryList:
self._GuidList.update(Library.Guids)
ExtendCopyDictionaryLists(self._GuidComments, Library.GuidComments)
ExtendCopyDictionaryLists(self._GuidComments, self.Module.GuidComments)
return self._GuidList
def GetGuidsUsedByPcd(self):
if self._GuidsUsedByPcd is None:
self._GuidsUsedByPcd = OrderedDict()
self._GuidsUsedByPcd.update(self.Module.GetGuidsUsedByPcd())
for Library in self.DependentLibraryList:
self._GuidsUsedByPcd.update(Library.GetGuidsUsedByPcd())
return self._GuidsUsedByPcd
## Get the protocol value mapping
#
# @retval dict The mapping between protocol cname and its value
#
def _GetProtocolList(self):
if self._ProtocolList is None:
self._ProtocolList = OrderedDict()
self._ProtocolList.update(self.Module.Protocols)
for Library in self.DependentLibraryList:
self._ProtocolList.update(Library.Protocols)
ExtendCopyDictionaryLists(self._ProtocolComments, Library.ProtocolComments)
ExtendCopyDictionaryLists(self._ProtocolComments, self.Module.ProtocolComments)
return self._ProtocolList
## Get the PPI value mapping
#
# @retval dict The mapping between PPI cname and its value
#
def _GetPpiList(self):
if self._PpiList is None:
self._PpiList = OrderedDict()
self._PpiList.update(self.Module.Ppis)
for Library in self.DependentLibraryList:
self._PpiList.update(Library.Ppis)
ExtendCopyDictionaryLists(self._PpiComments, Library.PpiComments)
ExtendCopyDictionaryLists(self._PpiComments, self.Module.PpiComments)
return self._PpiList
## Get the list of include search path
#
# @retval list The list path
#
def _GetIncludePathList(self):
if self._IncludePathList is None:
self._IncludePathList = []
if self.AutoGenVersion < 0x00010005:
for Inc in self.Module.Includes:
if Inc not in self._IncludePathList:
self._IncludePathList.append(Inc)
# for Edk modules
Inc = path.join(Inc, self.Arch.capitalize())
if os.path.exists(Inc) and Inc not in self._IncludePathList:
self._IncludePathList.append(Inc)
# Edk modules need DEBUG_DIR at the end of the search path and should not always use SOURCE_DIR
self._IncludePathList.append(self.DebugDir)
else:
self._IncludePathList.append(self.MetaFile.Dir)
self._IncludePathList.append(self.DebugDir)
for Package in self.Module.Packages:
PackageDir = mws.join(self.WorkspaceDir, Package.MetaFile.Dir)
if PackageDir not in self._IncludePathList:
self._IncludePathList.append(PackageDir)
IncludesList = Package.Includes
if Package._PrivateIncludes:
if not self.MetaFile.Path.startswith(PackageDir):
IncludesList = list(set(Package.Includes).difference(set(Package._PrivateIncludes)))
for Inc in IncludesList:
if Inc not in self._IncludePathList:
self._IncludePathList.append(str(Inc))
return self._IncludePathList
def _GetIncludePathLength(self):
return sum(len(inc)+1 for inc in self._IncludePathList)
## Get HII EX PCDs which may be used by VFR
#
# efivarstore used by VFR may relate to HII EX PCDs.
# Get the variable name and GUID from the efivarstore and the HII EX PCD,
# and list the HII EX PCDs in the As-Built INF if both name and GUID match.
#
# @retval list HII EX PCDs
#
def _GetPcdsMaybeUsedByVfr(self):
if not self.SourceFileList:
return []
NameGuids = set()
for SrcFile in self.SourceFileList:
if SrcFile.Ext.lower() != '.vfr':
continue
Vfri = os.path.join(self.OutputDir, SrcFile.BaseName + '.i')
if not os.path.exists(Vfri):
continue
VfriFile = open(Vfri, 'r')
Content = VfriFile.read()
VfriFile.close()
Pos = Content.find('efivarstore')
while Pos != -1:
#
# Make sure 'efivarstore' is the start of efivarstore statement
# in case the value of 'name' (name = efivarstore) is equal to 'efivarstore'
#
Index = Pos - 1
while Index >= 0 and Content[Index] in ' \t\r\n':
Index -= 1
if Index >= 0 and Content[Index] != ';':
Pos = Content.find('efivarstore', Pos + len('efivarstore'))
continue
#
# 'efivarstore' must be followed by name and guid
#
Name = gEfiVarStoreNamePattern.search(Content, Pos)
if not Name:
break
Guid = gEfiVarStoreGuidPattern.search(Content, Pos)
if not Guid:
break
NameArray = ConvertStringToByteArray('L"' + Name.group(1) + '"')
NameGuids.add((NameArray, GuidStructureStringToGuidString(Guid.group(1))))
Pos = Content.find('efivarstore', Name.end())
if not NameGuids:
return []
HiiExPcds = []
for Pcd in self.PlatformInfo.Platform.Pcds.values():
if Pcd.Type != TAB_PCDS_DYNAMIC_EX_HII:
continue
for SkuInfo in Pcd.SkuInfoList.values():
Value = GuidValue(SkuInfo.VariableGuid, self.PlatformInfo.PackageList, self.MetaFile.Path)
if not Value:
continue
Name = ConvertStringToByteArray(SkuInfo.VariableName)
Guid = GuidStructureStringToGuidString(Value)
if (Name, Guid) in NameGuids and Pcd not in HiiExPcds:
HiiExPcds.append(Pcd)
break
return HiiExPcds
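## Generate the <ModuleName>Offset.bin file
#
# The file records the offsets of the VFR binary and UNI string binary
# inside the generated PE32+/TE image; each entry is a GUID followed by a
# 64-bit offset taken from the module .map file.
#
# @retval string The name of the generated offset file, or None if there is
# nothing to record
#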
def _GenOffsetBin(self):
VfrUniBaseName = {}
for SourceFile in self.Module.Sources:
if SourceFile.Type.upper() == ".VFR" :
#
# search the .map file to find the offset of vfr binary in the PE32+/TE file.
#
VfrUniBaseName[SourceFile.BaseName] = (SourceFile.BaseName + "Bin")
elif SourceFile.Type.upper() == ".UNI" :
#
# search the .map file to find the offset of Uni strings binary in the PE32+/TE file.
#
VfrUniBaseName["UniOffsetName"] = (self.Name + "Strings")
if not VfrUniBaseName:
return None
MapFileName = os.path.join(self.OutputDir, self.Name + ".map")
EfiFileName = os.path.join(self.OutputDir, self.Name + ".efi")
VfrUniOffsetList = GetVariableOffset(MapFileName, EfiFileName, VfrUniBaseName.values())
if not VfrUniOffsetList:
return None
OutputName = '%sOffset.bin' % self.Name
UniVfrOffsetFileName = os.path.join( self.OutputDir, OutputName)
try:
fInputfile = open(UniVfrOffsetFileName, "wb+", 0)
except:
EdkLogger.error("build", FILE_OPEN_FAILURE, "File open failed for %s" % UniVfrOffsetFileName, None)
# Use an instance of BytesIO to cache data
fStringIO = BytesIO('')
for Item in VfrUniOffsetList:
if (Item[0].find("Strings") != -1):
#
# UNI offset in image.
# GUID + Offset
# { 0x8913c5e0, 0x33f6, 0x4d86, { 0x9b, 0xf1, 0x43, 0xef, 0x89, 0xfc, 0x6, 0x66 } }
#
UniGuid = [0xe0, 0xc5, 0x13, 0x89, 0xf6, 0x33, 0x86, 0x4d, 0x9b, 0xf1, 0x43, 0xef, 0x89, 0xfc, 0x6, 0x66]
UniGuid = [chr(ItemGuid) for ItemGuid in UniGuid]
fStringIO.write(''.join(UniGuid))
UniValue = pack ('Q', int (Item[1], 16))
fStringIO.write (UniValue)
else:
#
# VFR binary offset in image.
# GUID + Offset
# { 0xd0bc7cb4, 0x6a47, 0x495f, { 0xaa, 0x11, 0x71, 0x7, 0x46, 0xda, 0x6, 0xa2 } };
#
VfrGuid = [0xb4, 0x7c, 0xbc, 0xd0, 0x47, 0x6a, 0x5f, 0x49, 0xaa, 0x11, 0x71, 0x7, 0x46, 0xda, 0x6, 0xa2]
VfrGuid = [chr(ItemGuid) for ItemGuid in VfrGuid]
fStringIO.write(''.join(VfrGuid))
VfrValue = pack ('Q', int (Item[1], 16))
fStringIO.write (VfrValue)
#
# write data into file.
#
try :
fInputfile.write (fStringIO.getvalue())
except:
EdkLogger.error("build", FILE_WRITE_FAILURE, "Write data to file %s failed, please check whether the "
"file been locked or using by other applications." %UniVfrOffsetFileName, None)
fStringIO.close ()
fInputfile.close ()
return OutputName
## Create the As-Built INF file for the module
#
def CreateAsBuiltInf(self, IsOnlyCopy = False):
self.OutputFile = set()
if IsOnlyCopy and GlobalData.gBinCacheDest:
self.CopyModuleToCache()
return
if self.IsAsBuiltInfCreated:
return
# Skip the following code for EDK I inf
if self.AutoGenVersion < 0x00010005:
return
# Skip the following code for libraries
if self.IsLibrary:
return
# Skip the following code for modules with no source files
if not self.SourceFileList:
return
# Skip the following code for modules without any binary files
if self.BinaryFileList:
return
### TODO: How to handle mixed source and binary modules
# Find all DynamicEx and PatchableInModule PCDs used by this module and dependent libraries
# Also find all packages that the DynamicEx PCDs depend on
Pcds = []
PatchablePcds = []
Packages = []
PcdCheckList = []
PcdTokenSpaceList = []
for Pcd in self.ModulePcdList + self.LibraryPcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE:
PatchablePcds.append(Pcd)
PcdCheckList.append((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, TAB_PCDS_PATCHABLE_IN_MODULE))
elif Pcd.Type in PCD_DYNAMIC_EX_TYPE_SET:
if Pcd not in Pcds:
Pcds.append(Pcd)
PcdCheckList.append((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, TAB_PCDS_DYNAMIC_EX))
PcdCheckList.append((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, TAB_PCDS_DYNAMIC))
PcdTokenSpaceList.append(Pcd.TokenSpaceGuidCName)
GuidList = OrderedDict()
GuidList.update(self.GuidList)
for TokenSpace in self.GetGuidsUsedByPcd():
# If a token space is not referred to by a patch PCD or Ex PCD, remove the GUID from the GUID list.
# The GUIDs in the GUIDs section should really be the GUIDs in the source INF or those referred to by Ex and patch PCDs.
if TokenSpace not in PcdTokenSpaceList and TokenSpace in GuidList:
GuidList.pop(TokenSpace)
CheckList = (GuidList, self.PpiList, self.ProtocolList, PcdCheckList)
for Package in self.DerivedPackageList:
if Package in Packages:
continue
BeChecked = (Package.Guids, Package.Ppis, Package.Protocols, Package.Pcds)
Found = False
for Index in range(len(BeChecked)):
for Item in CheckList[Index]:
if Item in BeChecked[Index]:
Packages.append(Package)
Found = True
break
if Found:
break
VfrPcds = self._GetPcdsMaybeUsedByVfr()
for Pkg in self.PlatformInfo.PackageList:
if Pkg in Packages:
continue
for VfrPcd in VfrPcds:
if ((VfrPcd.TokenCName, VfrPcd.TokenSpaceGuidCName, TAB_PCDS_DYNAMIC_EX) in Pkg.Pcds or
(VfrPcd.TokenCName, VfrPcd.TokenSpaceGuidCName, TAB_PCDS_DYNAMIC) in Pkg.Pcds):
Packages.append(Pkg)
break
ModuleType = SUP_MODULE_DXE_DRIVER if self.ModuleType == SUP_MODULE_UEFI_DRIVER and self.DepexGenerated else self.ModuleType
DriverType = self.PcdIsDriver if self.PcdIsDriver else ''
Guid = self.Guid
MDefs = self.Module.Defines
AsBuiltInfDict = {
'module_name' : self.Name,
'module_guid' : Guid,
'module_module_type' : ModuleType,
'module_version_string' : [MDefs['VERSION_STRING']] if 'VERSION_STRING' in MDefs else [],
'pcd_is_driver_string' : [],
'module_uefi_specification_version' : [],
'module_pi_specification_version' : [],
'module_entry_point' : self.Module.ModuleEntryPointList,
'module_unload_image' : self.Module.ModuleUnloadImageList,
'module_constructor' : self.Module.ConstructorList,
'module_destructor' : self.Module.DestructorList,
'module_shadow' : [MDefs['SHADOW']] if 'SHADOW' in MDefs else [],
'module_pci_vendor_id' : [MDefs['PCI_VENDOR_ID']] if 'PCI_VENDOR_ID' in MDefs else [],
'module_pci_device_id' : [MDefs['PCI_DEVICE_ID']] if 'PCI_DEVICE_ID' in MDefs else [],
'module_pci_class_code' : [MDefs['PCI_CLASS_CODE']] if 'PCI_CLASS_CODE' in MDefs else [],
'module_pci_revision' : [MDefs['PCI_REVISION']] if 'PCI_REVISION' in MDefs else [],
'module_build_number' : [MDefs['BUILD_NUMBER']] if 'BUILD_NUMBER' in MDefs else [],
'module_spec' : [MDefs['SPEC']] if 'SPEC' in MDefs else [],
'module_uefi_hii_resource_section' : [MDefs['UEFI_HII_RESOURCE_SECTION']] if 'UEFI_HII_RESOURCE_SECTION' in MDefs else [],
'module_uni_file' : [MDefs['MODULE_UNI_FILE']] if 'MODULE_UNI_FILE' in MDefs else [],
'module_arch' : self.Arch,
'package_item' : [Package.MetaFile.File.replace('\\', '/') for Package in Packages],
'binary_item' : [],
'patchablepcd_item' : [],
'pcd_item' : [],
'protocol_item' : [],
'ppi_item' : [],
'guid_item' : [],
'flags_item' : [],
'libraryclasses_item' : []
}
if 'MODULE_UNI_FILE' in MDefs:
UNIFile = os.path.join(self.MetaFile.Dir, MDefs['MODULE_UNI_FILE'])
if os.path.isfile(UNIFile):
shutil.copy2(UNIFile, self.OutputDir)
if self.AutoGenVersion > int(gInfSpecVersion, 0):
AsBuiltInfDict['module_inf_version'] = '0x%08x' % self.AutoGenVersion
else:
AsBuiltInfDict['module_inf_version'] = gInfSpecVersion
if DriverType:
AsBuiltInfDict['pcd_is_driver_string'].append(DriverType)
if 'UEFI_SPECIFICATION_VERSION' in self.Specification:
AsBuiltInfDict['module_uefi_specification_version'].append(self.Specification['UEFI_SPECIFICATION_VERSION'])
if 'PI_SPECIFICATION_VERSION' in self.Specification:
AsBuiltInfDict['module_pi_specification_version'].append(self.Specification['PI_SPECIFICATION_VERSION'])
OutputDir = self.OutputDir.replace('\\', '/').strip('/')
DebugDir = self.DebugDir.replace('\\', '/').strip('/')
for Item in self.CodaTargetList:
File = Item.Target.Path.replace('\\', '/').strip('/').replace(DebugDir, '').replace(OutputDir, '').strip('/')
self.OutputFile.add(File)
if os.path.isabs(File):
File = File.replace('\\', '/').strip('/').replace(OutputDir, '').strip('/')
if Item.Target.Ext.lower() == '.aml':
AsBuiltInfDict['binary_item'].append('ASL|' + File)
elif Item.Target.Ext.lower() == '.acpi':
AsBuiltInfDict['binary_item'].append('ACPI|' + File)
elif Item.Target.Ext.lower() == '.efi':
AsBuiltInfDict['binary_item'].append('PE32|' + self.Name + '.efi')
else:
AsBuiltInfDict['binary_item'].append('BIN|' + File)
if self.DepexGenerated:
self.OutputFile.add(self.Name + '.depex')
if self.ModuleType in [SUP_MODULE_PEIM]:
AsBuiltInfDict['binary_item'].append('PEI_DEPEX|' + self.Name + '.depex')
elif self.ModuleType in [SUP_MODULE_DXE_DRIVER, SUP_MODULE_DXE_RUNTIME_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, SUP_MODULE_UEFI_DRIVER]:
AsBuiltInfDict['binary_item'].append('DXE_DEPEX|' + self.Name + '.depex')
elif self.ModuleType in [SUP_MODULE_DXE_SMM_DRIVER]:
AsBuiltInfDict['binary_item'].append('SMM_DEPEX|' + self.Name + '.depex')
Bin = self._GenOffsetBin()
if Bin:
AsBuiltInfDict['binary_item'].append('BIN|%s' % Bin)
self.OutputFile.add(Bin)
for Root, Dirs, Files in os.walk(OutputDir):
for File in Files:
if File.lower().endswith('.pdb'):
AsBuiltInfDict['binary_item'].append('DISPOSABLE|' + File)
self.OutputFile.add(File)
HeaderComments = self.Module.HeaderComments
StartPos = 0
for Index in range(len(HeaderComments)):
if HeaderComments[Index].find('@BinaryHeader') != -1:
HeaderComments[Index] = HeaderComments[Index].replace('@BinaryHeader', '@file')
StartPos = Index
break
AsBuiltInfDict['header_comments'] = '\n'.join(HeaderComments[StartPos:]).replace(':#', '://')
AsBuiltInfDict['tail_comments'] = '\n'.join(self.Module.TailComments)
GenList = [
(self.ProtocolList, self._ProtocolComments, 'protocol_item'),
(self.PpiList, self._PpiComments, 'ppi_item'),
(GuidList, self._GuidComments, 'guid_item')
]
for Item in GenList:
for CName in Item[0]:
Comments = '\n '.join(Item[1][CName]) if CName in Item[1] else ''
Entry = Comments + '\n ' + CName if Comments else CName
AsBuiltInfDict[Item[2]].append(Entry)
PatchList = parsePcdInfoFromMapFile(
os.path.join(self.OutputDir, self.Name + '.map'),
os.path.join(self.OutputDir, self.Name + '.efi')
)
if PatchList:
for Pcd in PatchablePcds:
TokenCName = Pcd.TokenCName
for PcdItem in GlobalData.MixedPcd:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdItem]:
TokenCName = PcdItem[0]
break
for PatchPcd in PatchList:
if TokenCName == PatchPcd[0]:
break
else:
continue
PcdValue = ''
if Pcd.DatumType == 'BOOLEAN':
BoolValue = Pcd.DefaultValue.upper()
if BoolValue == 'TRUE':
Pcd.DefaultValue = '1'
elif BoolValue == 'FALSE':
Pcd.DefaultValue = '0'
if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
HexFormat = '0x%02x'
if Pcd.DatumType == TAB_UINT16:
HexFormat = '0x%04x'
elif Pcd.DatumType == TAB_UINT32:
HexFormat = '0x%08x'
elif Pcd.DatumType == TAB_UINT64:
HexFormat = '0x%016x'
PcdValue = HexFormat % int(Pcd.DefaultValue, 0)
else:
if Pcd.MaxDatumSize is None or Pcd.MaxDatumSize == '':
EdkLogger.error("build", AUTOGEN_ERROR,
"Unknown [MaxDatumSize] of PCD [%s.%s]" % (Pcd.TokenSpaceGuidCName, TokenCName)
)
ArraySize = int(Pcd.MaxDatumSize, 0)
PcdValue = Pcd.DefaultValue
if PcdValue[0] != '{':
Unicode = False
if PcdValue[0] == 'L':
Unicode = True
PcdValue = PcdValue.lstrip('L')
PcdValue = eval(PcdValue)
NewValue = '{'
for Index in range(0, len(PcdValue)):
if Unicode:
CharVal = ord(PcdValue[Index])
NewValue = NewValue + '0x%02x' % (CharVal & 0x00FF) + ', ' \
+ '0x%02x' % (CharVal >> 8) + ', '
else:
NewValue = NewValue + '0x%02x' % (ord(PcdValue[Index]) % 0x100) + ', '
Padding = '0x00, '
if Unicode:
Padding = Padding * 2
ArraySize = ArraySize / 2
if ArraySize < (len(PcdValue) + 1):
if Pcd.MaxSizeUserSet:
EdkLogger.error("build", AUTOGEN_ERROR,
"The maximum size of VOID* type PCD '%s.%s' is less than its actual size occupied." % (Pcd.TokenSpaceGuidCName, TokenCName)
)
else:
ArraySize = len(PcdValue) + 1
if ArraySize > len(PcdValue) + 1:
NewValue = NewValue + Padding * (ArraySize - len(PcdValue) - 1)
PcdValue = NewValue + Padding.strip().rstrip(',') + '}'
elif len(PcdValue.split(',')) <= ArraySize:
PcdValue = PcdValue.rstrip('}') + ', 0x00' * (ArraySize - len(PcdValue.split(',')))
PcdValue += '}'
else:
if Pcd.MaxSizeUserSet:
EdkLogger.error("build", AUTOGEN_ERROR,
"The maximum size of VOID* type PCD '%s.%s' is less than its actual size occupied." % (Pcd.TokenSpaceGuidCName, TokenCName)
)
else:
ArraySize = len(PcdValue) + 1
PcdItem = '%s.%s|%s|0x%X' % \
(Pcd.TokenSpaceGuidCName, TokenCName, PcdValue, PatchPcd[1])
PcdComments = ''
if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) in self._PcdComments:
PcdComments = '\n '.join(self._PcdComments[Pcd.TokenSpaceGuidCName, Pcd.TokenCName])
if PcdComments:
PcdItem = PcdComments + '\n ' + PcdItem
AsBuiltInfDict['patchablepcd_item'].append(PcdItem)
for Pcd in Pcds + VfrPcds:
PcdCommentList = []
HiiInfo = ''
TokenCName = Pcd.TokenCName
for PcdItem in GlobalData.MixedPcd:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdItem]:
TokenCName = PcdItem[0]
break
if Pcd.Type == TAB_PCDS_DYNAMIC_EX_HII:
for SkuName in Pcd.SkuInfoList:
SkuInfo = Pcd.SkuInfoList[SkuName]
HiiInfo = '## %s|%s|%s' % (SkuInfo.VariableName, SkuInfo.VariableGuid, SkuInfo.VariableOffset)
break
if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) in self._PcdComments:
PcdCommentList = self._PcdComments[Pcd.TokenSpaceGuidCName, Pcd.TokenCName][:]
if HiiInfo:
UsageIndex = -1
UsageStr = ''
for Index, Comment in enumerate(PcdCommentList):
for Usage in UsageList:
if Comment.find(Usage) != -1:
UsageStr = Usage
UsageIndex = Index
break
if UsageIndex != -1:
PcdCommentList[UsageIndex] = '## %s %s %s' % (UsageStr, HiiInfo, PcdCommentList[UsageIndex].replace(UsageStr, ''))
else:
PcdCommentList.append('## UNDEFINED ' + HiiInfo)
PcdComments = '\n '.join(PcdCommentList)
PcdEntry = Pcd.TokenSpaceGuidCName + '.' + TokenCName
if PcdComments:
PcdEntry = PcdComments + '\n ' + PcdEntry
AsBuiltInfDict['pcd_item'].append(PcdEntry)
for Item in self.BuildOption:
if 'FLAGS' in self.BuildOption[Item]:
AsBuiltInfDict['flags_item'].append('%s:%s_%s_%s_%s_FLAGS = %s' % (self.ToolChainFamily, self.BuildTarget, self.ToolChain, self.Arch, Item, self.BuildOption[Item]['FLAGS'].strip()))
# Generated LibraryClasses section in comments.
for Library in self.LibraryAutoGenList:
AsBuiltInfDict['libraryclasses_item'].append(Library.MetaFile.File.replace('\\', '/'))
# Generated UserExtensions TianoCore section.
# All tianocore user extensions are copied.
UserExtStr = ''
for TianoCore in self._GetTianoCoreUserExtensionList():
UserExtStr += '\n'.join(TianoCore)
ExtensionFile = os.path.join(self.MetaFile.Dir, TianoCore[1])
if os.path.isfile(ExtensionFile):
shutil.copy2(ExtensionFile, self.OutputDir)
AsBuiltInfDict['userextension_tianocore_item'] = UserExtStr
# Generated depex expression section in comments.
DepexExpresion = self._GetDepexExpresionString()
AsBuiltInfDict['depexsection_item'] = DepexExpresion if DepexExpresion else ''
AsBuiltInf = TemplateString()
AsBuiltInf.Append(gAsBuiltInfHeaderString.Replace(AsBuiltInfDict))
SaveFileOnChange(os.path.join(self.OutputDir, self.Name + '.inf'), str(AsBuiltInf), False)
self.IsAsBuiltInfCreated = True
if GlobalData.gBinCacheDest:
self.CopyModuleToCache()
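## Copy the module hash file, the As-Built INF and the output files into
# the binary cache directory (GlobalData.gBinCacheDest)
#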
def CopyModuleToCache(self):
FileDir = path.join(GlobalData.gBinCacheDest, self.Arch, self.SourceDir, self.MetaFile.BaseName)
CreateDirectory (FileDir)
HashFile = path.join(self.BuildDir, self.Name + '.hash')
ModuleFile = path.join(self.OutputDir, self.Name + '.inf')
if os.path.exists(HashFile):
shutil.copy2(HashFile, FileDir)
if os.path.exists(ModuleFile):
shutil.copy2(ModuleFile, FileDir)
if not self.OutputFile:
Ma = self.Workspace.BuildDatabase[PathClass(ModuleFile), self.Arch, self.BuildTarget, self.ToolChain]
self.OutputFile = Ma.Binaries
if self.OutputFile:
for File in self.OutputFile:
File = str(File)
if not os.path.isabs(File):
File = os.path.join(self.OutputDir, File)
if os.path.exists(File):
shutil.copy2(File, FileDir)
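## Try to restore the module outputs from the binary cache
#
# The cached files are only copied when the cached hash matches the hash
# computed for the current module.
#
# @retval True The cache was used and the build of this module can be skipped
# @retval False No usable cache entry was found
#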
def AttemptModuleCacheCopy(self):
if self.IsBinaryModule:
return False
FileDir = path.join(GlobalData.gBinCacheSource, self.Arch, self.SourceDir, self.MetaFile.BaseName)
HashFile = path.join(FileDir, self.Name + '.hash')
if os.path.exists(HashFile):
f = open(HashFile, 'r')
CacheHash = f.read()
f.close()
if GlobalData.gModuleHash[self.Arch][self.Name]:
if CacheHash == GlobalData.gModuleHash[self.Arch][self.Name]:
for root, dir, files in os.walk(FileDir):
for f in files:
if self.Name + '.hash' in f:
shutil.copy2(HashFile, self.BuildDir)
else:
File = path.join(root, f)
shutil.copy2(File, self.OutputDir)
if self.Name == "PcdPeim" or self.Name == "PcdDxe":
CreatePcdDatabaseCode(self, TemplateString(), TemplateString())
return True
return False
## Create makefile for the module and its dependent libraries
#
# @param CreateLibraryMakeFile Flag indicating if or not the makefiles of
# dependent libraries will be created
#
def CreateMakeFile(self, CreateLibraryMakeFile=True, GenFfsList = []):
# Ignore generating makefile when it is a binary module
if self.IsBinaryModule:
return
if self.IsMakeFileCreated:
return
self.GenFfsList = GenFfsList
if not self.IsLibrary and CreateLibraryMakeFile:
for LibraryAutoGen in self.LibraryAutoGenList:
LibraryAutoGen.CreateMakeFile()
if self.CanSkip():
return
if len(self.CustomMakefile) == 0:
Makefile = GenMake.ModuleMakefile(self)
else:
Makefile = GenMake.CustomMakefile(self)
if Makefile.Generate():
EdkLogger.debug(EdkLogger.DEBUG_9, "Generated makefile for module %s [%s]" %
(self.Name, self.Arch))
else:
EdkLogger.debug(EdkLogger.DEBUG_9, "Skipped the generation of makefile for module %s [%s]" %
(self.Name, self.Arch))
self.CreateTimeStamp(Makefile)
self.IsMakeFileCreated = True
def CopyBinaryFiles(self):
for File in self.Module.Binaries:
SrcPath = File.Path
DstPath = os.path.join(self.OutputDir, os.path.basename(SrcPath))
CopyLongFilePath(SrcPath, DstPath)
## Create autogen code for the module and its dependent libraries
#
# @param CreateLibraryCodeFile Flag indicating if or not the code of
# dependent libraries will be created
#
def CreateCodeFile(self, CreateLibraryCodeFile=True):
if self.IsCodeFileCreated:
return
# Need to generate PcdDatabase even if PcdDriver is a binary module
if self.IsBinaryModule and self.PcdIsDriver != '':
CreatePcdDatabaseCode(self, TemplateString(), TemplateString())
return
if self.IsBinaryModule:
if self.IsLibrary:
self.CopyBinaryFiles()
return
if not self.IsLibrary and CreateLibraryCodeFile:
for LibraryAutoGen in self.LibraryAutoGenList:
LibraryAutoGen.CreateCodeFile()
if self.CanSkip():
return
AutoGenList = []
IgoredAutoGenList = []
for File in self.AutoGenFileList:
if GenC.Generate(File.Path, self.AutoGenFileList[File], File.IsBinary):
#Ignore Edk AutoGen.c
if self.AutoGenVersion < 0x00010005 and File.Name == 'AutoGen.c':
continue
AutoGenList.append(str(File))
else:
IgoredAutoGenList.append(str(File))
# Skip the following code for EDK I inf
if self.AutoGenVersion < 0x00010005:
return
for ModuleType in self.DepexList:
# Ignore empty [depex] section or [depex] section for SUP_MODULE_USER_DEFINED module
if len(self.DepexList[ModuleType]) == 0 or ModuleType == SUP_MODULE_USER_DEFINED:
continue
Dpx = GenDepex.DependencyExpression(self.DepexList[ModuleType], ModuleType, True)
DpxFile = gAutoGenDepexFileName % {"module_name" : self.Name}
if len(Dpx.PostfixNotation) != 0:
self.DepexGenerated = True
if Dpx.Generate(path.join(self.OutputDir, DpxFile)):
AutoGenList.append(str(DpxFile))
else:
IgoredAutoGenList.append(str(DpxFile))
if IgoredAutoGenList == []:
EdkLogger.debug(EdkLogger.DEBUG_9, "Generated [%s] files for module %s [%s]" %
(" ".join(AutoGenList), self.Name, self.Arch))
elif AutoGenList == []:
EdkLogger.debug(EdkLogger.DEBUG_9, "Skipped the generation of [%s] files for module %s [%s]" %
(" ".join(IgoredAutoGenList), self.Name, self.Arch))
else:
EdkLogger.debug(EdkLogger.DEBUG_9, "Generated [%s] (skipped %s) files for module %s [%s]" %
(" ".join(AutoGenList), " ".join(IgoredAutoGenList), self.Name, self.Arch))
self.IsCodeFileCreated = True
return AutoGenList
## Summarize the ModuleAutoGen objects of all libraries used by this module
def _GetLibraryAutoGenList(self):
if self._LibraryAutoGenList is None:
self._LibraryAutoGenList = []
for Library in self.DependentLibraryList:
La = ModuleAutoGen(
self.Workspace,
Library.MetaFile,
self.BuildTarget,
self.ToolChain,
self.Arch,
self.PlatformInfo.MetaFile
)
if La not in self._LibraryAutoGenList:
self._LibraryAutoGenList.append(La)
for Lib in La.CodaTargetList:
self._ApplyBuildRule(Lib.Target, TAB_UNKNOWN_FILE)
return self._LibraryAutoGenList
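## Compute the hash of this module
#
# The hash covers the platform hash, the hashes of dependent packages and
# libraries, the module INF file and all of its source files; the result is
# recorded in GlobalData.gModuleHash and saved to <ModuleName>.hash in the
# build directory.
#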
def GenModuleHash(self):
if self.Arch not in GlobalData.gModuleHash:
GlobalData.gModuleHash[self.Arch] = {}
m = hashlib.md5()
# Add Platform level hash
m.update(GlobalData.gPlatformHash)
# Add Package level hash
if self.DependentPackageList:
for Pkg in sorted(self.DependentPackageList, key=lambda x: x.PackageName):
if Pkg.PackageName in GlobalData.gPackageHash[self.Arch]:
m.update(GlobalData.gPackageHash[self.Arch][Pkg.PackageName])
# Add Library hash
if self.LibraryAutoGenList:
for Lib in sorted(self.LibraryAutoGenList, key=lambda x: x.Name):
if Lib.Name not in GlobalData.gModuleHash[self.Arch]:
Lib.GenModuleHash()
m.update(GlobalData.gModuleHash[self.Arch][Lib.Name])
# Add Module self
f = open(str(self.MetaFile), 'r')
Content = f.read()
f.close()
m.update(Content)
# Add Module's source files
if self.SourceFileList:
for File in sorted(self.SourceFileList, key=lambda x: str(x)):
f = open(str(File), 'r')
Content = f.read()
f.close()
m.update(Content)
ModuleHashFile = path.join(self.BuildDir, self.Name + ".hash")
if self.Name not in GlobalData.gModuleHash[self.Arch]:
GlobalData.gModuleHash[self.Arch][self.Name] = m.hexdigest()
if GlobalData.gBinCacheSource:
if self.AttemptModuleCacheCopy():
return False
return SaveFileOnChange(ModuleHashFile, m.hexdigest(), True)
## Decide whether we can skip the ModuleAutoGen process
def CanSkipbyHash(self):
if GlobalData.gUseHashCache:
return not self.GenModuleHash()
return False
## Decide whether we can skip the ModuleAutoGen process
# If any source file is newer than the module, then we cannot skip it
#
def CanSkip(self):
if self.MakeFileDir in GlobalData.gSikpAutoGenCache:
return True
if not os.path.exists(self.GetTimeStampPath()):
return False
#last creation time of the module
DstTimeStamp = os.stat(self.GetTimeStampPath())[8]
SrcTimeStamp = self.Workspace._SrcTimeStamp
if SrcTimeStamp > DstTimeStamp:
return False
with open(self.GetTimeStampPath(), 'r') as f:
for source in f:
source = source.rstrip('\n')
if not os.path.exists(source):
return False
if source not in ModuleAutoGen.TimeDict :
ModuleAutoGen.TimeDict[source] = os.stat(source)[8]
if ModuleAutoGen.TimeDict[source] > DstTimeStamp:
return False
GlobalData.gSikpAutoGenCache.add(self.MakeFileDir)
return True
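## Return the path of the file that records the AutoGen timestamp and the
# list of files the module depends on
#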
def GetTimeStampPath(self):
if self._TimeStampPath is None:
self._TimeStampPath = os.path.join(self.MakeFileDir, 'AutoGenTimeStamp')
return self._TimeStampPath
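## Write the AutoGen timestamp file for this module
#
# The file lists the module INF, its source files, the dependent library INF
# files and the AutoGen dependency set; CanSkip() compares their timestamps
# against this file on later builds.
#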
def CreateTimeStamp(self, Makefile):
FileSet = {self.MetaFile.Path}
for SourceFile in self.Module.Sources:
FileSet.add (SourceFile.Path)
for Lib in self.DependentLibraryList:
FileSet.add (Lib.MetaFile.Path)
for f in self.AutoGenDepSet:
FileSet.add (f.Path)
if os.path.exists (self.GetTimeStampPath()):
os.remove (self.GetTimeStampPath())
with open(self.GetTimeStampPath(), 'w+') as file:
for f in FileSet:
print(f, file=file)
Module = property(_GetModule)
Name = property(_GetBaseName)
Guid = property(_GetGuid)
Version = property(_GetVersion)
ModuleType = property(_GetModuleType)
ComponentType = property(_GetComponentType)
BuildType = property(_GetBuildType)
PcdIsDriver = property(_GetPcdIsDriver)
AutoGenVersion = property(_GetAutoGenVersion)
Macros = property(_GetMacros)
Specification = property(_GetSpecification)
IsLibrary = property(_IsLibrary)
IsBinaryModule = property(_IsBinaryModule)
BuildDir = property(_GetBuildDir)
OutputDir = property(_GetOutputDir)
FfsOutputDir = property(_GetFfsOutputDir)
DebugDir = property(_GetDebugDir)
MakeFileDir = property(_GetMakeFileDir)
CustomMakefile = property(_GetCustomMakefile)
IncludePathList = property(_GetIncludePathList)
IncludePathLength = property(_GetIncludePathLength)
AutoGenFileList = property(_GetAutoGenFileList)
UnicodeFileList = property(_GetUnicodeFileList)
VfrFileList = property(_GetVfrFileList)
SourceFileList = property(_GetSourceFileList)
BinaryFileList = property(_GetBinaryFiles) # FileType : [File List]
Targets = property(_GetTargets)
IntroTargetList = property(_GetIntroTargetList)
CodaTargetList = property(_GetFinalTargetList)
FileTypes = property(_GetFileTypes)
BuildRules = property(_GetBuildRules)
IdfFileList = property(_GetIdfFileList)
DependentPackageList = property(_GetDependentPackageList)
DependentLibraryList = property(_GetLibraryList)
LibraryAutoGenList = property(_GetLibraryAutoGenList)
DerivedPackageList = property(_GetDerivedPackageList)
ModulePcdList = property(_GetModulePcdList)
LibraryPcdList = property(_GetLibraryPcdList)
GuidList = property(_GetGuidList)
ProtocolList = property(_GetProtocolList)
PpiList = property(_GetPpiList)
DepexList = property(_GetDepexTokenList)
DxsFile = property(_GetDxsFile)
DepexExpressionDict = property(_GetDepexExpressionTokenList)
BuildOption = property(_GetModuleBuildOption)
BuildOptionIncPathList = property(_GetBuildOptionIncPathList)
BuildCommand = property(_GetBuildCommand)
FixedAtBuildPcds = property(_GetFixedAtBuildPcds)
UniqueBaseName = property(_GetUniqueBaseName)
FixedVoidTypePcds = property(_GetFixedAtBuildVoidTypePcds)
# This acts like the main() function for the script, unless it is 'import'ed into another script.
if __name__ == '__main__':
pass
| []
| []
| [
"MAKE_FLAGS",
"PATH"
]
| [] | ["MAKE_FLAGS", "PATH"] | python | 2 | 0 | |
jax/lib/xla_bridge.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface and utility functions to XLA.
This module wraps the XLA client(s) and builders to standardize their interfaces
and provide some automatic type mapping logic for converting between Numpy and
XLA. There are also a handful of related casting utilities.
"""
from functools import partial
import os
from typing import Callable, Dict
import warnings
from absl import logging
import numpy as np
from ..config import flags
from .. import util
from .. import dtypes
import numpy as onp # 'onp' rather than 'np' to distinguish from autograd.numpy
import threading
try:
from . import tpu_client
except ImportError:
tpu_client = None
from . import version
from . import xla_client
xops = xla_client.ops
FLAGS = flags.FLAGS
flags.DEFINE_string(
'jax_xla_backend', 'xla',
'Default is "xla" for the XLA service directly, '
'or "tpu_driver" for using high-performance access to Cloud TPU hardware.')
flags.DEFINE_string(
'jax_backend_target', 'local',
'Either "local" or "rpc:address" to connect to a remote service target.')
flags.DEFINE_string(
'jax_platform_name',
os.getenv('JAX_PLATFORM_NAME', ''),
'Platform name for XLA. The default is to attempt to use a GPU if '
'available, but fall back to CPU otherwise. To set the platform manually, '
'pass "cpu" for CPU or "gpu" for GPU.')
def get_compile_options(num_replicas, num_partitions, device_assignment=None):
"""Returns the compile options to use, as derived from flag values.
Args:
num_replicas: int indicating the number of replicas for which to compile.
num_partitions: int indicating the number of partitions for which to compile.
device_assignment: Optional tuple of integers indicating the assignment of
logical replicas to physical devices (default inherited from
xla_client.CompileOptions). Must be consistent with `num_replicas` and
`num_partitions`.
"""
compile_options = xla_client.CompileOptions()
compile_options.num_replicas = num_replicas
compile_options.num_partitions = num_partitions
if device_assignment is not None:
logging.vlog(
2,
'get_compile_options: num_replicas=%s num_partitions=%s device_assignment=%s',
num_replicas, num_partitions, device_assignment)
device_assignment = onp.array(device_assignment)
# Allow 1D device assignment if num_partitions is 1.
if (device_assignment.ndim == 1) and (num_partitions == 1):
device_assignment = device_assignment[:, None]
if num_replicas != device_assignment.shape[0]:
msg = 'device_assignment does not match num_replicas: {} vs {}.'
raise ValueError(msg.format(device_assignment, num_replicas))
if num_partitions != device_assignment.shape[1]:
msg = 'device_assignment does not match num_partitions: {} vs {}.'
raise ValueError(msg.format(device_assignment, num_partitions))
device_assignment = xla_client.DeviceAssignment.create(device_assignment)
assert device_assignment.replica_count() == num_replicas
assert device_assignment.computation_count() == num_partitions
compile_options.device_assignment = device_assignment
return compile_options
_backends = {}
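# Registers a backend factory under `name`; get_backend() selects among the
# registered factories using the --jax_xla_backend flag.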
def register_backend(name, factory):
_backends[name] = factory
def _get_local_backend(platform=None):
if not platform:
platform = FLAGS.jax_platform_name or None
backend = xla_client.get_local_backend(platform)
if backend is None:
raise RuntimeError("No local XLA backends found.")
if backend.platform == 'cpu' and platform != 'cpu':
warnings.warn('No GPU/TPU found, falling back to CPU.')
return backend
register_backend('xla', _get_local_backend)
# memoize the TPU driver to be consistent with xla_client behavior
_tpu_backend = None
def _get_tpu_driver_backend(platform):
del platform
global _tpu_backend
if _tpu_backend is None:
backend_target = FLAGS.jax_backend_target
if backend_target is None:
raise ValueError('When using TPU Driver as the backend, you must specify '
'--jax_backend_target=<hostname>:8470.')
_tpu_backend = tpu_client.TpuBackend.create(worker=backend_target)
return _tpu_backend
if tpu_client:
register_backend('tpu_driver', _get_tpu_driver_backend)
_backend_lock = threading.Lock()
@util.memoize
def get_backend(platform=None):
# TODO(mattjj,skyewm): remove this input polymorphism after we clean up how
# 'backend' values are handled
if not isinstance(platform, (type(None), str)):
return platform
with _backend_lock:
backend = _backends.get(FLAGS.jax_xla_backend)
if backend is None:
msg = 'Unknown jax_xla_backend value "{}".'
raise ValueError(msg.format(FLAGS.jax_xla_backend))
return backend(platform)
def get_device_backend(device=None):
"""Returns the Backend associated with `device`, or the default Backend."""
platform = device.platform if device else None
return get_backend(platform)
def device_count(backend=None):
"""Returns the total number of devices.
On most platforms, this is the same as ``local_device_count()``. However, on
multi-host platforms, this will return the total number of devices across all
hosts.
Args:
backend: This is an experimental feature and the API is likely to change.
Optional, a string representing the xla backend. 'cpu', 'gpu', or 'tpu'.
Returns:
Number of devices.
"""
return int(get_backend(backend).device_count())
def local_device_count(backend=None):
"""Returns the number of devices on this host."""
return int(get_backend(backend).local_device_count())
def devices(backend=None):
"""Returns a list of all devices.
Each device is represented by a subclass of Device (e.g. CpuDevice,
GpuDevice). The length of the returned list is equal to
``device_count()``. Local devices can be identified by comparing
``Device.host_id`` to ``host_id()``.
Args:
backend: This is an experimental feature and the API is likely to change.
Optional, a string representing the xla backend. 'cpu', 'gpu', or 'tpu'.
Returns:
List of Device subclasses.
"""
return get_backend(backend).devices()
def local_devices(host_id=None, backend=None):
"""Returns a list of devices local to a given host (this host by default)."""
if host_id is None:
host_id = get_backend(backend).host_id()
return [d for d in devices(backend) if d.host_id == host_id]
def host_id(backend=None):
"""Returns the integer host ID of this host.
On most platforms, this will always be 0. This will vary on multi-host
platforms though.
Args:
backend: This is an experimental feature and the API is likely to change.
Optional, a string representing the xla backend. 'cpu', 'gpu', or 'tpu'.
Returns:
Integer host ID.
"""
return get_backend(backend).host_id()
def host_ids(backend=None):
"""Returns a sorted list of all host IDs."""
return sorted(list(set(d.host_id for d in devices(backend))))
def host_count(backend=None):
"""Returns the number of hosts."""
return len(host_ids(backend))
### utility functions
@util.memoize
def dtype_to_etype(dtype):
"""Convert from dtype to canonical etype (reading FLAGS.jax_enable_x64)."""
return xla_client.dtype_to_etype(dtypes.canonicalize_dtype(dtype))
@util.memoize
def supported_numpy_dtypes():
return {dtypes.canonicalize_dtype(dtype)
for dtype in xla_client.XLA_ELEMENT_TYPE_TO_DTYPE.values()}
# TODO(mattjj,frostig): try to remove this function
def normalize_to_xla_dtypes(val):
"""Normalize dtypes in a value."""
if hasattr(val, '__array__') or onp.isscalar(val):
return onp.asarray(val,
dtype=dtypes.canonicalize_dtype(dtypes.result_type(val)))
elif isinstance(val, (tuple, list)):
return tuple(normalize_to_xla_dtypes(x) for x in val)
raise TypeError('Can\'t convert to XLA: {}'.format(val))
def _numpy_array_constant(builder, value, canonicalize_types=True):
if canonicalize_types:
value = normalize_to_xla_dtypes(value)
return xops.ConstantLiteral(builder, value)
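# Builds an XLA Parameter op, defaulting the name to '' and expanding a bool
# `replicated` argument to one flag per leaf of the parameter shape.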
def parameter(builder, num, shape, name=None, replicated=None):
if name is None:
name = ''
if replicated is None:
replicated = []
elif isinstance(replicated, bool):
replicated = [replicated] * shape.leaf_count()
return xops.Parameter(builder, num,
shape.with_major_to_minor_layout_if_absent(), name,
replicated)
def constant(builder, py_val, canonicalize_types=True):
"""Translate constant `py_val` to a constant, canonicalizing its dtype.
Args:
py_val: a Python value to be translated to a constant.
Returns:
A representation of the constant, either a ComputationDataHandle or None
"""
py_type = type(py_val)
if py_type in _constant_handlers:
return _constant_handlers[py_type](builder, py_val, canonicalize_types)
else:
raise TypeError("No constant handler for type: {}".format(py_type))
def make_computation_builder(name):
return xla_client.XlaBuilder(name)
def register_constant_handler(type_, handler_fun):
_constant_handlers[type_] = handler_fun
_constant_handlers: Dict[type, Callable] = {}
def _ndarray_constant_handler(c, val, canonicalize_types=True):
"""Constant handler for ndarray literals, handling zero-size strides.
This function essentially calls _numpy_array_constant(val) except it has
special handling of arrays with any strides of size zero: for those, it
generates appropriate calls to NumpyArrayConstant, Broadcast, and Transpose
to avoid staging in large literals that might arise from np.zeros or np.ones
or the output of lax.broadcast (which uses onp.broadcast_to which in turn
uses size-zero strides).
Args:
c: an XlaBuilder
val: an ndarray.
Returns:
An XLA ComputationDataHandle / XlaOp representing the constant ndarray
staged into the XLA Computation.
"""
# TODO(mattjj): revise this to use xops.BroadcastInDim rather than Transpose
if onp.any(onp.equal(0, val.strides)) and val.size > 0:
zero_stride_axes, = onp.where(onp.equal(0, val.strides))
other_axes, = onp.where(onp.not_equal(0, val.strides))
collapsed_val = val[tuple(0 if ax in zero_stride_axes else slice(None)
for ax in range(val.ndim))]
xla_val = xops.Broadcast(
_numpy_array_constant(c, collapsed_val, canonicalize_types),
onp.take(val.shape, zero_stride_axes))
permutation = onp.argsort(tuple(zero_stride_axes) + tuple(other_axes))
return xops.Transpose(xla_val, permutation)
else:
return _numpy_array_constant(c, val, canonicalize_types)
register_constant_handler(onp.ndarray, _ndarray_constant_handler)
def _scalar_constant_handler(c, val, canonicalize_types=True):
return _numpy_array_constant(c, val, canonicalize_types)
for scalar_type in [onp.int8, onp.int16, onp.int32, onp.int64,
onp.uint8, onp.uint16, onp.uint32, onp.uint64,
onp.float16, onp.float32, onp.float64, onp.float128,
onp.bool_, onp.longlong]:
register_constant_handler(scalar_type, _scalar_constant_handler)
def _python_scalar_handler(dtype, c, val, canonicalize_dtypes=True):
return _numpy_array_constant(c, dtype.type(val))
for ptype, dtype in dtypes.python_scalar_dtypes.items():
register_constant_handler(ptype, partial(_python_scalar_handler, dtype))
| []
| []
| [
"JAX_PLATFORM_NAME"
]
| [] | ["JAX_PLATFORM_NAME"] | python | 1 | 0 | |
app/rest/dao/src/main/java/io/syndesis/dao/init/ReadApiClientData.java | /*
* Copyright (C) 2016 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.syndesis.dao.init;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Scanner;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonMappingException;
import io.syndesis.core.Json;
import io.syndesis.dao.manager.EncryptionComponent;
import io.syndesis.model.ModelData;
public class ReadApiClientData {
private static final TypeReference<List<ModelData<?>>> MODEL_DATA_TYPE = new TypeReference<List<ModelData<?>>>(){};
private static final Pattern PATTERN = Pattern.compile("\\@(.*?)\\@");
private final EncryptionComponent encryptionComponent;
public ReadApiClientData() {
this(new EncryptionComponent(null));
}
public ReadApiClientData(EncryptionComponent encryptionComponent) {
this.encryptionComponent = encryptionComponent;
}
/**
 * Reads model data from a JSON resource on the classpath, resolving any
 * {@code @TOKEN@} placeholders from the process environment.
 *
 * @param fileName name of the JSON resource to load from the classpath
 * @return the list of {@link ModelData} entries read from the resource
 * @throws JsonParseException if the resource is not valid JSON
 * @throws JsonMappingException if the JSON cannot be mapped to {@link ModelData}
 * @throws IOException if the resource cannot be read
 */
public List<ModelData<?>> readDataFromFile(String fileName) throws JsonParseException, JsonMappingException, IOException {
try (InputStream is = Thread.currentThread().getContextClassLoader().getResourceAsStream(fileName)) {
if (is==null) {
throw new FileNotFoundException("Cannot find file " + fileName + " on classpath");
}
String jsonText = findAndReplaceTokens(from(is),System.getenv());
return Json.reader().forType(MODEL_DATA_TYPE).readValue(jsonText);
}
}
public List<ModelData<?>> readDataFromString(String jsonText) throws JsonParseException, JsonMappingException, IOException {
String json = findAndReplaceTokens(jsonText,System.getenv());
return Json.reader().forType(MODEL_DATA_TYPE).readValue(json);
}
/**
* Reads the InputStream and returns a String containing all content from the InputStream.
* @param is - InputStream that will be read.
* @return String containing all content from the InputStream
*/
public String from(InputStream is) {
try (Scanner scanner = new Scanner(is, "UTF-8") ) {
return scanner.useDelimiter("\\A").next();
}
}
/**
* Finds tokens surrounded by "@" signs (for example @POSTGRESQL_SAMPLEDB_PASSWORD@) and replaces them
* with values from System.env if a value is set in the environment.
*
* @param jsonText - String containing tokens
* @param env - containing tokens
* @return String with tokens resolved from env
*/
public String findAndReplaceTokens(String jsonText, Map<String,String> env) {
Matcher m = PATTERN.matcher(jsonText);
String json = jsonText;
while(m.find()) {
final String token = m.group(1).toUpperCase(Locale.US);
String envKey = token;
if( token.startsWith("ENC:") ) {
envKey = EncryptionComponent.stripPrefix(token, "ENC:");
}
String value = env.get(envKey);
if (value!=null) {
if( token.startsWith("ENC:") ) {
value = encryptionComponent.encrypt(value);
}
json = json.replaceAll("@" + token + "@", value);
}
}
return json;
}
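// Illustrative usage (hypothetical values, added commentary):
//   Map<String, String> env = java.util.Collections.singletonMap("POSTGRESQL_SAMPLEDB_PASSWORD", "secret");
//   String json = new ReadApiClientData().findAndReplaceTokens(
//       "{\"password\":\"@POSTGRESQL_SAMPLEDB_PASSWORD@\"}", env);
//   // json is now {"password":"secret"}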
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
misc/hitchhike/files/hitchhike/server.py | #!/usr/bin/env python3.9
import os
def f(x):
print(f'value 1: {repr(x)}')
v = input('value 2: ')
if len(v) > 8: return
return eval(f'{x} * {v}', {}, {})
if __name__ == '__main__':
print("+---------------------------------------------------+")
print("| The Answer to the Ultimate Question of Life, |")
print("| the Universe, and Everything is 42 |")
print("+---------------------------------------------------+")
for x in [6, 6.6, '666', [6666], {b'6':6666}]:
if f(x) != 42:
print("Something is fundamentally wrong with your universe.")
exit(1)
else:
print("Correct!")
print("Congrats! Here is your flag:")
print(os.getenv("FLAG", "FAKECON{try it on remote}"))
| []
| []
| [
"FLAG"
]
| [] | ["FLAG"] | python | 1 | 0 | |
sdk/eventhub/azure-eventhubs/samples/sync_samples/client_secret_auth.py | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
An example showing authentication using Azure Active Directory (AAD) credentials.
"""
import os
from azure.eventhub import EventData, EventHubProducerClient
from azure.identity import EnvironmentCredential
fully_qualified_namespace = os.environ['EVENT_HUB_HOSTNAME']
eventhub_name = os.environ['EVENT_HUB_NAME']
credential = EnvironmentCredential()
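# Note (added commentary): EnvironmentCredential reads the service principal
# settings from environment variables such as AZURE_TENANT_ID, AZURE_CLIENT_ID
# and AZURE_CLIENT_SECRET; the exact set of variables honoured depends on the
# installed azure-identity version.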
producer = EventHubProducerClient(fully_qualified_namespace=fully_qualified_namespace,
eventhub_name=eventhub_name,
credential=credential)
with producer:
event = EventData(body='A single message')
producer.send(event, partition_id='0')
| []
| []
| [
"EVENT_HUB_HOSTNAME",
"EVENT_HUB_NAME"
]
| [] | ["EVENT_HUB_HOSTNAME", "EVENT_HUB_NAME"] | python | 2 | 0 | |
cmd/action-label-syncer/main.go | // Copyright 2020 micnncim
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"os"
"strconv"
"strings"
"github.com/micnncim/action-label-syncer/pkg/github"
)
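// The GitHub Actions runner exposes the workflow inputs consumed below as
// environment variables: INPUT_MANIFEST (path to the YAML label manifest),
// INPUT_TOKEN / GITHUB_TOKEN, INPUT_PRUNE and INPUT_REPOSITORY /
// GITHUB_REPOSITORY. A manifest entry typically looks like this (illustrative;
// see github.FromManifestToLabels for the exact schema):
//
//	- name: bug
//	  description: Something isn't working
//	  color: d73a4a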
func main() {
manifest := os.Getenv("INPUT_MANIFEST")
labels, err := github.FromManifestToLabels(manifest)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to load manifest: %v\n", err)
os.Exit(1)
}
token := os.Getenv("INPUT_TOKEN")
if len(token) == 0 {
token = os.Getenv("GITHUB_TOKEN")
}
client := github.NewClient(token)
prune, err := strconv.ParseBool(os.Getenv("INPUT_PRUNE"))
if err != nil {
fmt.Fprintf(os.Stderr, "unable to parse prune: %v\n", err)
os.Exit(1)
}
repoinput := os.Getenv("INPUT_REPOSITORY")
if len(repoinput) == 0 {
repoinput = os.Getenv("GITHUB_REPOSITORY")
}
repolist := strings.Fields(repoinput) // splits repo input based on whitespace
for _, repoitem := range repolist {
slugs := strings.Split(repoitem, "/")
if len(slugs) != 2 {
fmt.Fprintf(os.Stderr, "invalid repository: %v\n", repoitem)
os.Exit(1)
}
owner, repo := slugs[0], slugs[1]
ctx := context.Background()
if err := client.SyncLabels(ctx, owner, repo, labels, prune); err != nil {
fmt.Fprintf(os.Stderr, "unable to sync labels: %v\n", err)
os.Exit(1)
}
}
}
| [
"\"INPUT_MANIFEST\"",
"\"INPUT_TOKEN\"",
"\"GITHUB_TOKEN\"",
"\"INPUT_PRUNE\"",
"\"INPUT_REPOSITORY\"",
"\"GITHUB_REPOSITORY\""
]
| []
| [
"INPUT_PRUNE",
"GITHUB_REPOSITORY",
"INPUT_MANIFEST",
"INPUT_TOKEN",
"INPUT_REPOSITORY",
"GITHUB_TOKEN"
]
| [] | ["INPUT_PRUNE", "GITHUB_REPOSITORY", "INPUT_MANIFEST", "INPUT_TOKEN", "INPUT_REPOSITORY", "GITHUB_TOKEN"] | go | 6 | 0 | |
qa/rpc-tests/test_framework/util.py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import http.client
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
BITCOIND_PROC_WAIT_TIMEOUT = 60
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
#For backward compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to Jan 1, 2014 + (201 * 10 * 60)
global MOCKTIME
MOCKTIME = 1388534400 + (201 * 10 * 60)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1, timeout=60):
"""
Wait until everybody has the same tip
"""
while timeout > 0:
tips = [ x.getbestblockhash() for x in rpc_connections ]
if tips == [ tips[0] ]*len(tips):
return True
time.sleep(wait)
timeout -= wait
raise AssertionError("Block sync failed")
def sync_mempools(rpc_connections, wait=1, timeout=60):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
return True
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "condorcoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def rpc_auth_pair(n):
return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
def rpc_url(i, rpchost=None):
rpc_u, rpc_p = rpc_auth_pair(i)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for bitcoind to start. This means that RPC is accessible and fully initialized.
Raise an exception if bitcoind exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('condorcoind exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir, num_nodes):
"""
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache
"""
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join('cache', 'node'+str(i))):
create_cache = True
break
if create_cache:
#find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("CONDORCOIND", "condorcoind"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: condorcoind started, waiting for RPC to come up")
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: RPC succesfully started")
rpcs = []
for i in range(MAX_NODES):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the first 4 nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a bitcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("CONDORCOIND", "condorcoind")
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-mocktime="+str(get_mocktime()) ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: condorcoind started, waiting for RPC to come up")
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: RPC succesfully started")
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start multiple bitcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for _ in range(num_nodes) ]
if binary is None: binary = [ None for _ in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
bitcoind_processes[i].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
del nodes[:] # Emptying array closes connections as a side effect
wait_bitcoinds()
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = tx_size * fee_per_kB / 1000
if fee < target_fee:
raise AssertionError("Fee of %s CDR too low! (Should be %s CDR)"%(str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s CDR too high! (Should be %s CDR)"%(str(fee), str(target_fee)))
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:"+e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
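# Example (added commentary): satoshi_round("0.123456789") == Decimal('0.12345678'),
# since amounts are truncated (ROUND_DOWN) to eight decimal places.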
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in range (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in range(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
| []
| []
| [
"CONDORCOIND",
"PYTHON_DEBUG"
]
| [] | ["CONDORCOIND", "PYTHON_DEBUG"] | python | 2 | 0 | |
tau/core/views.py | import os
import requests
import datetime
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse, Http404
from django.template import loader
from django.contrib.auth import login
from django.conf import settings
from django.http import Http404
from django.utils import timezone
from requests import status_codes
from rest_framework.authtoken.models import Token
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.permissions import AllowAny, IsAuthenticated, IsAuthenticatedOrReadOnly
from rest_framework import viewsets, status
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
from constance import config
import constance.settings
from tau.twitch.models import TwitchAPIScope, TwitchEventSubSubscription
from tau.users.models import User
from .forms import ChannelNameForm, FirstRunForm
from .utils import cleanup_remote_webhooks, cleanup_webhooks, log_request, check_access_token_expired, refresh_access_token, teardown_all_acct_webhooks, teardown_webhooks
from tau.twitch.models import TwitchHelixEndpoint
@api_view(['POST'])
def irc_message_view(request):
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)('twitchchat', {
'type': 'twitchchat.event',
'data': request.data
})
return Response({}, status=status.HTTP_201_CREATED)
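# helix_view below acts as a thin proxy for the Twitch Helix API: it refreshes
# the OAuth token when expired, chooses the user or app access token based on
# the matching TwitchHelixEndpoint record, forwards the request (including any
# query string) to https://api.twitch.tv/helix/<helix_path>, and returns
# Twitch's JSON response. Illustrative call (the URL prefix that routes to this
# view lives in the project's urls.py, which is not shown here):
#   GET <helix-proxy-prefix>/users?login=somechannel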
@api_view(['GET', 'POST', 'PUT', 'PATCH', 'DELETE'])
def helix_view(request, helix_path=None):
if check_access_token_expired():
refresh_access_token()
try:
endpoint_instance = TwitchHelixEndpoint.objects.get(
endpoint=helix_path,
method=request.method
)
if endpoint_instance.token_type == 'OA':
token = config.TWITCH_ACCESS_TOKEN
else:
token = config.TWITCH_APP_ACCESS_TOKEN
except TwitchHelixEndpoint.DoesNotExist:
token = config.TWITCH_ACCESS_TOKEN
body = request.data
client_id = os.environ.get('TWITCH_APP_ID', None)
headers = {
'Authorization': 'Bearer {}'.format(token),
'Client-Id': client_id
}
url = f'https://api.twitch.tv/helix/' \
f'{helix_path}'
uri = request.build_absolute_uri()
url_params = ''
if uri.count('?') > 0:
url_params = uri.split('?', 1)[1]
if url_params != '':
url += f'?{url_params}'
if request.method == 'GET':
data = requests.get(
url,
headers=headers
)
elif request.method == 'POST':
data = requests.post(
url,
data=body,
headers=headers
)
elif request.method == 'PUT':
data = requests.put(
url,
data=body,
headers=headers
)
print(data)
elif request.method == 'PATCH':
data = requests.patch(
url,
data=body,
headers=headers
)
elif request.method == 'DELETE':
data = requests.delete(
url,
headers=headers
)
try:
if(settings.DEBUG_TWITCH_CALLS):
log_request(data)
stream_data = data.json()
except ValueError:
stream_data = None
return Response(stream_data, status=data.status_code)
def home_view(request):
user_count = User.objects.all().exclude(username='worker_process').count()
if user_count == 0:
return HttpResponseRedirect('/first-run/')
# elif not request.user.is_authenticated:
# return HttpResponseRedirect('/accounts/login/')
elif config.CHANNEL == '':
return HttpResponseRedirect('/set-channel/')
elif config.SCOPE_UPDATED_NEEDED:
return HttpResponseRedirect('/refresh-token-scope/')
else:
# # template = loader.get_template('home.html')
# template = loader.get_template('dashboard/index.html')
# return HttpResponse(template.render({'config': config}, request))
return HttpResponseRedirect('/dashboard')
def first_run_view(request):
user_count = User.objects.all().exclude(username='worker_process').count()
if user_count > 0: # If users already exist, it is not first run
return HttpResponseRedirect('/') # reject creating a new super-user
if request.method == 'POST':
form = FirstRunForm(request.POST)
if form.is_valid():
user = User.objects.create_user(
form.cleaned_data['username'],
password=form.cleaned_data['password1']
)
user.is_superuser=True
user.is_staff=True
user.save()
login(request, user)
return HttpResponseRedirect('/')
else:
template = loader.get_template('registration/first-run.html')
return HttpResponse(template.render({}, request))
else:
template = loader.get_template('registration/first-run.html')
return HttpResponse(template.render({}, request))
def get_channel_name_view(request):
if request.method == 'POST':
port = os.environ.get('PORT', 8000)
form = ChannelNameForm(request.POST)
if form.is_valid():
# Process the data
config.CHANNEL = form.cleaned_data['channel_name']
scope=' '.join(settings.TOKEN_SCOPES)
client_id = os.environ.get('TWITCH_APP_ID', None)
url = f'https://id.twitch.tv/oauth2/authorize?' \
f'client_id={client_id}&' \
f'redirect_uri={settings.BASE_URL}/twitch-callback/&' \
f'response_type=code&' \
f'scope={scope}&' \
f'force_verify=true'
return HttpResponseRedirect(url)
else:
# Show some error page
pass
else:
template = loader.get_template('registration/twitch-channel-setup.html')
return HttpResponse(template.render({}, request))
def refresh_token_scope(request):
client_id = os.environ.get('TWITCH_APP_ID', None)
helix_scopes = list(
TwitchAPIScope.objects.filter(
required=True
).values_list('scope', flat=True)
)
eventsub_scopes = list(
TwitchEventSubSubscription.objects.filter(
active=True
).values_list('scope_required', flat=True)
)
scopes = list(set(settings.TOKEN_SCOPES + eventsub_scopes + helix_scopes))
scopes = list(filter(lambda x: (x is not None), scopes))
scope=' '.join(scopes)
url = f'https://id.twitch.tv/oauth2/authorize?' \
f'client_id={client_id}&' \
f'redirect_uri={settings.BASE_URL}/twitch-callback/&' \
f'response_type=code&' \
f'scope={scope}&' \
f'force_verify=true'
return HttpResponseRedirect(url)
@api_view()
def get_tau_token(request):
if not request.user.is_authenticated:
return JsonResponse({'error': 'You must be logged in to access this endpoint.'})
else:
token = Token.objects.get(user=request.user)
return JsonResponse({'token': token.key})
@api_view(['GET'])
def get_public_url(request):
if not request.user.is_authenticated:
return JsonResponse({'error': 'You must be logged in to access this endpoint.'})
else:
public_url = config.PUBLIC_URL
return JsonResponse({'public_url': public_url})
@api_view(['POST'])
def refresh_tau_token(request):
if not request.user.is_authenticated:
return JsonResponse({'error': 'You must be logged in to access this endpoint.'})
else:
token = Token.objects.get(user=request.user)
token.delete()
token = Token.objects.create(user=request.user)
return JsonResponse({'token': token.key})
@api_view(['POST'])
def reset_webhooks(request):
if not request.user.is_authenticated:
return JsonResponse({'error': 'You must be logged in to access this endpoint.'})
data = request.data
if data['type'] == 'all':
teardown_all_acct_webhooks()
elif data['type'] == 'remote':
token = Token.objects.get(user=request.user)
cleanup_remote_webhooks()
elif data['type'] == 'broken':
token = Token.objects.get(user=request.user)
cleanup_webhooks()
else:
return JsonResponse({'webhooks_reset': False, 'error': 'Proper type not found.'})
config.FORCE_WEBHOOK_REFRESH = True
return JsonResponse({'webhooks_reset': True})
def process_twitch_callback_view(request):
port = os.environ.get('PORT', 8000)
params = request.GET
auth_code = params['code']
client_id = os.environ.get('TWITCH_APP_ID', None)
client_secret = os.environ.get('TWITCH_CLIENT_SECRET', None)
auth_r = requests.post('https://id.twitch.tv/oauth2/token', data = {
'client_id': client_id,
'client_secret': client_secret,
'code': auth_code,
'grant_type': 'authorization_code',
'redirect_uri': f'{settings.BASE_URL}/twitch-callback/'
})
response_data = auth_r.json()
if(settings.DEBUG_TWITCH_CALLS):
log_request(auth_r)
config.TWITCH_ACCESS_TOKEN = response_data['access_token']
config.TWITCH_REFRESH_TOKEN = response_data['refresh_token']
expiration = timezone.now() + datetime.timedelta(seconds=response_data['expires_in'])
config.TWITCH_ACCESS_TOKEN_EXPIRATION = expiration
scope=' '.join(settings.TOKEN_SCOPES)
app_auth_r = requests.post('https://id.twitch.tv/oauth2/token', data = {
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'client_credentials',
'scope': scope
})
if(settings.DEBUG_TWITCH_CALLS):
log_request(app_auth_r)
app_auth_data = app_auth_r.json()
config.TWITCH_APP_ACCESS_TOKEN = app_auth_data['access_token']
config.SCOPE_UPDATED_NEEDED = False
config.SCOPES_REFRESHED = True
headers = {
'Authorization': 'Bearer {}'.format(config.TWITCH_ACCESS_TOKEN),
'Client-Id': client_id
}
user_r = requests.get('https://api.twitch.tv/helix/users', headers=headers)
if(settings.DEBUG_TWITCH_CALLS):
log_request(user_r)
user_data = user_r.json()
channel_id = user_data['data'][0]['id']
config.CHANNEL_ID = channel_id
return HttpResponseRedirect('/')
class HeartbeatViewSet(viewsets.ViewSet):
permission_classes = (IsAuthenticatedOrReadOnly, )
def list(self, request, *args, **kwargs):
response = {'message': 'pong'}
return Response(response)
class TAUSettingsViewSet(viewsets.ViewSet):
permission_classes = (IsAuthenticated, )
valid_keys = ['USE_IRC']
def list(self, request, *args, **kwargs):
response = {key.lower(): getattr(config, key) for key in self.valid_keys}
return Response(response)
def retrieve(self, request, pk=None):
if pk.upper() in self.valid_keys:
return Response({pk: getattr(config, pk.upper())})
else:
raise Http404
def update(self, request, pk=None):
if pk.upper() in self.valid_keys:
data = request.data
setattr(config, pk.upper(), data['value'])
return Response({pk: data['value']})
else:
raise Http404
class ServiceStatusViewSet(viewsets.ViewSet):
permission_classes = (IsAuthenticated, )
def update(self, request, pk=None):
if pk.startswith('STATUS_') and hasattr(config, pk):
data = request.data
new_status = data['status']
setattr(config, pk, new_status)
return Response({
pk: new_status
})
elif pk == 'SET_ALL':
status_keys = filter(
lambda x: x.startswith('STATUS_'),
constance.settings.CONFIG.keys()
)
data = request.data
new_status = data['status']
for key in status_keys:
setattr(config, key, new_status)
return Response({
'reset': 'complete'
})
else:
raise Http404("Config does not exist")
| []
| []
| [
"PORT",
"TWITCH_APP_ID",
"TWITCH_CLIENT_SECRET"
]
| [] | ["PORT", "TWITCH_APP_ID", "TWITCH_CLIENT_SECRET"] | python | 3 | 0 | |
history/core/views.py | from flask import Response, abort, request, jsonify
from core import app
from . import database
@app.route('/')
def index():
return 'History service online'
@app.post('/viewed')
def viewed():
payload = request.get_json()
if not payload:
abort(400)
videoPath = payload['videoId']
if not videoPath:
abort(400)
database.addToHistory(videoPath)
return Response(None, 200)
@app.get('/viewed')
def get_viewed():
return jsonify(database.select())
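# Illustrative requests against this service (paths come from the routes above;
# host and port are assumptions):
#   curl -X POST http://localhost:<port>/viewed \
#        -H 'Content-Type: application/json' -d '{"videoId": "sample.mp4"}'
#   curl http://localhost:<port>/viewed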
| []
| []
| []
| [] | [] | python | null | null | null |
pkg/v1/cli/pluginmanager/manager_test.go | // Copyright 2021 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package pluginmanager
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"testing"
"github.com/otiai10/copy"
"github.com/stretchr/testify/assert"
cliv1alpha1 "github.com/vmware-tanzu/tanzu-framework/apis/cli/v1alpha1"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli/common"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli/plugin"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/config"
"github.com/vmware-tanzu/tanzu-framework/pkg/v1/tkg/log"
)
const (
testcaseInstallManagementCluster = "install-management-cluster"
testcaseInstallLogin = "install-login"
testcaseInstallCluster = "install-cluster"
testcaseInstallNotexists = "install-notexists"
)
func Test_DiscoverPlugins(t *testing.T) {
assert := assert.New(t)
defer setupLocalDistoForTesting()()
serverPlugins, standalonePlugins := DiscoverPlugins("")
assert.Equal(0, len(serverPlugins))
assert.Equal(2, len(standalonePlugins))
serverPlugins, standalonePlugins = DiscoverPlugins("mgmt-does-not-exists")
assert.Equal(0, len(serverPlugins))
assert.Equal(2, len(standalonePlugins))
serverPlugins, standalonePlugins = DiscoverPlugins("mgmt")
assert.Equal(1, len(serverPlugins))
assert.Equal(2, len(standalonePlugins))
assert.Equal("cluster", serverPlugins[0].Name)
assert.Contains([]string{"login", "management-cluster"}, standalonePlugins[0].Name)
assert.Contains([]string{"login", "management-cluster"}, standalonePlugins[1].Name)
}
func Test_InstallPlugin_InstalledPlugins(t *testing.T) {
assert := assert.New(t)
defer setupLocalDistoForTesting()()
execCommand = fakeExecCommand
defer func() { execCommand = exec.Command }()
// Try installing nonexistent plugin
err := InstallPlugin("", "notexists", "v0.2.0")
assert.NotNil(err)
assert.Contains(err.Error(), "unable to find plugin 'notexists'")
// Install login (standalone) plugin
err = InstallPlugin("", "login", "v0.2.0")
assert.Nil(err)
// Verify installed plugin
installedServerPlugins, installedStandalonePlugins, err := InstalledPlugins("")
assert.Nil(err)
assert.Equal(0, len(installedServerPlugins))
assert.Equal(1, len(installedStandalonePlugins))
assert.Equal("login", installedStandalonePlugins[0].Name)
// Try installing cluster plugin through standalone discovery
err = InstallPlugin("", "cluster", "v0.2.0")
assert.NotNil(err)
assert.Contains(err.Error(), "unable to find plugin 'cluster'")
// Try installing cluster plugin through context discovery
err = InstallPlugin("mgmt", "cluster", "v0.2.0")
assert.Nil(err)
// Verify installed plugins
installedServerPlugins, installedStandalonePlugins, err = InstalledPlugins("mgmt")
assert.Nil(err)
assert.Equal(1, len(installedStandalonePlugins))
assert.Equal("login", installedStandalonePlugins[0].Name)
assert.Equal(1, len(installedServerPlugins))
assert.Equal("cluster", installedServerPlugins[0].Name)
}
func Test_AvailablePlugins(t *testing.T) {
assert := assert.New(t)
defer setupLocalDistoForTesting()()
discovered, err := AvailablePlugins("")
assert.Nil(err)
assert.Equal(2, len(discovered))
assert.Equal("management-cluster", discovered[0].Name)
assert.Equal(common.PluginScopeStandalone, discovered[0].Scope)
assert.Equal(common.PluginStatusNotInstalled, discovered[0].Status)
assert.Equal("login", discovered[1].Name)
assert.Equal(common.PluginScopeStandalone, discovered[1].Scope)
assert.Equal(common.PluginStatusNotInstalled, discovered[1].Status)
discovered, err = AvailablePlugins("mgmt")
assert.Nil(err)
assert.Equal(3, len(discovered))
assert.Equal("cluster", discovered[0].Name)
assert.Equal(common.PluginScopeContext, discovered[0].Scope)
assert.Equal(common.PluginStatusNotInstalled, discovered[0].Status)
assert.Equal("management-cluster", discovered[1].Name)
assert.Equal(common.PluginScopeStandalone, discovered[1].Scope)
assert.Equal(common.PluginStatusNotInstalled, discovered[1].Status)
assert.Equal("login", discovered[2].Name)
assert.Equal(common.PluginScopeStandalone, discovered[2].Scope)
assert.Equal(common.PluginStatusNotInstalled, discovered[2].Status)
// Install login, cluster package
mockInstallPlugin(assert, "", "login", "v0.2.0")
mockInstallPlugin(assert, "mgmt", "cluster", "v0.2.0")
// Get available plugin after install and verify installation status
discovered, err = AvailablePlugins("mgmt")
assert.Nil(err)
assert.Equal(3, len(discovered))
assert.Equal("cluster", discovered[0].Name)
assert.Equal(common.PluginScopeContext, discovered[0].Scope)
assert.Equal(common.PluginStatusInstalled, discovered[0].Status)
assert.Equal("login", discovered[2].Name)
assert.Equal(common.PluginScopeStandalone, discovered[2].Scope)
assert.Equal(common.PluginStatusInstalled, discovered[2].Status)
}
func Test_AvailablePlugins_From_LocalSource(t *testing.T) {
assert := assert.New(t)
currentDirAbsPath, _ := filepath.Abs(".")
discovered, err := AvailablePluginsFromLocalSource(filepath.Join(currentDirAbsPath, "test", "local"))
assert.Nil(err)
assert.Equal(3, len(discovered))
assert.Equal("cluster", discovered[0].Name)
assert.Equal(common.PluginScopeStandalone, discovered[0].Scope)
assert.Equal(common.PluginStatusNotInstalled, discovered[0].Status)
assert.Equal("management-cluster", discovered[1].Name)
assert.Equal(common.PluginScopeStandalone, discovered[1].Scope)
assert.Equal(common.PluginStatusNotInstalled, discovered[1].Status)
assert.Equal("login", discovered[2].Name)
assert.Equal(common.PluginScopeStandalone, discovered[2].Scope)
assert.Equal(common.PluginStatusNotInstalled, discovered[2].Status)
}
func Test_InstallPlugin_InstalledPlugins_From_LocalSource(t *testing.T) {
assert := assert.New(t)
execCommand = fakeExecCommand
defer func() { execCommand = exec.Command }()
currentDirAbsPath, _ := filepath.Abs(".")
localPluginSourceDir := filepath.Join(currentDirAbsPath, "test", "local")
// Try installing nonexistent plugin
err := InstallPluginsFromLocalSource("notexists", "v0.2.0", localPluginSourceDir)
assert.NotNil(err)
assert.Contains(err.Error(), "unable to find plugin 'notexists'")
// Install login from local source directory
err = InstallPluginsFromLocalSource("login", "v0.2.0", localPluginSourceDir)
assert.Nil(err)
// Verify installed plugin
installedServerPlugins, installedStandalonePlugins, err := InstalledPlugins("")
assert.Nil(err)
assert.Equal(0, len(installedServerPlugins))
assert.Equal(1, len(installedStandalonePlugins))
assert.Equal("login", installedStandalonePlugins[0].Name)
// Try installing cluster plugin from local source directory
err = InstallPluginsFromLocalSource("cluster", "v0.2.0", localPluginSourceDir)
assert.Nil(err)
installedServerPlugins, installedStandalonePlugins, err = InstalledPlugins("")
assert.Nil(err)
assert.Equal(0, len(installedServerPlugins))
assert.Equal(2, len(installedStandalonePlugins))
// Try installing a plugin from incorrect local path
err = InstallPluginsFromLocalSource("cluster", "v0.2.0", "fakepath")
assert.NotNil(err)
assert.Contains(err.Error(), "no such file or directory")
}
func Test_DescribePlugin(t *testing.T) {
assert := assert.New(t)
defer setupLocalDistoForTesting()()
// Try describe plugin when plugin is not installed
_, err := DescribePlugin("", "login")
assert.NotNil(err)
assert.Contains(err.Error(), "could not get plugin path for plugin \"login\"")
// Install login (standalone) package
mockInstallPlugin(assert, "", "login", "v0.2.0")
// Try describe plugin when plugin after installing plugin
pd, err := DescribePlugin("", "login")
assert.Nil(err)
assert.Equal("login", pd.Name)
assert.Equal("v0.2.0", pd.Version)
// Try describe plugin when plugin is not installed
_, err = DescribePlugin("mgmt", "cluster")
assert.NotNil(err)
assert.Contains(err.Error(), "could not get plugin path for plugin \"cluster\"")
// Install cluster (context) package
// Install login (standalone) package
mockInstallPlugin(assert, "mgmt", "cluster", "v0.2.0")
// Try describe plugin when plugin after installing plugin
pd, err = DescribePlugin("mgmt", "cluster")
assert.Nil(err)
assert.Equal("cluster", pd.Name)
assert.Equal("v0.2.0", pd.Version)
}
func Test_DeletePlugin(t *testing.T) {
assert := assert.New(t)
defer setupLocalDistoForTesting()()
// Try delete plugin when plugin is not installed
err := DeletePlugin("", "login")
assert.NotNil(err)
assert.Contains(err.Error(), "could not get plugin path for plugin \"login\"")
// Install login (standalone) package
mockInstallPlugin(assert, "", "login", "v0.2.0")
// Try delete plugin when plugin is installed
err = DeletePlugin("mgmt", "cluster")
assert.NotNil(err)
assert.Contains(err.Error(), "could not get plugin path for plugin \"cluster\"")
// Install cluster (context) package
mockInstallPlugin(assert, "mgmt", "cluster", "v0.2.0")
// Try describe plugin when plugin after installing plugin
err = DeletePlugin("mgmt", "cluster")
assert.Nil(err)
}
func Test_ValidatePlugin(t *testing.T) {
assert := assert.New(t)
pd := cliv1alpha1.PluginDescriptor{}
err := ValidatePlugin(&pd)
assert.Contains(err.Error(), "plugin name cannot be empty")
pd.Name = "fakeplugin"
err = ValidatePlugin(&pd)
assert.NotContains(err.Error(), "plugin name cannot be empty")
assert.Contains(err.Error(), "plugin \"fakeplugin\" version cannot be empty")
assert.Contains(err.Error(), "plugin \"fakeplugin\" group cannot be empty")
}
func Test_SyncPlugins_Standalone_Plugins(t *testing.T) {
assert := assert.New(t)
defer setupLocalDistoForTesting()()
execCommand = fakeExecCommand
defer func() { execCommand = exec.Command }()
// Get available standalone plugins and verify the status is `not installed`
discovered, err := AvailablePlugins("")
assert.Nil(err)
assert.Equal(2, len(discovered))
assert.Equal("management-cluster", discovered[0].Name)
assert.Equal(common.PluginScopeStandalone, discovered[0].Scope)
assert.Equal(common.PluginStatusNotInstalled, discovered[0].Status)
assert.Equal("login", discovered[1].Name)
assert.Equal(common.PluginScopeStandalone, discovered[1].Scope)
assert.Equal(common.PluginStatusNotInstalled, discovered[1].Status)
// Sync standalone plugins
err = SyncPlugins("")
assert.Nil(err)
// Get available standalone plugins and verify the status is updated to `installed`
discovered, err = AvailablePlugins("")
assert.Nil(err)
assert.Equal(2, len(discovered))
assert.Equal("management-cluster", discovered[0].Name)
assert.Equal(common.PluginScopeStandalone, discovered[0].Scope)
assert.Equal(common.PluginStatusInstalled, discovered[0].Status)
assert.Equal("login", discovered[1].Name)
assert.Equal(common.PluginScopeStandalone, discovered[1].Scope)
assert.Equal(common.PluginStatusInstalled, discovered[1].Status)
}
func Test_SyncPlugins_All_Plugins(t *testing.T) {
assert := assert.New(t)
defer setupLocalDistoForTesting()()
execCommand = fakeExecCommand
defer func() { execCommand = exec.Command }()
// Get all available plugins(standalone+context-aware) and verify the status is `not installed`
discovered, err := AvailablePlugins("mgmt")
assert.Nil(err)
assert.Equal(3, len(discovered))
assert.Equal("cluster", discovered[0].Name)
assert.Equal(common.PluginScopeContext, discovered[0].Scope)
assert.Equal(common.PluginStatusNotInstalled, discovered[0].Status)
assert.Equal("management-cluster", discovered[1].Name)
assert.Equal(common.PluginScopeStandalone, discovered[1].Scope)
assert.Equal(common.PluginStatusNotInstalled, discovered[1].Status)
assert.Equal("login", discovered[2].Name)
assert.Equal(common.PluginScopeStandalone, discovered[2].Scope)
assert.Equal(common.PluginStatusNotInstalled, discovered[2].Status)
// Sync standalone plugins
err = SyncPlugins("mgmt")
assert.Nil(err)
// Get all available plugins(standalone+context-aware) and verify the status is updated to `installed`
discovered, err = AvailablePlugins("mgmt")
assert.Nil(err)
assert.Equal(3, len(discovered))
assert.Equal("cluster", discovered[0].Name)
assert.Equal(common.PluginScopeContext, discovered[0].Scope)
assert.Equal(common.PluginStatusInstalled, discovered[0].Status)
assert.Equal("management-cluster", discovered[1].Name)
assert.Equal(common.PluginScopeStandalone, discovered[1].Scope)
assert.Equal(common.PluginStatusInstalled, discovered[1].Status)
assert.Equal("login", discovered[2].Name)
assert.Equal(common.PluginScopeStandalone, discovered[2].Scope)
assert.Equal(common.PluginStatusInstalled, discovered[2].Status)
}
func Test_getInstalledButNotDiscoveredStandalonePlugins(t *testing.T) {
assert := assert.New(t)
availablePlugins := []plugin.Discovered{{Name: "fake1", DiscoveryType: "oci", RecommendedVersion: "v1.0.0", Status: common.PluginStatusInstalled}}
installedPluginDesc := []cliv1alpha1.PluginDescriptor{{Name: "fake2", Version: "v2.0.0", Discovery: "local"}}
// If installed plugin is not part of available(discovered) plugins
plugins := getInstalledButNotDiscoveredStandalonePlugins(availablePlugins, installedPluginDesc)
assert.Equal(len(plugins), 1)
assert.Equal("fake2", plugins[0].Name)
assert.Equal("v2.0.0", plugins[0].RecommendedVersion)
assert.Equal(common.PluginStatusInstalled, plugins[0].Status)
// If installed plugin is part of available(discovered) plugins and provided available plugin is already marked as `installed`
installedPluginDesc = append(installedPluginDesc, cliv1alpha1.PluginDescriptor{Name: "fake1", Version: "v1.0.0", Discovery: "local"})
plugins = getInstalledButNotDiscoveredStandalonePlugins(availablePlugins, installedPluginDesc)
assert.Equal(len(plugins), 1)
assert.Equal("fake2", plugins[0].Name)
assert.Equal("v2.0.0", plugins[0].RecommendedVersion)
assert.Equal(common.PluginStatusInstalled, plugins[0].Status)
// If installed plugin is part of available(discovered) plugins and provided available plugin is already marked as `not installed`
// then test the availablePlugin status gets updated to `installed`
availablePlugins[0].Status = common.PluginStatusNotInstalled
plugins = getInstalledButNotDiscoveredStandalonePlugins(availablePlugins, installedPluginDesc)
assert.Equal(len(plugins), 1)
assert.Equal("fake2", plugins[0].Name)
assert.Equal("v2.0.0", plugins[0].RecommendedVersion)
assert.Equal(common.PluginStatusInstalled, plugins[0].Status)
assert.Equal(common.PluginStatusInstalled, availablePlugins[0].Status)
// If installed plugin is part of available(discovered) plugins and versions installed is different than discovered version
availablePlugins[0].Status = common.PluginStatusNotInstalled
availablePlugins[0].RecommendedVersion = "v4.0.0"
plugins = getInstalledButNotDiscoveredStandalonePlugins(availablePlugins, installedPluginDesc)
assert.Equal(len(plugins), 1)
assert.Equal("fake2", plugins[0].Name)
assert.Equal("v2.0.0", plugins[0].RecommendedVersion)
assert.Equal(common.PluginStatusInstalled, plugins[0].Status)
assert.Equal(common.PluginStatusInstalled, availablePlugins[0].Status)
}
func Test_setAvailablePluginsStatus(t *testing.T) {
assert := assert.New(t)
availablePlugins := []plugin.Discovered{{Name: "fake1", DiscoveryType: "oci", RecommendedVersion: "v1.0.0", Status: common.PluginStatusNotInstalled}}
installedPluginDesc := []cliv1alpha1.PluginDescriptor{{Name: "fake2", Version: "v2.0.0", Discovery: "local"}}
// If installed plugin is not part of available(discovered) plugins
setAvailablePluginsStatus(availablePlugins, installedPluginDesc)
assert.Equal(len(availablePlugins), 1)
assert.Equal("fake1", availablePlugins[0].Name)
assert.Equal("v1.0.0", availablePlugins[0].RecommendedVersion)
assert.Equal(common.PluginStatusNotInstalled, availablePlugins[0].Status)
// If installed plugin is part of available(discovered) plugins and provided available plugin is already marked as `installed`
installedPluginDesc = append(installedPluginDesc, cliv1alpha1.PluginDescriptor{Name: "fake1", Version: "v1.0.0", Discovery: "local"})
setAvailablePluginsStatus(availablePlugins, installedPluginDesc)
assert.Equal(len(availablePlugins), 1)
assert.Equal("fake1", availablePlugins[0].Name)
assert.Equal("v1.0.0", availablePlugins[0].RecommendedVersion)
assert.Equal(common.PluginStatusInstalled, availablePlugins[0].Status)
// If installed plugin is part of available(discovered) plugins and versions installed is different than discovered version
availablePlugins[0].Status = common.PluginStatusNotInstalled
availablePlugins[0].RecommendedVersion = "v3.0.0"
setAvailablePluginsStatus(availablePlugins, installedPluginDesc)
assert.Equal(len(availablePlugins), 1)
assert.Equal("fake1", availablePlugins[0].Name)
assert.Equal("v3.0.0", availablePlugins[0].RecommendedVersion)
assert.Equal(common.PluginStatusUpdateAvailable, availablePlugins[0].Status)
}
func mockInstallPlugin(assert *assert.Assertions, server, name, version string) { //nolint:unparam
execCommand = fakeExecCommand
defer func() { execCommand = exec.Command }()
err := InstallPlugin(server, name, version)
assert.Nil(err)
}
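// fakeExecCommand and TestHelperProcess below implement the usual Go trick for
// faking exec.Command in tests: the returned *exec.Cmd re-runs the test binary
// with -test.run=TestHelperProcess, and the TEST_CASE environment variable
// selects which canned plugin descriptor the fake "plugin" prints on stdout.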
func fakeExecCommand(command string, args ...string) *exec.Cmd {
// get plugin name based on the command
// command path is of the form `path/to/plugin-root-directory/login/v0.2.0`
pluginName := filepath.Base(filepath.Dir(command))
testCase := "install-" + pluginName
cs := []string{"-test.run=TestHelperProcess", "--", command}
cs = append(cs, args...)
cmd := exec.Command(os.Args[0], cs...) //nolint:gosec
tc := "TEST_CASE=" + testCase
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1", tc}
return cmd
}
func TestHelperProcess(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
args := os.Args
for len(args) > 0 {
if args[0] == "--" {
args = args[1:]
break
}
args = args[1:]
}
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "No command\n")
os.Exit(2)
}
switch os.Getenv("TEST_CASE") {
case testcaseInstallCluster:
out := `{"name":"cluster","description":"Kubernetes cluster operations","version":"v0.2.0","buildSHA":"c2dbd15","digest":"","group":"Run","docURL":"","completionType":0,"aliases":["cl","clusters"],"installationPath":"","discovery":"","scope":"","status":""}`
fmt.Fprint(os.Stdout, out)
case testcaseInstallLogin:
out := `{"name":"login","description":"Login to the platform","version":"v0.2.0","buildSHA":"c2dbd15","digest":"","group":"System","docURL":"","completionType":0,"aliases":["lo","logins"],"installationPath":"","discovery":"","scope":"","status":""}`
fmt.Fprint(os.Stdout, out)
case testcaseInstallManagementCluster:
out := `{"name":"management-cluster","description":"Management cluster operations","version":"v0.2.0","buildSHA":"c2dbd15","digest":"","group":"System","docURL":"","completionType":0,"aliases":["lo","logins"],"installationPath":"","discovery":"","scope":"","status":""}`
fmt.Fprint(os.Stdout, out)
case testcaseInstallNotexists:
out := ``
fmt.Fprint(os.Stdout, out)
}
}
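// setupLocalDistoForTesting points the CLI at a throw-away plugin root, local
// distro and cache under a temporary directory, copies the fixtures from
// test/local and test/config.yaml into it, and returns a cleanup func that
// removes the temporary directory.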
func setupLocalDistoForTesting() func() {
tmpDir, err := os.MkdirTemp(os.TempDir(), "")
if err != nil {
log.Fatal(err, "unable to create temporary directory")
}
config.DefaultStandaloneDiscoveryType = "local"
config.DefaultStandaloneDiscoveryLocalPath = "default"
common.DefaultPluginRoot = filepath.Join(tmpDir, "plugin-root")
common.DefaultLocalPluginDistroDir = filepath.Join(tmpDir, "distro")
common.DefaultCacheDir = filepath.Join(tmpDir, "cache")
tkgConfigFile := filepath.Join(tmpDir, "tanzu_config.yaml")
os.Setenv("TANZU_CONFIG", tkgConfigFile)
err = copy.Copy(filepath.Join("test", "local"), common.DefaultLocalPluginDistroDir)
if err != nil {
log.Fatal(err, "Error while setting local distro for testing")
}
err = copy.Copy(filepath.Join("test", "config.yaml"), tkgConfigFile)
if err != nil {
log.Fatal(err, "Error while coping tanzu config file for testing")
}
return func() {
os.RemoveAll(tmpDir)
}
}
| [
"\"GO_WANT_HELPER_PROCESS\"",
"\"TEST_CASE\""
]
| []
| [
"GO_WANT_HELPER_PROCESS",
"TEST_CASE"
]
| [] | ["GO_WANT_HELPER_PROCESS", "TEST_CASE"] | go | 2 | 0 | |
fatf/__init__.py | """
FAT-Forensics
=============
FAT-Forensics is a Python module integrating a variety of fairness,
accountability (security, privacy) and transparency (explainability,
interpretability) approaches to assess social impact of artificial
intelligence systems.
"""
# Author: Kacper Sokol <[email protected]>
# License: new BSD
from typing import Optional
import logging
import os
import re
import sys
import warnings
# Author and license information
__author__ = 'Kacper Sokol'
__email__ = '[email protected]'
__license__ = 'new BSD'
# The current package version
__version__ = '0.0.2'
__all__ = ['setup_warning_filters', 'setup_random_seed']
# Set up logging; enable logging of level INFO and higher
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
_logger_handler = logging.StreamHandler() # pylint: disable=invalid-name
_logger_formatter = logging.Formatter( # pylint: disable=invalid-name
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%y-%b-%d %H:%M:%S')
_logger_handler.setFormatter(_logger_formatter)
logger.addHandler(_logger_handler)
if os.environ.get('PYTEST_IN_PROGRESS', None) is None:
logger.setLevel(logging.INFO) # pragma: nocover
else:
logger.setLevel(logging.NOTSET)
# Redirect warnings to the logger module
# logging.captureWarnings(True)
# py_warnings = logging.getLogger('py.warnings')
# py_warnings.addHandler(_logger_handler)
# py_warnings.setLevel(logging.INFO)
def setup_warning_filters():
"""
Sets up desired warning filters.
If the warning filters are not specified on the command line or via
the system variable make sure that :class:`DeprecationWarning` and
:class:`ImportWarning` raised by this package always get printed.
The warning settings used by pytest can be found in pytest.ini, where in
addition to these two warnings :class:`PendingDeprecationWarning` is
enabled as well.
This functionality is tested by test_warnings_emission1() and
test_warnings_emission2() functions in fatf.tests.test_warning_filters
module.
"""
if not sys.warnoptions:
warnings.filterwarnings(
'always',
category=DeprecationWarning,
module=r'^{0}\.'.format(re.escape(__name__)))
warnings.filterwarnings(
'always',
category=ImportWarning,
module=r'^{0}\.'.format(re.escape(__name__)))
else:
logger.info('External warning filters are being used.')
if 'PYTEST_IN_PROGRESS' not in os.environ:
setup_warning_filters() # pragma: no cover
# This function is tested in fatf.tests.test_rngs_seeding
def setup_random_seed(seed: Optional[int] = None) -> None:
"""
Sets up Python's and numpy's random seed.
Fixture for the tests to assure globally controllable seeding of random
number generators in both Python (:func:`random.seed`) and ``numpy``
(``numpy.random.seed``). The seed is taken either from ``FATF_SEED``
system variable or from the ``seed`` input parameter; if neither of
the two is given, it is sampled uniformly from 0--2147483647 range.
.. note::
If both ``FATF_SEED`` system variable and ``seed`` input parameter are
given, the ``seed`` parameter takes precedence.
This function logs (``info``) the origin of the random seed and its value.
Parameters
----------
seed : integer, optional (default=None)
.. versionadded:: 0.0.2
An integer in 0--2147483647 range used to seed Python's and numpy's
random number generator.
Raises
------
TypeError
The ``seed`` input parameter is not an integer.
ValueError
The ``seed`` input parameter is outside of the allowed 0--2147483647
range. The random seed retrieved from the ``FATF_SEED`` system variable
is either outside of the allowed range or cannot be parsed as an
integer.
"""
import numpy as np
import random
lower_bound = 0
upper_bound = 2147483647
if seed is None:
# It could have been provided in the environment
_random_seed_os = os.environ.get('FATF_SEED', None)
if _random_seed_os is not None:
# Random seed given as a system variable
_random_seed_os = _random_seed_os.strip()
if _random_seed_os.isdigit():
_random_seed = int(_random_seed_os)
if _random_seed < lower_bound or _random_seed > upper_bound:
raise ValueError('The random seed retrieved from the '
'FATF_SEED system variable ({}) is '
'outside of the allowed 0--2147483647 '
'range.'.format(_random_seed))
logger.info('Seeding RNGs using the system variable.')
else:
raise ValueError('The random seed retrieved from the '
'FATF_SEED system variable ({}) '
'cannot be parsed as a non-negative '
'integer.'.format(_random_seed_os))
else:
# No user-defined random seed -- generate randomly
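# 2**31 - 1 == 2147483647, i.e. the documented upper bound of the seed range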
_random_seed = int(np.random.uniform() * (2**31 - 1))
logger.info('Seeding RNGs at random.')
else:
if isinstance(seed, int):
if seed < lower_bound or seed > upper_bound:
raise ValueError('The seed parameter is outside of the '
'allowed 0--2147483647 range.')
_random_seed = seed
logger.info('Seeding RNGs using the input parameter.')
else:
raise TypeError('The seed parameter is not an integer.')
logger.info('Seeding RNGs with %r.', _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| []
| []
| [
"PYTEST_IN_PROGRESS",
"FATF_SEED"
]
| [] | ["PYTEST_IN_PROGRESS", "FATF_SEED"] | python | 2 | 0 | |
deploy/cmd/nomadic/main.go | package main
import (
"context"
"fmt"
"log"
"os"
"sort"
"sync"
"sync/atomic"
"github.com/urfave/cli/v2"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/semconv/v1.4.0"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc"
"github.com/mjm/pi-tools/apps"
deploypb "github.com/mjm/pi-tools/deploy/proto/deploy"
"github.com/mjm/pi-tools/pkg/nomadic"
nomadicpb "github.com/mjm/pi-tools/pkg/nomadic/proto/nomadic"
"github.com/mjm/pi-tools/pkg/nomadic/service/nomadicservice"
"github.com/mjm/pi-tools/pkg/spanerr"
)
var tracer = otel.Tracer("github.com/mjm/pi-tools/deploy/cmd/nomadic")
func main() {
var tp *sdktrace.TracerProvider
app := &cli.App{
Name: "nomadic",
Authors: []*cli.Author{
{
Name: "Matt Moriarity",
Email: "[email protected]",
},
},
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "debug-tracing",
},
&cli.StringFlag{
Name: "trace-id",
},
&cli.StringFlag{
Name: "parent-span-id",
},
},
Before: func(c *cli.Context) error {
traceIDStr := c.String("trace-id")
parentSpanIDStr := c.String("parent-span-id")
if traceIDStr == "" || parentSpanIDStr == "" {
return nil
}
traceID, err := trace.TraceIDFromHex(traceIDStr)
if err != nil {
return err
}
parentSpanID, err := trace.SpanIDFromHex(parentSpanIDStr)
if err != nil {
return err
}
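// Rebuild the caller's span context from the hex IDs so spans emitted by this
// process attach to the existing deploy trace rather than starting a new one.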
sc := trace.NewSpanContext(trace.SpanContextConfig{
TraceID: traceID,
SpanID: parentSpanID,
TraceFlags: trace.FlagsSampled,
})
c.Context = trace.ContextWithRemoteSpanContext(c.Context, sc)
if c.Bool("debug-tracing") {
log.Printf("Setting up stdout exporter")
exporter, err := stdouttrace.New()
if err != nil {
return fmt.Errorf("creating stdout exporter: %w", err)
}
tp = sdktrace.NewTracerProvider(
sdktrace.WithBatcher(exporter))
otel.SetTracerProvider(tp)
} else {
hostIP := os.Getenv("HOST_IP")
exporter, err := otlptracegrpc.New(
context.Background(),
otlptracegrpc.WithInsecure(),
otlptracegrpc.WithEndpoint(fmt.Sprintf("%s:4317", hostIP)))
if err != nil {
return fmt.Errorf("creating otlp exporter: %w", err)
}
r, err := resource.New(context.Background(), resource.WithAttributes(
semconv.ServiceNamespaceKey.String(os.Getenv("NOMAD_NAMESPACE")),
semconv.ServiceNameKey.String("nomadic"),
semconv.ServiceInstanceIDKey.String(os.Getenv("NOMAD_ALLOC_ID")),
semconv.ContainerNameKey.String(os.Getenv("NOMAD_TASK_NAME")),
semconv.HostNameKey.String(os.Getenv("HOSTNAME")),
semconv.HostIDKey.String(os.Getenv("NOMAD_CLIENT_ID"))))
if err != nil {
return fmt.Errorf("creating telemetry resource: %w", err)
}
tp = sdktrace.NewTracerProvider(
sdktrace.WithBatcher(exporter),
sdktrace.WithResource(r))
otel.SetTracerProvider(tp)
}
return nil
},
After: func(c *cli.Context) error {
if tp != nil {
log.Printf("Shutting down exporter")
return tp.Shutdown(context.Background())
}
return nil
},
Commands: []*cli.Command{
{
Name: "install",
Aliases: []string{"i"},
Action: func(c *cli.Context) error {
if c.NArg() < 1 {
cli.ShowCommandHelpAndExit(c, "install", 1)
return nil
}
clients, err := nomadic.DefaultClients()
if err != nil {
return err
}
// TODO support multiple apps
appName := c.Args().First()
app := nomadic.Find(appName)
if app == nil {
return fmt.Errorf("Unknown application name %q", appName)
}
log.Printf("Installing %s", appName)
ctx, span := tracer.Start(c.Context, "install",
trace.WithAttributes(
attribute.String("app.name", app.Name())))
defer span.End()
if err := app.Install(ctx, clients); err != nil {
return err
}
return nil
},
},
{
Name: "uninstall",
Aliases: []string{"u"},
Action: func(c *cli.Context) error {
if c.NArg() < 1 {
cli.ShowCommandHelpAndExit(c, "uninstall", 1)
return nil
}
clients, err := nomadic.DefaultClients()
if err != nil {
return err
}
// TODO support multiple apps
appName := c.Args().First()
app := nomadic.Find(appName)
if app == nil {
return fmt.Errorf("Unknown application name %q", appName)
}
log.Printf("Uninstalling %s", appName)
ctx, span := tracer.Start(c.Context, "uninstall",
trace.WithAttributes(
attribute.String("app.name", app.Name())))
defer span.End()
if err := app.Uninstall(ctx, clients); err != nil {
return err
}
return nil
},
},
{
Name: "list",
Aliases: []string{"l", "ls"},
Action: func(c *cli.Context) error {
if c.NArg() > 0 {
cli.ShowCommandHelpAndExit(c, "list", 1)
return nil
}
var appNames []string
for appName := range nomadic.Registry() {
appNames = append(appNames, appName)
}
sort.Strings(appNames)
for _, appName := range appNames {
fmt.Println(appName)
}
return nil
},
},
{
Name: "images",
Action: func(c *cli.Context) error {
if c.NArg() > 0 {
cli.ShowCommandHelpAndExit(c, "images", 1)
return nil
}
for _, imageURI := range nomadic.RegisteredImageURIs() {
fmt.Println(imageURI)
}
return nil
},
},
{
Name: "perform-deploy",
Flags: []cli.Flag{
&cli.PathFlag{
Name: "server-socket-path",
Required: true,
},
},
Action: func(c *cli.Context) error {
serverSocketPath := c.Path("server-socket-path")
ctx, span := tracer.Start(c.Context, "perform-deploy",
trace.WithAttributes(
attribute.String("server.socket_path", serverSocketPath)))
defer span.End()
eventCh := make(chan *deploypb.ReportEvent, 10)
ctx = nomadic.WithEvents(ctx, nomadic.NewChannelEventReporter(eventCh))
doneCh := make(chan struct{})
clients, err := nomadic.DefaultClients()
if err != nil {
return spanerr.RecordError(ctx, err)
}
conn, err := grpc.DialContext(ctx, "unix://"+serverSocketPath,
grpc.WithInsecure(),
grpc.WithBlock())
if err != nil {
return spanerr.RecordError(ctx, err)
}
client := nomadicpb.NewNomadicClient(conn)
stream, err := client.StreamEvents(c.Context)
if err != nil {
return spanerr.RecordError(ctx, err)
}
var wg sync.WaitGroup
var errored int32
for appName, app := range nomadic.Registry() {
wg.Add(1)
go func(appName string, app nomadic.Deployable) {
defer wg.Done()
ctx, span := tracer.Start(ctx, "Deployable.Install",
trace.WithAttributes(
attribute.String("app.name", app.Name())))
defer span.End()
if err := app.Install(ctx, clients); err != nil {
_ = spanerr.RecordError(ctx, err)
nomadic.Events(ctx).Error("App %s failed to install", appName, nomadic.WithError(err))
atomic.AddInt32(&errored, 1)
return
}
}(appName, app)
}
go func() {
for evt := range eventCh {
stream.Send(&nomadicpb.StreamEventsRequest{
Event: evt,
})
}
close(doneCh)
}()
wg.Wait()
close(eventCh)
<-doneCh
if _, err := stream.CloseAndRecv(); err != nil {
return spanerr.RecordError(ctx, err)
}
if errored != 0 {
err := fmt.Errorf("one or more apps failed to install")
return spanerr.RecordError(ctx, err)
}
return nil
},
},
{
Name: "test-server",
Action: func(c *cli.Context) error {
binaryPath := c.Args().First()
doneCh := make(chan struct{})
eventCh := make(chan *deploypb.ReportEvent)
go func() {
for evt := range eventCh {
log.Println(evt)
}
close(doneCh)
}()
deployErr := nomadicservice.DeployAll(c.Context, binaryPath, eventCh)
<-doneCh
if deployErr == nil {
log.Println("All apps finished deploying successfully")
}
return nil
},
},
},
}
apps.Load()
ctx := nomadic.WithEvents(context.Background(), nomadic.NewLoggingEventReporter())
if err := app.RunContext(ctx, os.Args); err != nil {
log.Fatal(err)
}
}
| [
"\"HOST_IP\"",
"\"NOMAD_NAMESPACE\"",
"\"NOMAD_ALLOC_ID\"",
"\"NOMAD_TASK_NAME\"",
"\"HOSTNAME\"",
"\"NOMAD_CLIENT_ID\""
]
| []
| [
"NOMAD_TASK_NAME",
"NOMAD_NAMESPACE",
"HOSTNAME",
"HOST_IP",
"NOMAD_ALLOC_ID",
"NOMAD_CLIENT_ID"
]
| [] | ["NOMAD_TASK_NAME", "NOMAD_NAMESPACE", "HOSTNAME", "HOST_IP", "NOMAD_ALLOC_ID", "NOMAD_CLIENT_ID"] | go | 6 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ImageSearcher.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
aries_basic_controller/utils.py | import functools
import json
import os
import sys
from timeit import default_timer
import prompt_toolkit
from prompt_toolkit.application import run_in_terminal
from prompt_toolkit.eventloop.defaults import use_asyncio_event_loop  # required by prompt_init() below
from prompt_toolkit.patch_stdout import patch_stdout
from prompt_toolkit.shortcuts import ProgressBar
import pygments
from pygments.filter import Filter
from pygments.lexer import Lexer
from pygments.lexers.data import JsonLdLexer
from prompt_toolkit.formatted_text import FormattedText, PygmentsTokens
COLORIZE = bool(os.getenv("COLORIZE", True))
class PrefixFilter(Filter):
def __init__(self, **options):
Filter.__init__(self, **options)
self.prefix = options.get("prefix")
def lines(self, stream):
line = []
for ttype, value in stream:
if "\n" in value:
parts = value.split("\n")
value = parts.pop()
for part in parts:
line.append((ttype, part))
line.append((ttype, "\n"))
yield line
line = []
line.append((ttype, value))
if line:
yield line
def filter(self, lexer, stream):
if isinstance(self.prefix, str):
prefix = ((pygments.token.Generic, self.prefix),)
elif self.prefix:
prefix = self.prefix
else:
prefix = ()
for line in self.lines(stream):
yield from prefix
yield from line
def print_lexer(
body: str, lexer: Lexer, label: str = None, prefix: str = None, indent: int = None
):
if COLORIZE:
prefix_str = prefix + " " if prefix else ""
if prefix_str or indent:
prefix_body = prefix_str + " " * (indent or 0)
lexer.add_filter(PrefixFilter(prefix=prefix_body))
tokens = list(pygments.lex(body, lexer=lexer))
if label:
fmt_label = [("fg:ansimagenta", label)]
if prefix_str:
fmt_label.insert(0, ("", prefix_str))
print_formatted(FormattedText(fmt_label))
print_formatted(PygmentsTokens(tokens))
else:
print_ext(body, label=label, prefix=prefix)
def print_json(data, label: str = None, prefix: str = None, indent: int = 2):
if isinstance(data, str):
data = json.loads(data)
data = json.dumps(data, indent=2)
prefix_str = prefix or ""
print_lexer(data, JsonLdLexer(), label=label, prefix=prefix_str, indent=indent)
def print_formatted(*args, **kwargs):
prompt_toolkit.print_formatted_text(*args, **kwargs)
def print_ext(
*msg,
color: str = None,
label: str = None,
prefix: str = None,
indent: int = None,
**kwargs,
):
prefix_str = prefix or ""
if indent:
prefix_str += " " * indent
if color and COLORIZE:
msg = [(color, " ".join(map(str, msg)))]
if prefix_str:
msg.insert(0, ("", prefix_str + " "))
if label:
msg.insert(0, ("fg:ansimagenta", label + "\n"))
print_formatted(FormattedText(msg), **kwargs)
return
if label:
print(label, **kwargs)
if prefix_str:
msg = (prefix_str, *msg)
print(*msg, **kwargs)
def output_reader(handle, callback, *args, **kwargs):
for line in iter(handle.readline, b""):
if not line:
break
run_in_terminal(functools.partial(callback, line, *args))
def log_msg(*msg, color="fg:ansimagenta", **kwargs):
run_in_terminal(lambda: print_ext(*msg, color=color, **kwargs))
def log_json(data, **kwargs):
run_in_terminal(lambda: print_json(data, **kwargs))
def log_status(status: str, **kwargs):
log_msg(f"\n{status}", color="bold", **kwargs)
def flatten(args):
for arg in args:
if isinstance(arg, (list, tuple)):
yield from flatten(arg)
else:
yield arg
def prompt_init():
if hasattr(prompt_init, "_called"):
return
prompt_init._called = True
use_asyncio_event_loop()
async def prompt(*args, **kwargs):
prompt_init()
with patch_stdout():
try:
while True:
tmp = await prompt_toolkit.prompt(*args, async_=True, **kwargs)
if tmp:
break
return tmp
except EOFError:
return None
async def prompt_loop(*args, **kwargs):
while True:
option = await prompt(*args, **kwargs)
yield option
class DurationTimer:
def __init__(self, label: str = None, callback=None):
self.callback = callback
self.duration = None
self.label = label
self.last_error = None
self.total = 0.0
self.init_time = self.now()
self.start_time = None
self.stop_time = None
self.running = False
@classmethod
def now(cls):
return default_timer()
def start(self):
self.start_time = self.now()
self.running = True
def stop(self):
if not self.running:
return
self.stop_time = self.now()
self.duration = self.stop_time - self.start_time
self.running = False
self.total += self.duration
if self.callback:
self.callback(self)
def cancel(self):
self.running = False
def reset(self):
self.duration = None
self.total = 0.0
self.last_error = None
restart = False
if self.running:
self.stop()
restart = True
self.start_time = None
self.stop_time = None
if restart:
self.start()
def __enter__(self):
self.start()
return self
def __exit__(self, err_type, err_value, err_tb):
self.last_error = err_value
self.stop()
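# log_timer returns a DurationTimer whose callback logs "<label> <seconds>s" on a
# clean stop, e.g. `with log_timer("fetch schema"): ...`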
def log_timer(label: str, show: bool = True, logger=None, **kwargs):
logger = logger or log_msg
cb = (
(
lambda timer: timer.last_error
or logger(timer.label, f"{timer.duration:.2f}s", **kwargs)
)
if show
else None
)
return DurationTimer(label, cb)
def progress(*args, **kwargs):
return ProgressBar(*args, **kwargs)
def require_indy():
try:
from indy.libindy import _cdll
_cdll()
except ImportError:
print("python3-indy module not installed")
sys.exit(1)
except OSError:
print("libindy shared library could not be loaded")
sys.exit(1)
# Used to extract a DID from a schema or credential definition id
def extract_did(id):
split = id.split(":")
if len(split) > 3:
return split[0]
else:
raise Exception(f"ID {id} is not is the correct format")
def get_schema_details(schema_id):
details = schema_id.split(":")
if len(details) == 4:
return {
"schema_id": schema_id,
"schema_name": details[2],
"schema_version": details[3],
"schema_issuer_did": details[0]
}
else:
raise Exception(f"ID {id} is not is the correct") | []
| []
| [
"COLORIZE"
]
| [] | ["COLORIZE"] | python | 1 | 0 | |
app.py | import os
import math
from flask import (
Flask, flash, render_template,
redirect, request, session, url_for)
from flask_pymongo import PyMongo, pymongo
from flask_paginate import get_page_args
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
if os.path.exists("env.py"):
import env
# CONFIGURATION
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
mongo = PyMongo(app)
# GLOBAL
recipes_coll = mongo.db.recipes
users_coll = mongo.db.users
prep_coll = mongo.db.prep
ingredients_coll = mongo.db.ingredients
# ALL RECIPES
@app.route("/")
@app.route("/recipes")
def recipes():
"""
READ
Displays all recipes in the order they were
created with the latest being shown first.
Pagination limits the number of recipes displayed.
"""
# set title to display in browser tab
title = 'Recipes'
# code for pagination modified from irinatu17:
# https://github.com/irinatu17/MyCookBook
limit_per_page = 6
current_page = int(request.args.get('current_page', 1))
# sort recipes by newest first
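# skip() jumps past the recipes shown on earlier pages; limit() caps this page's size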
recipes = recipes_coll.find().sort('_id', pymongo.DESCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
# total of recipes in database
number_of_all_rec = recipes.count()
pages = range(1, int(math.ceil(number_of_all_rec / limit_per_page)) + 1)
return render_template(
"index.html",
title=title,
recipes=recipes,
current_page=current_page,
pages=pages,
number_of_all_rec=number_of_all_rec)
# SEARCH
@app.route("/search", methods=["GET", "POST"])
def search():
"""
READ
Searches recipes using the
title and ingredients.
"""
# set title to display in browser tab
title = 'Search'
# code for pagination modified from irinatu17:
# https://github.com/irinatu17/MyCookBook
limit_per_page = 6
current_page = int(request.args.get('current_page', 1))
if request.method == "POST":
# used when a search is made
query = request.form.get('query')
else:
# used when a pagination link is clicked following a search
query = request.args.get('query')
sort_by = "Sort By"
# Search results
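# $text requires a MongoDB text index to exist on the recipes collection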
recipes = recipes_coll.find(
{"$text": {"$search": str(
query)}}).sort('_id', pymongo.DESCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
# Number of Search results
number_of_all_rec = recipes.count()
print(number_of_all_rec)
# Number of Pages
pages = range(1, int(math.ceil(number_of_all_rec / limit_per_page)) + 1)
return render_template(
"index.html",
title=title,
recipes=recipes,
current_page=current_page,
pages=pages,
number_of_all_rec=number_of_all_rec,
query=query,
sort_by=sort_by)
# SORT
@app.route("/sort", methods=["GET", "POST"])
def sort():
"""
READ
Sorts recipes by user preference
"""
# set title to display in browser tab
title = 'Recipes Sort'
# pagination
limit_per_page = 6
current_page = int(request.args.get('current_page', 1))
if request.method == "POST":
# used when a sort parameter is selected
query = request.args.get('query')
sort_by = request.form.get('sort_by')
else:
# used when a pagination link is clicked following a sort
query = request.args.get('query')
sort_by = request.args.get('sort_by')
# sort recipes by user preference
# selected preference is displayed in the sort label
if sort_by == 'A-Z':
recipes = mongo.db.recipes.find().sort('recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif sort_by == 'Z-A':
recipes = mongo.db.recipes.find().sort('recipe_name', -1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif sort_by == 'Oldest':
recipes = mongo.db.recipes.find().sort('_id', pymongo.ASCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
else:
recipes = mongo.db.recipes.find().sort('_id', pymongo.DESCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
# total of recipes
number_of_all_rec = recipes.count()
# number of pages
pages = range(1, int(math.ceil(number_of_all_rec / limit_per_page)) + 1)
return render_template(
"index.html",
title=title,
recipes=recipes,
current_page=current_page,
pages=pages,
number_of_all_rec=number_of_all_rec,
query=query,
sort_by=sort_by)
# SORT QUERY
@app.route("/sort_query", methods=["GET", "POST"])
def sort_query():
"""
READ
Sorts ingredients by user preference.
Used following a search
"""
# set title to display in browser tab
title = 'Recipes Search Sort'
# pagination
limit_per_page = 6
current_page = int(request.args.get('current_page', 1))
if request.method == "POST":
# used when a sort parameter is selected following a search
query = request.args.get('query')
sort_by = request.form.get('sort_by')
else:
# used when a pagination link is clicked
query = request.args.get('query')
sort_by = request.args.get('sort_by')
# sort recipes by user preference
if sort_by == 'A-Z':
recipes = recipes_coll.find(
{"$text": {"$search": str(
query)}}).sort('recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif sort_by == 'Z-A':
recipes = recipes_coll.find(
{"$text": {"$search": str(
query)}}).sort('recipe_name', -1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif sort_by == 'Oldest':
recipes = recipes_coll.find(
{"$text": {"$search": str(
query)}}).sort('_id', pymongo.ASCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
else:
recipes = recipes_coll.find(
{"$text": {"$search": str(
query)}}).sort('_id', pymongo.DESCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
# total of recipes
number_of_all_rec = recipes.count()
pages = range(1, int(math.ceil(number_of_all_rec / limit_per_page)) + 1)
return render_template(
"index.html",
title=title,
recipes=recipes,
current_page=current_page,
pages=pages,
number_of_all_rec=number_of_all_rec,
query=query,
sort_by=sort_by)
# SORT FILTER
@app.route("/sort_filter", methods=["GET", "POST"])
def sort_filter():
"""
READ
Sorts filter by user preference.
Used following a filter
"""
# set title to display in browser tab
title = 'Recipes Sort Filter'
# pagination
limit_per_page = 6
current_page = int(request.args.get('current_page', 1))
if request.method == "POST":
# used when a sort parameter is selected following a search
sort_by = request.form.get('sort_by')
filter_by = request.args.get('filter_by')
else:
# used when a pagination link is clicked
sort_by = request.args.get('sort_by')
filter_by = request.args.get('filter_by')
# sort recipes by user preference and chosen filter
if sort_by == 'A-Z':
if filter_by == "Cooking":
recipes = recipes_coll.find(
{"category_name": "cooking"}).sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Baking":
recipes = recipes_coll.find(
{"category_name": "baking"}).sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Snacks":
recipes = recipes_coll.find(
{"category_name": "snacks"}).sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
else:
recipes = recipes_coll.find().sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif sort_by == 'Z-A':
if filter_by == "Cooking":
recipes = recipes_coll.find(
{"category_name": "cooking"}).sort(
'recipe_name', -1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Baking":
recipes = recipes_coll.find(
{"category_name": "baking"}).sort(
'recipe_name', -1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Snacks":
recipes = recipes_coll.find(
{"category_name": "snacks"}).sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
else:
recipes = recipes_coll.find().sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif sort_by == 'Oldest':
if filter_by == "Cooking":
recipes = recipes_coll.find(
{"category_name": "cooking"}).sort(
'_id', pymongo.ASCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Baking":
recipes = recipes_coll.find(
{"category_name": "baking"}).sort(
'_id', pymongo.ASCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Snacks":
recipes = recipes_coll.find(
{"category_name": "snacks"}).sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
else:
recipes = recipes_coll.find().sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
else:
if filter_by == "Cooking":
recipes = recipes_coll.find(
{"category_name": "cooking"}).sort(
'_id', pymongo.DESCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Baking":
recipes = recipes_coll.find(
{"category_name": "baking"}).sort(
'_id', pymongo.DESCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Snacks":
recipes = recipes_coll.find(
{"category_name": "snacks"}).sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
else:
recipes = recipes_coll.find().sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
# total of recipes
number_of_all_rec = recipes.count()
pages = range(1, int(math.ceil(number_of_all_rec / limit_per_page)) + 1)
return render_template(
"index.html",
title=title,
recipes=recipes,
current_page=current_page,
pages=pages,
number_of_all_rec=number_of_all_rec,
sort_by=sort_by,
filter_by=filter_by)
# FILTER
@app.route("/filter", methods=["GET", "POST"])
def filter():
"""
READ
filter recipes by user preference
"""
# set title to display in browser tab
title = 'Recipes Filter'
# pagination
limit_per_page = 6
current_page = int(request.args.get('current_page', 1))
if request.method == "POST":
# used when a filter parameter is selected
query = request.args.get('query')
filter_by = request.form.get('filter_by')
else:
# used when a pagination link is clicked
query = request.args.get('query')
filter_by = request.args.get('filter_by')
# filter recipes by user preference
# selected preference is displayed in the sort label
if filter_by == 'Baking':
recipes = recipes_coll.find(
{"category_name": "baking"}).sort('_id', pymongo.DESCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == 'Snacks':
recipes = recipes_coll.find(
{"category_name": "snacks"}).sort('_id', pymongo.DESCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == 'Cooking':
recipes = recipes_coll.find(
{"category_name": "cooking"}).sort('_id', pymongo.DESCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
else:
recipes = recipes_coll.find().sort('_id', pymongo.DESCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
# total of recipes
number_of_all_rec = recipes.count()
# number of pages
pages = range(1, int(math.ceil(number_of_all_rec / limit_per_page)) + 1)
return render_template(
"index.html",
title=title,
recipes=recipes,
current_page=current_page,
pages=pages,
number_of_all_rec=number_of_all_rec,
query=query,
filter_by=filter_by)
# FILTER SORT
@app.route("/filter_sort", methods=["GET", "POST"])
def filter_sort():
"""
READ
Filters ingredients by user preference.
Used following a sort
"""
# set title to display in browser tab
title = 'Recipes Filter Sort'
# pagination
limit_per_page = 6
current_page = int(request.args.get('current_page', 1))
if request.method == "POST":
# used when a sort parameter is selected following a search
sort_by = request.args.get('sort_by')
filter_by = request.form.get('filter_by')
else:
# used when a pagination link is clicked
sort_by = request.args.get('sort_by')
filter_by = request.args.get('filter_by')
# sort recipes by user preference
if sort_by == 'A-Z':
if filter_by == "Cooking":
recipes = recipes_coll.find(
{"category_name": "cooking"}).sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Baking":
recipes = recipes_coll.find(
{"category_name": "baking"}).sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Snacks":
recipes = recipes_coll.find(
{"category_name": "snacks"}).sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
else:
recipes = recipes_coll.find().sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif sort_by == 'Z-A':
if filter_by == "Cooking":
recipes = recipes_coll.find(
{"category_name": "cooking"}).sort(
'recipe_name', -1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Baking":
recipes = recipes_coll.find(
{"category_name": "baking"}).sort(
'recipe_name', -1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Snacks":
recipes = recipes_coll.find(
{"category_name": "snacks"}).sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
else:
recipes = recipes_coll.find().sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif sort_by == 'Oldest':
if filter_by == "Cooking":
recipes = recipes_coll.find(
{"category_name": "cooking"}).sort(
'_id', pymongo.ASCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Baking":
recipes = recipes_coll.find(
{"category_name": "baking"}).sort(
'_id', pymongo.ASCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Snacks":
recipes = recipes_coll.find(
{"category_name": "snacks"}).sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
else:
recipes = recipes_coll.find().sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
else:
if filter_by == "Cooking":
recipes = recipes_coll.find(
{"category_name": "cooking"}).sort(
'_id', pymongo.DESCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Baking":
recipes = recipes_coll.find(
{"category_name": "baking"}).sort(
'_id', pymongo.DESCENDING).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
elif filter_by == "Snacks":
recipes = recipes_coll.find(
{"category_name": "snacks"}).sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
else:
recipes = recipes_coll.find().sort(
'recipe_name', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
# total of recipes
number_of_all_rec = recipes.count()
pages = range(1, int(math.ceil(number_of_all_rec / limit_per_page)) + 1)
return render_template(
"index.html",
title=title,
recipes=recipes,
current_page=current_page,
pages=pages,
number_of_all_rec=number_of_all_rec,
filter_by=filter_by,
sort_by=sort_by)
# ALL USERS
@app.route("/")
@app.route("/users")
def users():
"""
READ
Displays all users. This feature
is only available to the admin.
Pagination limits the number of recipes displayed.
"""
# set title to display in browser tab
# and apply active-link to nav link
title = 'Users'
# pagination
limit_per_page = 6
current_page = int(request.args.get('current_page', 1))
# total of users in database
number_of_all_users = users_coll.count()
# Number of Pages
pages = range(1, int(math.ceil(number_of_all_users / limit_per_page)) + 1)
# sort users by alphabetical order
users = users_coll.find().sort('username', 1).skip(
(current_page - 1)*limit_per_page).limit(limit_per_page)
return render_template(
"users.html",
title=title,
users=users,
current_page=current_page,
pages=pages,
number_of_all_users=number_of_all_users)
# VIEW PROFILE
@app.route("/view_profile/<username_view>", methods=["GET", "POST"])
def view_profile(username_view):
"""
READ
Allows users to view other users profiles
"""
if request.method == "POST":
ingredient_name = request.args.get('ingredient_name')
mongo.db.ingredients.remove({"ingredient_name": ingredient_name})
flash("Ingredient Successfully Deleted")
print(ingredient_name)
# grab the user's username from db
user = users_coll.find_one(
{"username": username_view})
username = users_coll.find_one(
{"username": username_view})["username"]
user_recipes = recipes_coll.find({"created_by": username_view})
number_of_user_rec = user_recipes.count()
limit_per_page = 6
current_page = int(request.args.get('current_page', 1))
pages = range(1, int(math.ceil(number_of_user_rec / limit_per_page)) + 1)
# recipes to display in order of latest created
recipes = user_recipes.sort('_id', pymongo.DESCENDING).skip(
(current_page - 1) * limit_per_page).limit(limit_per_page)
ingredients = mongo.db.ingredients.find(
{"created_by": username_view}).sort("ingredient_name", 1)
num_ingredients = ingredients.count()
# Foodgroups and units to edit ingredients
food_groups = mongo.db.food_groups.find().sort("group_name", 1)
units = mongo.db.units.find().sort("unit_name", 1)
# set title to display in browser tab
# and apply active-link to nav link if profile belongs to session user
title = username_view.capitalize()
return render_template(
"view_profile.html",
title=title,
user=user,
recipes=recipes,
ingredients=ingredients,
num_ingredients=num_ingredients,
food_groups=food_groups,
units=units,
username=username,
number_of_user_rec=number_of_user_rec,
user_recipes=user_recipes,
current_page=current_page,
pages=pages)
# INDIVIDUAL RECIPES
@app.route("/single_recipe/<recipe_id>")
def single_recipe(recipe_id):
"""
READ
Displays a single recipe.
"""
# set title to display in browser tab
title = mongo.db.recipes.find_one(
{"_id": ObjectId(recipe_id)})['recipe_name']
recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
username = mongo.db.recipes.find_one(
{"_id": ObjectId(recipe_id)})['created_by']
return render_template(
"single_recipe.html",
title=title,
username=username,
recipe=recipe)
# REGISTRATION
@app.route("/register", methods=["GET", "POST"])
def register():
"""
CREATE
Registers a user provided there
is not already an existing user
using the name provided. If successful
the user is logged in and is directed
to their page, if not they are prompted
to try again.
"""
# set title to display in browser tab
# and apply active-link to nav link
title = 'Registration'
if request.method == "POST":
# check if username already exists in db
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username").lower()})
if existing_user:
flash("Username already exists")
return redirect(url_for("register"))
register = {
"username": request.form.get("username").lower(),
"password": generate_password_hash(request.form.get("password"))
}
mongo.db.users.insert_one(register)
# put new user into 'session' cookie
session["user"] = request.form.get("username").lower()
flash("Registration Successful!")
return redirect(url_for("view_profile", username_view=session["user"]))
return render_template(
"register.html",
title=title,)
# LOGIN
@app.route("/login", methods=["GET", "POST"])
def login():
"""
READ
Logs in user provided they provide
correct username and password, if not
they are prompted to try again. If
successful the user is directed to
their profile.
"""
# set title to display in browser tab
# and apply active-link to nav link
title = 'Login'
if request.method == "POST":
# check if username exists
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username").lower()})
if existing_user:
# ensure hashed password matches user input
if check_password_hash(existing_user["password"],
request.form.get("password")):
session["user"] = request.form.get("username").lower()
flash("Welcome, {}".format(request.form.get("username")))
return redirect(
url_for("view_profile", username_view=session["user"]))
else:
# invalid password match
flash("Incorrect Username and/or password")
return redirect(url_for("login"))
else:
# username doesn't exist
flash("Incorrect Username and/or password")
return redirect(url_for("login"))
return render_template(
"login.html",
title=title,)
# LOGOUT
@app.route("/logout")
def logout():
"""
READ
Logs out user and redirects them
to the login page.
"""
# remove user from session cookies
flash("You have been logged out")
session.pop("user")
return redirect(url_for(
"login"))
# INSERT RECIPE
@app.route("/insert_recipe", methods=["GET", "POST"])
def insert_recipe():
"""
CREATE
Inserts a new recipe into the database
and redirects user to homepage with
a message to say recipe has been added.
"""
# set title to display in browser tab
# and apply active-link to nav link
title = 'Add Recipe'
if request.method == "POST":
recipe_is_vegetarian = "on" if request.form.get(
"recipe_is_vegetarian") else "off"
recipe_is_vegan = "on" if request.form.get(
"recipe_is_vegan") else "off"
# url validation code modified from paulloy:
# https://github.com/paulloy/whiskey_herald_msp3/blob/master/app.py
# image addresses must end with one of these extensions
allow_exten = ["jpg", "jpeg", "png"]
form_url = str(request.form.get("recipe_image"))
# split the inputted url and check the extension
x = form_url.split(".")
y = x[-1].lower()
# url submitted if it ends with an acceptable suffix
if y == allow_exten[0] or y == allow_exten[1] or y == allow_exten[2]:
recipe_image = request.form.get("recipe_image")
# if not a blank string is submitted
else:
recipe_image = ""
recipe = {
"category_name": request.form.get("category_name"),
"recipe_name": request.form.get("recipe_name"),
"recipe_ingredients": request.form.get("recipe_ingredients"),
"recipe_instructions": request.form.get("recipe_instructions"),
"recipe_image": recipe_image,
"recipe_serves": request.form.get("recipe_serves"),
"recipe_time": request.form.get("recipe_time"),
"recipe_cals": request.form.get("recipe_cals"),
"recipe_description": request.form.get("recipe_description"),
"recipe_is_vegan": recipe_is_vegan,
"recipe_is_vegetarian": recipe_is_vegetarian,
"created_by": session["user"]
}
mongo.db.recipes.insert_one(recipe)
flash("Recipe Successfully Added")
return redirect(url_for(
"recipes",
title=title,))
categories = mongo.db.categories.find().sort("category_name", 1)
ingredients = mongo.db.ingredients.find(
{"created_by": session["user"]}).sort("ingredient_name", 1)
num_ingredients = ingredients.count()
prep = mongo.db.prep.find().sort("prep", 1)
return render_template(
"insert_recipe.html",
categories=categories,
ingredients=ingredients,
num_ingredients=num_ingredients,
title=title,
prep=prep)
# INSERT INGREDIENT
@app.route("/insert_ingredient", methods=["GET", "POST"])
def insert_ingredient():
"""
CREATE
Inserts a new ingredient into the database.
"""
# set title to display in browser tab
# and set active page to apply active-link to nav link
title = 'Units'
if request.method == "POST":
food_groups = mongo.db.food_groups.find().sort("group_name", 1)
units = mongo.db.units.find().sort("unit_name", 1)
# check if ingredient already exists in db
existing_ingredient = mongo.db.ingredients.find_one(
{"ingredient_name": request.form.get("ingredient_name")})
# If ingredient already exists then only Creator may edit it
if existing_ingredient:
if existing_ingredient["created_by"] == session['user']:
mongo.db.ingredients.remove(existing_ingredient)
else:
flash("Ingredient already exists")
return redirect(url_for("units"))
ingredient = {
"ingredient_name": request.form.get("ingredient_name"),
"ingredient_cal": request.form.get("ingredient_cal"),
"group_name": request.form.get("group_name"),
"unit_name": request.form.get("unit_name"),
"created_by": session["user"]
}
mongo.db.ingredients.insert_one(ingredient)
flash("Ingredient Successfully Added")
username = session['user']
return redirect(url_for(
"view_profile",
username_view=username,
title=title))
# EDIT RECIPE
@app.route("/edit_recipe/<recipe_id>", methods=["GET", "POST"])
def edit_recipe(recipe_id):
"""
UPDATE
Edits a recipe and redirects the user to
their profile with a message to say the edit
has been successful.
"""
# set title to display in browser tab
title = 'Edit Recipe'
# set active page to apply active-link to nav link
active_page = 'edit'
if request.method == "POST":
recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
recipe_is_vegetarian = "on" if request.form.get(
"recipe_is_vegetarian") else "off"
recipe_is_vegan = "on" if request.form.get(
"recipe_is_vegan") else "off"
submit = {
"category_name": request.form.get("category_name"),
"recipe_name": request.form.get("recipe_name"),
"recipe_ingredients": request.form.get("recipe_ingredients"),
"recipe_instructions": request.form.get("recipe_instructions"),
"recipe_image": request.form.get("recipe_image"),
"recipe_serves": request.form.get("recipe_serves"),
"recipe_time": request.form.get("recipe_time"),
"recipe_cals": request.form.get("recipe_cals"),
"recipe_description": request.form.get("recipe_description"),
"recipe_is_vegan": recipe_is_vegan,
"recipe_is_vegetarian": recipe_is_vegetarian,
"created_by": session["user"]
}
mongo.db.recipes.update({"_id": ObjectId(recipe_id)}, submit)
flash("Recipe Successfully Updated")
username = session['user']
return redirect(url_for("view_profile", username_view=username))
recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
user_image = mongo.db.recipes.find_one(
{"_id": ObjectId(recipe_id)})["recipe_image"]
categories = mongo.db.categories.find().sort("category_name", 1)
return render_template(
"edit_recipe.html",
title=title,
recipe=recipe,
user_image=user_image,
categories=categories,
active_page=active_page)
# DELETE RECIPE
@app.route("/delete_recipe/<recipe_id>")
def delete_recipe(recipe_id):
"""
DELETE
Deletes a recipe and redirects the user to their
profile (or the recipes page for admin) with a
message to say the recipe
has been deleted.
"""
mongo.db.recipes.remove({"_id": ObjectId(recipe_id)})
flash("Recipe Successfully Deleted")
username = session['user']
if username == 'admin':
return redirect(url_for("recipes"))
return redirect(url_for(
"view_profile",
username_view=username))
# DELETE INGREDIENT
@app.route("/delete_ingredient/<ingredient_id>")
def delete_ingredient(ingredient_id):
"""
DELETE
Deletes an ingredient and redirects the user to their
profile (or the recipes page for admin) with a
message to say the ingredient has been deleted.
"""
mongo.db.ingredients.remove({"_id": ObjectId(ingredient_id)})
flash("Ingredient Successfully Deleted")
username = session['user']
if username == 'admin':
return redirect(url_for("recipes"))
return redirect(url_for(
"view_profile",
username_view=username))
# DELETE USER
@app.route("/delete_user/<username>")
def delete_user(username):
"""
DELETE
Deletes a user profile. The admin is redirected to
the users page; a user deleting their own profile
is logged out and redirected to the recipes page.
A message confirms the profile has been deleted.
"""
mongo.db.users.remove({"username": username})
mongo.db.recipes.remove({"created_by": username})
mongo.db.ingredients.remove({"created_by": username})
flash("Profile Successfully Deleted")
if session["user"] == 'admin':
return redirect(url_for("users"))
else:
session.pop("user")
return redirect(url_for("recipes"))
# UNITS
@app.route("/units")
def units():
"""
READ
Allows users to convert units
"""
# set title to display in browser tab
# and apply active-link to nav link
title = 'Units'
dairy_ingredients = ingredients_coll.find(
{"group_name": "Dairy"}).sort("ingredient_name", 1)
fruit_ingredients = mongo.db.ingredients.find(
{"group_name": "Fruit"}).sort("ingredient_name", 1)
grain_ingredients = mongo.db.ingredients.find(
{"group_name": "Grains"}).sort("ingredient_name", 1)
protein_ingredients = mongo.db.ingredients.find(
{"group_name": "Protein"}).sort("ingredient_name", 1)
spice_ingredients = mongo.db.ingredients.find(
{"group_name": "Oils & Spices"}).sort("ingredient_name", 1)
sweet_ingredients = mongo.db.ingredients.find(
{"group_name": "Sweet"}).sort("ingredient_name", 1)
veg_ingredients = mongo.db.ingredients.find(
{"group_name": "Veg"}).sort("ingredient_name", 1)
food_groups = mongo.db.food_groups.find().sort("group_name", 1)
units = mongo.db.units.find().sort("unit_name", 1)
return render_template(
"units.html",
title=title,
food_groups=food_groups,
units=units,
dairy_ingredients=dairy_ingredients,
fruit_ingredients=fruit_ingredients,
grain_ingredients=grain_ingredients,
spice_ingredients=spice_ingredients,
sweet_ingredients=sweet_ingredients,
veg_ingredients=veg_ingredients,
protein_ingredients=protein_ingredients)
# 404 ERROR
@app.errorhandler(404)
def error_404(error):
'''
READ
Handles 404 error (page not found)
'''
return render_template('error/404.html', error=True,
title="Page not found"), 404
# 500 ERROR
@app.errorhandler(500)
def error_500(error):
'''
READ
Handles 500 error (internal server error)
'''
return render_template('error/500.html', error=True,
title="Internal Server Error"), 500
if __name__ == "__main__":
app.run(host=os.environ.get("IP"),
port=int(os.environ.get("PORT")),
debug=False)
| []
| []
| [
"MONGO_DBNAME",
"PORT",
"MONGO_URI",
"IP",
"SECRET_KEY"
]
| [] | ["MONGO_DBNAME", "PORT", "MONGO_URI", "IP", "SECRET_KEY"] | python | 5 | 0 | |
src/prediction_knn.py |
import math
from sklearn import neighbors
import os
from os import environ
import os.path
import pickle
import face_recognition
from face_recognition.face_recognition_cli import image_files_in_folder
import sys
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
THRESHOLD = os.getenv('THRESHOLD', 'TRUE')
def predict_frame(X_img_frame, knn_clf=None, model_path=None, distance_threshold=0.6, model='hog'):
"""
Recognizes faces in given image using a trained KNN classifier
:param X_img_frame: Numpy array image
:param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified.
:param model_path: (optional) path to a pickled knn classifier. if not specified, model_save_path must be knn_clf.
:param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
of mis-classifying an unknown person as a known one.
:return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
For faces of unrecognized persons, the name 'unknown' will be returned.
"""
if knn_clf is None and model_path is None:
raise Exception(
"Must supply knn classifier either thourgh knn_clf or model_path")
# Load a trained KNN model (if one was passed in)
if knn_clf is None:
with open(model_path, 'rb') as f:
knn_clf = pickle.load(f)
# Load image file and find face locations
X_face_locations = face_recognition.face_locations(
X_img_frame, model=model)
# If no faces are found in the image, return an empty result.
if len(X_face_locations) == 0:
return []
# Find encodings for faces in the test image
faces_encodings = face_recognition.face_encodings(
X_img_frame, known_face_locations=X_face_locations)
# Use the KNN model to find the best matches for the test face
closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
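# kneighbors returns (distances, indices); closest_distances[0][i][0] is the
# distance from face i to its nearest neighbour in the training set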
if THRESHOLD == 'TRUE':
are_matches = [closest_distances[0][i][0] <=
distance_threshold for i in range(len(X_face_locations))]
elif THRESHOLD == 'FALSE':
are_matches = [closest_distances[0][i][0]
for i in range(len(X_face_locations))]
# Predict classes and remove classifications that aren't within the threshold
result = []
if THRESHOLD == 'TRUE':
result = [(pred, loc) if rec else (os.getenv('UNKNOWN_LABEL', 'unknown'), loc)
for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
elif THRESHOLD == 'FALSE':
result = [(pred, loc) for pred, loc, rec in zip(
knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
return result
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6, model='hog'):
"""
Recognizes faces in given image using a trained KNN classifier
:param X_img_path: path to image to be recognized
:param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified.
:param model_path: (optional) path to a pickled knn classifier. if not specified, model_save_path must be knn_clf.
:param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
of mis-classifying an unknown person as a known one.
:return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
For faces of unrecognized persons, the name 'unknown' will be returned.
"""
if not os.path.isfile(X_img_path):
raise Exception("Invalid image path: {}".format(X_img_path))
if knn_clf is None and model_path is None:
raise Exception(
"Must supply knn classifier either thourgh knn_clf or model_path")
# Load a trained KNN model (if one was passed in)
if knn_clf is None:
with open(model_path, 'rb') as f:
knn_clf = pickle.load(f)
# Load image file and find face locations
X_img = face_recognition.load_image_file(X_img_path)
X_face_locations = face_recognition.face_locations(X_img, model=model)
# If no faces are found in the image, return an empty result.
if len(X_face_locations) == 0:
return []
# Find encodings for faces in the test image
faces_encodings = face_recognition.face_encodings(
X_img, known_face_locations=X_face_locations)
# Use the KNN model to find the best matches for the test face
closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
if THRESHOLD == 'TRUE':
are_matches = [closest_distances[0][i][0] <=
distance_threshold for i in range(len(X_face_locations))]
elif THRESHOLD == 'FALSE':
are_matches = [closest_distances[0][i][0]
for i in range(len(X_face_locations))]
# Predict classes and remove classifications that aren't within the threshold
result = []
if THRESHOLD == 'TRUE':
result = [(pred, loc) if rec else (os.getenv('UNKNOWN_LABLE', 'unkown'), loc)
for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
elif THRESHOLD == 'FALSE':
result = [(pred, loc) for pred, loc, rec in zip(
knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
return result
if __name__ == '__main__':
import settings
if len(sys.argv) != 2:
print('prediction_knn.py <img_path>')
exit(1)
img_path = sys.argv[1]
model_path = os.path.join(
os.getenv('MODELSET_DIR'), os.getenv('KNN_MODEL'))
X_img = face_recognition.load_image_file(img_path)
print(predict_frame(X_img_frame=X_img, model_path=model_path))
| []
| []
| [
"THRESHOLD",
"UNKNOWN_LABEL",
"UNKNOWN_LABLE",
"MODELSET_DIR",
"KNN_MODEL"
]
| [] | ["THRESHOLD", "UNKNOWN_LABEL", "UNKNOWN_LABLE", "MODELSET_DIR", "KNN_MODEL"] | python | 5 | 0 | |
mainboilerplate/config.go | package mainboilerplate
import (
"fmt"
"os"
"path/filepath"
"github.com/jessevdk/go-flags"
)
// MustParseConfig requires that the Parser parse from the combination of an
// optional INI file, configured environment bindings, and explicit flags.
// An INI file matching |configName| is searched for in:
// * The current working directory.
// * ~/.config/gazette (under the user's $HOME or %UserProfile% directory).
// * $APPLICATION_CONFIG_ROOT
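// Explicit command-line flags are parsed after the INI file, so they override any values it sets.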
func MustParseConfig(parser *flags.Parser, configName string) {
// Allow unknown options while parsing an INI file.
var origOptions = parser.Options
parser.Options |= flags.IgnoreUnknown
var iniParser = flags.NewIniParser(parser)
var prefixes = []string{
".",
filepath.Join(os.Getenv("HOME"), ".config", "gazette"),
filepath.Join(os.Getenv("UserProfile"), ".config", "gazette"),
}
for _, prefix := range prefixes {
var path = filepath.Join(prefix, configName)
if err := iniParser.ParseFile(path); err == nil {
break
} else if os.IsNotExist(err) {
// Pass.
} else {
fmt.Println(err)
os.Exit(1)
}
}
// Restore original options for parsing argument flags.
parser.Options = origOptions
MustParseArgs(parser)
}
// MustParseArgs requires that Parser be able to ParseArgs without error.
func MustParseArgs(parser *flags.Parser) {
if _, err := parser.ParseArgs(os.Args[1:]); err != nil {
var flagErr, ok = err.(*flags.Error)
if !ok {
panic(err)
}
switch flagErr.Type {
case flags.ErrDuplicatedFlag, flags.ErrTag, flags.ErrInvalidTag, flags.ErrShortNameTooLong, flags.ErrMarshal:
// These error types indicate a problem in the configuration object
// |parser| was asked to parse (eg, a developer error rather than input error).
panic(err)
case flags.ErrCommandRequired:
// Extend go-flag's "Please specify one command of: ... " output with the full usage.
// This provides a nicer UX to users running the bare binary.
os.Stderr.WriteString("\n")
parser.WriteHelp(os.Stderr)
fmt.Fprintf(os.Stderr, "\nVersion %s, built at %s.\n", Version, BuildDate)
fallthrough
default:
// Other error types indicate a problem of input. Generally, `go-flags`
// already prints a helpful message and we can simply exit.
os.Exit(1)
}
}
}
// AddPrintConfigCmd to the Parser. The "print-config" command helps users test
// whether their applications are correctly configured, by exporting all runtime
// configuration in INI format.
func AddPrintConfigCmd(parser *flags.Parser, configName string) {
parser.AddCommand("print-config", "Print combined configuration and exit", `
print-config parses the combined configuration from `+configName+`, flags,
and environment variables, and then writes the configuration to stdout in INI format.
`, &printConfig{parser})
}
type printConfig struct {
*flags.Parser `no-flag:"t"`
}
func (p printConfig) Execute([]string) error {
var ini = flags.NewIniParser(p.Parser)
ini.Write(os.Stdout, flags.IniIncludeComments|flags.IniCommentDefaults|flags.IniIncludeDefaults)
return nil
}
| [
"\"HOME\"",
"\"UserProfile\""
]
| []
| [
"HOME",
"UserProfile"
]
| [] | ["HOME", "UserProfile"] | go | 2 | 0 | |
service/worker/scanner/shardscanner/scanner.go | // The MIT License (MIT)
//
// Copyright (c) 2017-2020 Uber Technologies Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package shardscanner
import (
"context"
"fmt"
"github.com/pborman/uuid"
"github.com/uber/cadence/common/blobstore"
"github.com/uber/cadence/common/pagination"
"github.com/uber/cadence/common/reconciliation/invariant"
"github.com/uber/cadence/common/reconciliation/store"
)
// Scanner is used to scan over given iterator. It is responsible for three things:
// 1. Checking invariants for each entity.
// 2. Recording corruption and failures to durable store.
// 3. Producing a ScanReport
type Scanner interface {
Scan() ScanReport
}
type (
// ShardScanner is a generic scanner which iterates over entities provided by an iterator.
// Implementations of this scanner have to provide an invariant manager and an iterator.
ShardScanner struct {
shardID int
itr pagination.Iterator
failedWriter store.ExecutionWriter
corruptedWriter store.ExecutionWriter
invariantManager invariant.Manager
progressReportFn func()
}
)
// NewScanner constructs a new ShardScanner
func NewScanner(
shardID int,
iterator pagination.Iterator,
blobstoreClient blobstore.Client,
blobstoreFlushThreshold int,
manager invariant.Manager,
progressReportFn func(),
) *ShardScanner {
id := uuid.New()
return &ShardScanner{
shardID: shardID,
itr: iterator,
failedWriter: store.NewBlobstoreWriter(id, store.FailedExtension, blobstoreClient, blobstoreFlushThreshold),
corruptedWriter: store.NewBlobstoreWriter(id, store.CorruptedExtension, blobstoreClient, blobstoreFlushThreshold),
invariantManager: manager,
progressReportFn: progressReportFn,
}
}
// Scan scans over all executions in shard and runs invariant checks per execution.
func (s *ShardScanner) Scan(ctx context.Context) ScanReport {
result := ScanReport{
ShardID: s.shardID,
Stats: ScanStats{
CorruptionByType: make(map[invariant.Name]int64),
},
}
for s.itr.HasNext() {
s.progressReportFn()
entity, err := s.itr.Next()
if err != nil {
result.Result.ControlFlowFailure = &ControlFlowFailure{
Info: "persistence iterator returned error",
InfoDetails: err.Error(),
}
return result
}
checkResult := s.invariantManager.RunChecks(ctx, entity)
result.Stats.EntitiesCount++
switch checkResult.CheckResultType {
case invariant.CheckResultTypeHealthy:
// do nothing if execution is healthy
case invariant.CheckResultTypeCorrupted:
if err := s.corruptedWriter.Add(store.ScanOutputEntity{
Execution: entity,
Result: checkResult,
}); err != nil {
result.Result.ControlFlowFailure = &ControlFlowFailure{
Info: "blobstore add failed for corrupted execution check",
InfoDetails: err.Error(),
}
return result
}
result.Stats.CorruptedCount++
result.Stats.CorruptionByType[*checkResult.DeterminingInvariantType]++
case invariant.CheckResultTypeFailed:
if err := s.failedWriter.Add(store.ScanOutputEntity{
Execution: entity,
Result: checkResult,
}); err != nil {
result.Result.ControlFlowFailure = &ControlFlowFailure{
Info: "blobstore add failed for failed execution check",
InfoDetails: err.Error(),
}
return result
}
result.Stats.CheckFailedCount++
default:
panic(fmt.Sprintf("unknown CheckResultType: %v", checkResult.CheckResultType))
}
}
if err := s.failedWriter.Flush(); err != nil {
result.Result.ControlFlowFailure = &ControlFlowFailure{
Info: "failed to flush for failed execution checks",
InfoDetails: err.Error(),
}
return result
}
if err := s.corruptedWriter.Flush(); err != nil {
result.Result.ControlFlowFailure = &ControlFlowFailure{
Info: "failed to flush for corrupted execution checks",
InfoDetails: err.Error(),
}
return result
}
result.Result.ShardScanKeys = &ScanKeys{
Corrupt: s.corruptedWriter.FlushedKeys(),
Failed: s.failedWriter.FlushedKeys(),
}
return result
}
| []
| []
| []
| [] | [] | go | null | null | null |
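A brief sketch of how the ShardScanner above might be driven. It assumes the pagination iterator, blobstore client, and invariant manager are constructed elsewhere; the function name, flush threshold, and no-op progress callback are illustrative only, placed conceptually in the same shardscanner package.

// runShardScan builds a ShardScanner for one shard and runs its invariant checks.
func runShardScan(
	ctx context.Context,
	shardID int,
	itr pagination.Iterator,
	client blobstore.Client,
	manager invariant.Manager,
) ScanReport {
	scanner := NewScanner(
		shardID,
		itr,
		client,
		100,       // flush buffered corrupted/failed entities every 100 writes (arbitrary threshold)
		manager,
		func() {}, // no-op progress reporting
	)
	return scanner.Scan(ctx)
}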
examples/docker-pull-dry-run/docker/integration-cli/docker_cli_daemon_test.go | // +build daemon,!windows
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/docker/libnetwork/iptables"
"github.com/docker/libtrust"
"github.com/go-check/check"
)
func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
c.Fatalf("Could not start daemon with busybox: %v", err)
}
if out, err := s.d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil {
c.Fatalf("Could not run top1: err=%v\n%s", err, out)
}
// --restart=no by default
if out, err := s.d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil {
c.Fatalf("Could not run top2: err=%v\n%s", err, out)
}
testRun := func(m map[string]bool, prefix string) {
var format string
for cont, shouldRun := range m {
out, err := s.d.Cmd("ps")
if err != nil {
c.Fatalf("Could not run ps: err=%v\n%q", err, out)
}
if shouldRun {
format = "%scontainer %q is not running"
} else {
format = "%scontainer %q is running"
}
if shouldRun != strings.Contains(out, cont) {
c.Fatalf(format, prefix, cont)
}
}
}
testRun(map[string]bool{"top1": true, "top2": true}, "")
if err := s.d.Restart(); err != nil {
c.Fatalf("Could not restart daemon: %v", err)
}
testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ")
}
func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
c.Fatal(err)
}
if out, err := s.d.Cmd("run", "-d", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil {
c.Fatal(err, out)
}
if err := s.d.Restart(); err != nil {
c.Fatal(err)
}
if _, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil {
c.Fatal(err)
}
if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil {
c.Fatal(err, out)
}
out, err := s.d.Cmd("inspect", "-f", "{{json .Mounts}}", "volrestarttest1")
c.Assert(err, check.IsNil)
if _, err := inspectMountPointJSON(out, "/foo"); err != nil {
c.Fatalf("Expected volume to exist: /foo, error: %v\n", err)
}
}
// #11008
func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *check.C) {
err := s.d.StartWithBusybox()
c.Assert(err, check.IsNil)
out, err := s.d.Cmd("run", "-d", "--name", "top1", "--restart", "always", "busybox:latest", "top")
c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out))
out, err = s.d.Cmd("run", "-d", "--name", "top2", "--restart", "unless-stopped", "busybox:latest", "top")
c.Assert(err, check.IsNil, check.Commentf("run top2: %v", out))
testRun := func(m map[string]bool, prefix string) {
var format string
for name, shouldRun := range m {
out, err := s.d.Cmd("ps")
c.Assert(err, check.IsNil, check.Commentf("run ps: %v", out))
if shouldRun {
format = "%scontainer %q is not running"
} else {
format = "%scontainer %q is running"
}
c.Assert(strings.Contains(out, name), check.Equals, shouldRun, check.Commentf(format, prefix, name))
}
}
// both running
testRun(map[string]bool{"top1": true, "top2": true}, "")
out, err = s.d.Cmd("stop", "top1")
c.Assert(err, check.IsNil, check.Commentf(out))
out, err = s.d.Cmd("stop", "top2")
c.Assert(err, check.IsNil, check.Commentf(out))
// both stopped
testRun(map[string]bool{"top1": false, "top2": false}, "")
err = s.d.Restart()
c.Assert(err, check.IsNil)
// restart=always running
testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ")
out, err = s.d.Cmd("start", "top2")
c.Assert(err, check.IsNil, check.Commentf("start top2: %v", out))
err = s.d.Restart()
c.Assert(err, check.IsNil)
// both running
testRun(map[string]bool{"top1": true, "top2": true}, "After second daemon restart: ")
}
func (s *DockerDaemonSuite) TestDaemonStartIptablesFalse(c *check.C) {
if err := s.d.Start("--iptables=false"); err != nil {
c.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err)
}
}
// Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and
// no longer has an IP associated, we should gracefully handle that case and associate
// an IP with it rather than fail daemon start
func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *check.C) {
// rather than depending on brctl commands to verify docker0 is created and up
// let's start the daemon and stop it, and then make a modification to run the
// actual test
if err := s.d.Start(); err != nil {
c.Fatalf("Could not start daemon: %v", err)
}
if err := s.d.Stop(); err != nil {
c.Fatalf("Could not stop daemon: %v", err)
}
// now we will remove the ip from docker0 and then try starting the daemon
ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0")
stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd)
if err != nil {
c.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr)
}
if err := s.d.Start(); err != nil {
warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix"
c.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning)
}
}
func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
c.Fatalf("Could not start daemon with busybox: %v", err)
}
if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil {
c.Fatalf("Could not run top: %s, %v", out, err)
}
// get output from iptables with container running
ipTablesSearchString := "tcp dpt:80"
ipTablesCmd := exec.Command("iptables", "-nvL")
out, _, err := runCommandWithOutput(ipTablesCmd)
if err != nil {
c.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
}
if !strings.Contains(out, ipTablesSearchString) {
c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out)
}
if err := s.d.Stop(); err != nil {
c.Fatalf("Could not stop daemon: %v", err)
}
// get output from iptables after restart
ipTablesCmd = exec.Command("iptables", "-nvL")
out, _, err = runCommandWithOutput(ipTablesCmd)
if err != nil {
c.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
}
if strings.Contains(out, ipTablesSearchString) {
c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out)
}
}
func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
c.Fatalf("Could not start daemon with busybox: %v", err)
}
if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil {
c.Fatalf("Could not run top: %s, %v", out, err)
}
// get output from iptables with container running
ipTablesSearchString := "tcp dpt:80"
ipTablesCmd := exec.Command("iptables", "-nvL")
out, _, err := runCommandWithOutput(ipTablesCmd)
if err != nil {
c.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
}
if !strings.Contains(out, ipTablesSearchString) {
c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out)
}
if err := s.d.Restart(); err != nil {
c.Fatalf("Could not restart daemon: %v", err)
}
// make sure the container is not running
runningOut, err := s.d.Cmd("inspect", "--format='{{.State.Running}}'", "top")
if err != nil {
c.Fatalf("Could not inspect on container: %s, %v", out, err)
}
if strings.TrimSpace(runningOut) != "true" {
c.Fatalf("Container should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut))
}
// get output from iptables after restart
ipTablesCmd = exec.Command("iptables", "-nvL")
out, _, err = runCommandWithOutput(ipTablesCmd)
if err != nil {
c.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
}
if !strings.Contains(out, ipTablesSearchString) {
c.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out)
}
}
// TestDaemonIPv6Enabled checks that when the daemon is started with --ipv6=true that the docker0 bridge
// has the fe80::1 address and that a container is assigned a link-local address
func (s *DockerSuite) TestDaemonIPv6Enabled(c *check.C) {
testRequires(c, IPv6)
if err := setupV6(); err != nil {
c.Fatal("Could not set up host for IPv6 tests")
}
d := NewDaemon(c)
if err := d.StartWithBusybox("--ipv6"); err != nil {
c.Fatal(err)
}
defer d.Stop()
iface, err := net.InterfaceByName("docker0")
if err != nil {
c.Fatalf("Error getting docker0 interface: %v", err)
}
addrs, err := iface.Addrs()
if err != nil {
c.Fatalf("Error getting addresses for docker0 interface: %v", err)
}
var found bool
expected := "fe80::1/64"
for i := range addrs {
if addrs[i].String() == expected {
found = true
}
}
if !found {
c.Fatalf("Bridge does not have an IPv6 Address")
}
if out, err := d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil {
c.Fatalf("Could not run container: %s, %v", out, err)
}
out, err := d.Cmd("inspect", "--format", "'{{.NetworkSettings.LinkLocalIPv6Address}}'", "ipv6test")
out = strings.Trim(out, " \r\n'")
if err != nil {
c.Fatalf("Error inspecting container: %s, %v", out, err)
}
if ip := net.ParseIP(out); ip == nil {
c.Fatalf("Container should have a link-local IPv6 address")
}
out, err = d.Cmd("inspect", "--format", "'{{.NetworkSettings.GlobalIPv6Address}}'", "ipv6test")
out = strings.Trim(out, " \r\n'")
if err != nil {
c.Fatalf("Error inspecting container: %s, %v", out, err)
}
if ip := net.ParseIP(out); ip != nil {
c.Fatalf("Container should not have a global IPv6 address: %v", out)
}
if err := teardownV6(); err != nil {
c.Fatal("Could not perform teardown for IPv6 tests")
}
}
// TestDaemonIPv6FixedCIDR checks that when the daemon is started with --ipv6=true and a fixed CIDR
// that running containers are given a link-local and global IPv6 address
func (s *DockerSuite) TestDaemonIPv6FixedCIDR(c *check.C) {
testRequires(c, IPv6)
if err := setupV6(); err != nil {
c.Fatal("Could not set up host for IPv6 tests")
}
d := NewDaemon(c)
if err := d.StartWithBusybox("--ipv6", "--fixed-cidr-v6='2001:db8:1::/64'"); err != nil {
c.Fatalf("Could not start daemon with busybox: %v", err)
}
defer d.Stop()
if out, err := d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil {
c.Fatalf("Could not run container: %s, %v", out, err)
}
out, err := d.Cmd("inspect", "--format", "'{{.NetworkSettings.LinkLocalIPv6Address}}'", "ipv6test")
out = strings.Trim(out, " \r\n'")
if err != nil {
c.Fatalf("Error inspecting container: %s, %v", out, err)
}
if ip := net.ParseIP(out); ip == nil {
c.Fatalf("Container should have a link-local IPv6 address")
}
out, err = d.Cmd("inspect", "--format", "'{{.NetworkSettings.GlobalIPv6Address}}'", "ipv6test")
out = strings.Trim(out, " \r\n'")
if err != nil {
c.Fatalf("Error inspecting container: %s, %v", out, err)
}
if ip := net.ParseIP(out); ip == nil {
c.Fatalf("Container should have a global IPv6 address")
}
if err := teardownV6(); err != nil {
c.Fatal("Could not perform teardown for IPv6 tests")
}
}
func (s *DockerDaemonSuite) TestDaemonLogLevelWrong(c *check.C) {
c.Assert(s.d.Start("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with wrong log level"))
}
func (s *DockerSuite) TestDaemonStartWithBackwardCompatibility(c *check.C) {
var validCommandArgs = [][]string{
{"--selinux-enabled", "-l", "info"},
{"--insecure-registry", "daemon"},
}
var invalidCommandArgs = [][]string{
{"--selinux-enabled", "--storage-opt"},
{"-D", "-b"},
{"--config", "/tmp"},
}
for _, args := range validCommandArgs {
d := NewDaemon(c)
d.Command = "--daemon"
if err := d.Start(args...); err != nil {
c.Fatalf("Daemon should have started successfully with --daemon %v: %v", args, err)
}
d.Stop()
}
for _, args := range invalidCommandArgs {
d := NewDaemon(c)
if err := d.Start(args...); err == nil {
d.Stop()
c.Fatalf("Daemon should have failed to start with %v", args)
}
}
}
func (s *DockerSuite) TestDaemonStartWithDaemonCommand(c *check.C) {
type kind int
const (
common kind = iota
daemon
)
var flags = []map[kind][]string{
{common: {"-l", "info"}, daemon: {"--selinux-enabled"}},
{common: {"-D"}, daemon: {"--selinux-enabled", "-r"}},
{common: {"-D"}, daemon: {"--restart"}},
{common: {"--debug"}, daemon: {"--log-driver=json-file", "--log-opt=max-size=1k"}},
}
var invalidGlobalFlags = [][]string{
//Invalid because you cannot pass daemon flags as global flags.
{"--selinux-enabled", "-l", "info"},
{"-D", "-r"},
{"--config", "/tmp"},
}
// `docker daemon -l info --selinux-enabled`
// should NOT error out
for _, f := range flags {
d := NewDaemon(c)
args := append(f[common], f[daemon]...)
if err := d.Start(args...); err != nil {
c.Fatalf("Daemon should have started successfully with %v: %v", args, err)
}
d.Stop()
}
// `docker -l info daemon --selinux-enabled`
// should error out
for _, f := range flags {
d := NewDaemon(c)
d.GlobalFlags = f[common]
if err := d.Start(f[daemon]...); err == nil {
d.Stop()
c.Fatalf("Daemon should have failed to start with docker %v daemon %v", d.GlobalFlags, f[daemon])
}
}
for _, f := range invalidGlobalFlags {
cmd := exec.Command(dockerBinary, append(f, "daemon")...)
errch := make(chan error)
var err error
go func() {
errch <- cmd.Run()
}()
select {
case <-time.After(time.Second):
cmd.Process.Kill()
case err = <-errch:
}
if err == nil {
c.Fatalf("Daemon should have failed to start with docker %v daemon", f)
}
}
}
func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) {
if err := s.d.Start("--log-level=debug"); err != nil {
c.Fatal(err)
}
content, _ := ioutil.ReadFile(s.d.logFile.Name())
if !strings.Contains(string(content), `level=debug`) {
c.Fatalf(`Missing level="debug" in log file:\n%s`, string(content))
}
}
func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) {
// we create a new daemon to get a new logFile
if err := s.d.Start("--log-level=fatal"); err != nil {
c.Fatal(err)
}
content, _ := ioutil.ReadFile(s.d.logFile.Name())
if strings.Contains(string(content), `level=debug`) {
c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content))
}
}
func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) {
if err := s.d.Start("-D"); err != nil {
c.Fatal(err)
}
content, _ := ioutil.ReadFile(s.d.logFile.Name())
if !strings.Contains(string(content), `level=debug`) {
c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content))
}
}
func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) {
if err := s.d.Start("--debug"); err != nil {
c.Fatal(err)
}
content, _ := ioutil.ReadFile(s.d.logFile.Name())
if !strings.Contains(string(content), `level=debug`) {
c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content))
}
}
func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) {
if err := s.d.Start("--debug", "--log-level=fatal"); err != nil {
c.Fatal(err)
}
content, _ := ioutil.ReadFile(s.d.logFile.Name())
if !strings.Contains(string(content), `level=debug`) {
c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content))
}
}
func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) {
listeningPorts := [][]string{
{"0.0.0.0", "0.0.0.0", "5678"},
{"127.0.0.1", "127.0.0.1", "1234"},
{"localhost", "127.0.0.1", "1235"},
}
cmdArgs := make([]string, 0, len(listeningPorts)*2)
for _, hostDirective := range listeningPorts {
cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2]))
}
if err := s.d.StartWithBusybox(cmdArgs...); err != nil {
c.Fatalf("Could not start daemon with busybox: %v", err)
}
for _, hostDirective := range listeningPorts {
output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true")
if err == nil {
c.Fatalf("Container should not start, expected port already allocated error: %q", output)
} else if !strings.Contains(output, "port is already allocated") {
c.Fatalf("Expected port is already allocated error: %q", output)
}
}
}
func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) {
// TODO: skip or update for Windows daemon
os.Remove("/etc/docker/key.json")
if err := s.d.Start(); err != nil {
c.Fatalf("Could not start daemon: %v", err)
}
s.d.Stop()
k, err := libtrust.LoadKeyFile("/etc/docker/key.json")
if err != nil {
c.Fatalf("Error opening key file")
}
kid := k.KeyID()
// Test Key ID is a valid fingerprint (e.g. QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF)
if len(kid) != 59 {
c.Fatalf("Bad key ID: %s", kid)
}
}
func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) {
// TODO: skip or update for Windows daemon
os.Remove("/etc/docker/key.json")
k1, err := libtrust.GenerateECP256PrivateKey()
if err != nil {
c.Fatalf("Error generating private key: %s", err)
}
if err := os.MkdirAll(filepath.Join(os.Getenv("HOME"), ".docker"), 0755); err != nil {
c.Fatalf("Error creating .docker directory: %s", err)
}
if err := libtrust.SaveKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"), k1); err != nil {
c.Fatalf("Error saving private key: %s", err)
}
if err := s.d.Start(); err != nil {
c.Fatalf("Could not start daemon: %v", err)
}
s.d.Stop()
k2, err := libtrust.LoadKeyFile("/etc/docker/key.json")
if err != nil {
c.Fatalf("Error opening key file")
}
if k1.KeyID() != k2.KeyID() {
c.Fatalf("Key not migrated")
}
}
// GH#11320 - verify that the daemon exits on failure properly
// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means
// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required
func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) {
//attempt to start daemon with incorrect flags (we know -b and --bip conflict)
if err := s.d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil {
//verify we got the right error
if !strings.Contains(err.Error(), "Daemon exited and never started") {
c.Fatalf("Expected daemon not to start, got %v", err)
}
// look in the log and make sure we got the message that daemon is shutting down
runCmd := exec.Command("grep", "Error starting daemon", s.d.LogfileName())
if out, _, err := runCommandWithOutput(runCmd); err != nil {
c.Fatalf("Expected 'Error starting daemon' message; but doesn't exist in log: %q, err: %v", out, err)
}
} else {
//if we didn't get an error and the daemon is running, this is a failure
c.Fatal("Conflicting options should cause the daemon to error out with a failure")
}
}
func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) {
d := s.d
err := d.Start("--bridge", "nosuchbridge")
c.Assert(err, check.NotNil, check.Commentf("--bridge option with an invalid bridge should cause the daemon to fail"))
defer d.Restart()
bridgeName := "external-bridge"
bridgeIP := "192.169.1.1/24"
_, bridgeIPNet, _ := net.ParseCIDR(bridgeIP)
out, err := createInterface(c, "bridge", bridgeName, bridgeIP)
c.Assert(err, check.IsNil, check.Commentf(out))
defer deleteInterface(c, bridgeName)
err = d.StartWithBusybox("--bridge", bridgeName)
c.Assert(err, check.IsNil)
ipTablesSearchString := bridgeIPNet.String()
ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
out, _, err = runCommandWithOutput(ipTablesCmd)
c.Assert(err, check.IsNil)
c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true,
check.Commentf("iptables output should have contained %q, but was %q",
ipTablesSearchString, out))
_, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top")
c.Assert(err, check.IsNil)
containerIP := d.findContainerIP("ExtContainer")
ip := net.ParseIP(containerIP)
c.Assert(bridgeIPNet.Contains(ip), check.Equals, true,
check.Commentf("Container IP-Address must be in the same subnet range : %s",
containerIP))
}
func createInterface(c *check.C, ifType string, ifName string, ipNet string) (string, error) {
args := []string{"link", "add", "name", ifName, "type", ifType}
ipLinkCmd := exec.Command("ip", args...)
out, _, err := runCommandWithOutput(ipLinkCmd)
if err != nil {
return out, err
}
ifCfgCmd := exec.Command("ifconfig", ifName, ipNet, "up")
out, _, err = runCommandWithOutput(ifCfgCmd)
return out, err
}
func deleteInterface(c *check.C, ifName string) {
ifCmd := exec.Command("ip", "link", "delete", ifName)
out, _, err := runCommandWithOutput(ifCmd)
c.Assert(err, check.IsNil, check.Commentf(out))
flushCmd := exec.Command("iptables", "-t", "nat", "--flush")
out, _, err = runCommandWithOutput(flushCmd)
c.Assert(err, check.IsNil, check.Commentf(out))
flushCmd = exec.Command("iptables", "--flush")
out, _, err = runCommandWithOutput(flushCmd)
c.Assert(err, check.IsNil, check.Commentf(out))
}
func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) {
// TestDaemonBridgeIP Steps
// 1. Delete the existing docker0 Bridge
// 2. Set --bip daemon configuration and start the new Docker Daemon
// 3. Check if the bip config has taken effect using ifconfig and iptables commands
// 4. Launch a Container and make sure the IP-Address is in the expected subnet
// 5. Delete the docker0 Bridge
// 6. Restart the Docker Daemon (via deferred action)
// This Restart takes care of bringing docker0 interface back to auto-assigned IP
defaultNetworkBridge := "docker0"
deleteInterface(c, defaultNetworkBridge)
d := s.d
bridgeIP := "192.169.1.1/24"
ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIP)
err := d.StartWithBusybox("--bip", bridgeIP)
c.Assert(err, check.IsNil)
defer d.Restart()
ifconfigSearchString := ip.String()
ifconfigCmd := exec.Command("ifconfig", defaultNetworkBridge)
out, _, _, err := runCommandWithStdoutStderr(ifconfigCmd)
c.Assert(err, check.IsNil)
c.Assert(strings.Contains(out, ifconfigSearchString), check.Equals, true,
check.Commentf("ifconfig output should have contained %q, but was %q",
ifconfigSearchString, out))
ipTablesSearchString := bridgeIPNet.String()
ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
out, _, err = runCommandWithOutput(ipTablesCmd)
c.Assert(err, check.IsNil)
c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true,
check.Commentf("iptables output should have contained %q, but was %q",
ipTablesSearchString, out))
out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top")
c.Assert(err, check.IsNil)
containerIP := d.findContainerIP("test")
ip = net.ParseIP(containerIP)
c.Assert(bridgeIPNet.Contains(ip), check.Equals, true,
check.Commentf("Container IP-Address must be in the same subnet range : %s",
containerIP))
deleteInterface(c, defaultNetworkBridge)
}
func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *check.C) {
if err := s.d.Start(); err != nil {
c.Fatalf("Could not start daemon: %v", err)
}
defer s.d.Restart()
if err := s.d.Stop(); err != nil {
c.Fatalf("Could not stop daemon: %v", err)
}
// now we will change the docker0's IP and then try starting the daemon
bridgeIP := "192.169.100.1/24"
_, bridgeIPNet, _ := net.ParseCIDR(bridgeIP)
ipCmd := exec.Command("ifconfig", "docker0", bridgeIP)
stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd)
if err != nil {
c.Fatalf("failed to change docker0's IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr)
}
if err := s.d.Start("--bip", bridgeIP); err != nil {
c.Fatalf("Could not start daemon: %v", err)
}
//check if the iptables contains new bridgeIP MASQUERADE rule
ipTablesSearchString := bridgeIPNet.String()
ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
out, _, err := runCommandWithOutput(ipTablesCmd)
if err != nil {
c.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
}
if !strings.Contains(out, ipTablesSearchString) {
c.Fatalf("iptables output should have contained new MASQUERADE rule with IP %q, but was %q", ipTablesSearchString, out)
}
}
func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) {
d := s.d
bridgeName := "external-bridge"
bridgeIP := "192.169.1.1/24"
out, err := createInterface(c, "bridge", bridgeName, bridgeIP)
c.Assert(err, check.IsNil, check.Commentf(out))
defer deleteInterface(c, bridgeName)
args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"}
err = d.StartWithBusybox(args...)
c.Assert(err, check.IsNil)
defer d.Restart()
for i := 0; i < 4; i++ {
cName := "Container" + strconv.Itoa(i)
out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top")
if err != nil {
c.Assert(strings.Contains(out, "no available ip addresses"), check.Equals, true,
check.Commentf("Could not run a Container : %s %s", err.Error(), out))
}
}
}
func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Implicit(c *check.C) {
defaultNetworkBridge := "docker0"
deleteInterface(c, defaultNetworkBridge)
d := s.d
bridgeIP := "192.169.1.1"
bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP)
err := d.StartWithBusybox("--bip", bridgeIPNet)
c.Assert(err, check.IsNil)
defer d.Restart()
expectedMessage := fmt.Sprintf("default via %s dev", bridgeIP)
out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0")
c.Assert(strings.Contains(out, expectedMessage), check.Equals, true,
check.Commentf("Implicit default gateway should be bridge IP %s, but default route was '%s'",
bridgeIP, strings.TrimSpace(out)))
deleteInterface(c, defaultNetworkBridge)
}
func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *check.C) {
defaultNetworkBridge := "docker0"
deleteInterface(c, defaultNetworkBridge)
d := s.d
bridgeIP := "192.169.1.1"
bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP)
gatewayIP := "192.169.1.254"
err := d.StartWithBusybox("--bip", bridgeIPNet, "--default-gateway", gatewayIP)
c.Assert(err, check.IsNil)
defer d.Restart()
expectedMessage := fmt.Sprintf("default via %s dev", gatewayIP)
out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0")
c.Assert(strings.Contains(out, expectedMessage), check.Equals, true,
check.Commentf("Explicit default gateway should be %s, but default route was '%s'",
gatewayIP, strings.TrimSpace(out)))
deleteInterface(c, defaultNetworkBridge)
}
func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainerSubnet(c *check.C) {
defaultNetworkBridge := "docker0"
deleteInterface(c, defaultNetworkBridge)
// Program a custom default gateway outside of the container subnet, daemon should accept it and start
err := s.d.StartWithBusybox("--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254")
c.Assert(err, check.IsNil)
deleteInterface(c, defaultNetworkBridge)
s.d.Restart()
}
func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) {
d := s.d
ipStr := "192.170.1.1/24"
ip, _, _ := net.ParseCIDR(ipStr)
args := []string{"--ip", ip.String()}
err := d.StartWithBusybox(args...)
c.Assert(err, check.IsNil)
defer d.Restart()
out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top")
c.Assert(err, check.NotNil,
check.Commentf("Running a container must fail with an invalid --ip option"))
c.Assert(strings.Contains(out, "Error starting userland proxy"), check.Equals, true)
ifName := "dummy"
out, err = createInterface(c, "dummy", ifName, ipStr)
c.Assert(err, check.IsNil, check.Commentf(out))
defer deleteInterface(c, ifName)
_, err = d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top")
c.Assert(err, check.IsNil)
ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
out, _, err = runCommandWithOutput(ipTablesCmd)
c.Assert(err, check.IsNil)
regex := fmt.Sprintf("DNAT.*%s.*dpt:8000", ip.String())
matched, _ := regexp.MatchString(regex, out)
c.Assert(matched, check.Equals, true,
check.Commentf("iptables output should have contained %q, but was %q", regex, out))
}
func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) {
d := s.d
bridgeName := "external-bridge"
bridgeIP := "192.169.1.1/24"
out, err := createInterface(c, "bridge", bridgeName, bridgeIP)
c.Assert(err, check.IsNil, check.Commentf(out))
defer deleteInterface(c, bridgeName)
args := []string{"--bridge", bridgeName, "--icc=false"}
err = d.StartWithBusybox(args...)
c.Assert(err, check.IsNil)
defer d.Restart()
ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD")
out, _, err = runCommandWithOutput(ipTablesCmd)
c.Assert(err, check.IsNil)
regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName)
matched, _ := regexp.MatchString(regex, out)
c.Assert(matched, check.Equals, true,
check.Commentf("iptables output should have contained %q, but was %q", regex, out))
// Pinging another container must fail with --icc=false
pingContainers(c, d, true)
ipStr := "192.171.1.1/24"
ip, _, _ := net.ParseCIDR(ipStr)
ifName := "icc-dummy"
createInterface(c, "dummy", ifName, ipStr)
// But, Pinging external or a Host interface must succeed
pingCmd := fmt.Sprintf("ping -c 1 %s -W 1", ip.String())
runArgs := []string{"--rm", "busybox", "sh", "-c", pingCmd}
_, err = d.Cmd("run", runArgs...)
c.Assert(err, check.IsNil)
}
func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *check.C) {
d := s.d
bridgeName := "external-bridge"
bridgeIP := "192.169.1.1/24"
out, err := createInterface(c, "bridge", bridgeName, bridgeIP)
c.Assert(err, check.IsNil, check.Commentf(out))
defer deleteInterface(c, bridgeName)
args := []string{"--bridge", bridgeName, "--icc=false"}
err = d.StartWithBusybox(args...)
c.Assert(err, check.IsNil)
defer d.Restart()
ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD")
out, _, err = runCommandWithOutput(ipTablesCmd)
c.Assert(err, check.IsNil)
regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName)
matched, _ := regexp.MatchString(regex, out)
c.Assert(matched, check.Equals, true,
check.Commentf("iptables output should have contained %q, but was %q", regex, out))
out, err = d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567")
c.Assert(err, check.IsNil, check.Commentf(out))
out, err = d.Cmd("run", "--link", "icc1:icc1", "busybox", "nc", "icc1", "4567")
c.Assert(err, check.IsNil, check.Commentf(out))
}
func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) {
bridgeName := "external-bridge"
bridgeIP := "192.169.1.1/24"
out, err := createInterface(c, "bridge", bridgeName, bridgeIP)
c.Assert(err, check.IsNil, check.Commentf(out))
defer deleteInterface(c, bridgeName)
err = s.d.StartWithBusybox("--bridge", bridgeName, "--icc=false")
c.Assert(err, check.IsNil)
defer s.d.Restart()
_, err = s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top")
c.Assert(err, check.IsNil)
_, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top")
c.Assert(err, check.IsNil)
childIP := s.d.findContainerIP("child")
parentIP := s.d.findContainerIP("parent")
sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"}
destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"}
if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) {
c.Fatal("Iptables rules not found")
}
s.d.Cmd("rm", "--link", "parent/http")
if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) {
c.Fatal("Iptables rules should be removed when unlink")
}
s.d.Cmd("kill", "child")
s.d.Cmd("kill", "parent")
}
func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) {
testRequires(c, NativeExecDriver)
if err := s.d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err != nil {
c.Fatal(err)
}
out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)")
if err != nil {
c.Fatal(out, err)
}
outArr := strings.Split(out, "\n")
if len(outArr) < 2 {
c.Fatalf("got unexpected output: %s", out)
}
nofile := strings.TrimSpace(outArr[0])
nproc := strings.TrimSpace(outArr[1])
if nofile != "42" {
c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile)
}
if nproc != "2048" {
c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc)
}
// Now restart daemon with a new default
if err := s.d.Restart("--default-ulimit", "nofile=43"); err != nil {
c.Fatal(err)
}
out, err = s.d.Cmd("start", "-a", "test")
if err != nil {
c.Fatal(err)
}
outArr = strings.Split(out, "\n")
if len(outArr) < 2 {
c.Fatalf("got unexpected output: %s", out)
}
nofile = strings.TrimSpace(outArr[0])
nproc = strings.TrimSpace(outArr[1])
if nofile != "43" {
c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile)
}
if nproc != "2048" {
c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc)
}
}
// #11315
func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
c.Fatal(err)
}
if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil {
c.Fatal(err, out)
}
if out, err := s.d.Cmd("rename", "test", "test2"); err != nil {
c.Fatal(err, out)
}
if err := s.d.Restart(); err != nil {
c.Fatal(err)
}
if out, err := s.d.Cmd("start", "test2"); err != nil {
c.Fatal(err, out)
}
}
func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
c.Fatal(err)
}
out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline")
if err != nil {
c.Fatal(out, err)
}
id := strings.TrimSpace(out)
if out, err := s.d.Cmd("wait", id); err != nil {
c.Fatal(out, err)
}
logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
if _, err := os.Stat(logPath); err != nil {
c.Fatal(err)
}
f, err := os.Open(logPath)
if err != nil {
c.Fatal(err)
}
var res struct {
Log string `json:"log"`
Stream string `json:"stream"`
Time time.Time `json:"time"`
}
if err := json.NewDecoder(f).Decode(&res); err != nil {
c.Fatal(err)
}
if res.Log != "testline\n" {
c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n")
}
if res.Stream != "stdout" {
c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout")
}
if !time.Now().After(res.Time) {
c.Fatalf("Log time %v in future", res.Time)
}
}
func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
c.Fatal(err)
}
out, err := s.d.Cmd("run", "-d", "--log-driver=none", "busybox", "echo", "testline")
if err != nil {
c.Fatal(out, err)
}
id := strings.TrimSpace(out)
if out, err := s.d.Cmd("wait", id); err != nil {
c.Fatal(out, err)
}
logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) {
c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err)
}
}
func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) {
if err := s.d.StartWithBusybox("--log-driver=none"); err != nil {
c.Fatal(err)
}
out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline")
if err != nil {
c.Fatal(out, err)
}
id := strings.TrimSpace(out)
if out, err := s.d.Cmd("wait", id); err != nil {
c.Fatal(out, err)
}
logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) {
c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err)
}
}
func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) {
if err := s.d.StartWithBusybox("--log-driver=none"); err != nil {
c.Fatal(err)
}
out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "echo", "testline")
if err != nil {
c.Fatal(out, err)
}
id := strings.TrimSpace(out)
if out, err := s.d.Cmd("wait", id); err != nil {
c.Fatal(out, err)
}
logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
if _, err := os.Stat(logPath); err != nil {
c.Fatal(err)
}
f, err := os.Open(logPath)
if err != nil {
c.Fatal(err)
}
var res struct {
Log string `json:"log"`
Stream string `json:"stream"`
Time time.Time `json:"time"`
}
if err := json.NewDecoder(f).Decode(&res); err != nil {
c.Fatal(err)
}
if res.Log != "testline\n" {
c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n")
}
if res.Stream != "stdout" {
c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout")
}
if !time.Now().After(res.Time) {
c.Fatalf("Log time %v in future", res.Time)
}
}
func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) {
if err := s.d.StartWithBusybox("--log-driver=none"); err != nil {
c.Fatal(err)
}
out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline")
if err != nil {
c.Fatal(out, err)
}
id := strings.TrimSpace(out)
out, err = s.d.Cmd("logs", id)
if err != nil {
c.Fatalf("Logs request should be sent and then fail with \"none\" driver")
}
if !strings.Contains(out, `Error running logs job: Failed to get logging factory: logger: no log driver named 'none' is registered`) {
c.Fatalf("There should be an error about none not being a recognized log driver, got: %s", out)
}
}
func (s *DockerDaemonSuite) TestDaemonDots(c *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
c.Fatal(err)
}
// Now create 4 containers
if _, err := s.d.Cmd("create", "busybox"); err != nil {
c.Fatalf("Error creating container: %q", err)
}
if _, err := s.d.Cmd("create", "busybox"); err != nil {
c.Fatalf("Error creating container: %q", err)
}
if _, err := s.d.Cmd("create", "busybox"); err != nil {
c.Fatalf("Error creating container: %q", err)
}
if _, err := s.d.Cmd("create", "busybox"); err != nil {
c.Fatalf("Error creating container: %q", err)
}
s.d.Stop()
s.d.Start("--log-level=debug")
s.d.Stop()
content, _ := ioutil.ReadFile(s.d.logFile.Name())
if strings.Contains(string(content), "....") {
c.Fatalf("Debug level should not have ....\n%s", string(content))
}
s.d.Start("--log-level=error")
s.d.Stop()
content, _ = ioutil.ReadFile(s.d.logFile.Name())
if strings.Contains(string(content), "....") {
c.Fatalf("Error level should not have ....\n%s", string(content))
}
s.d.Start("--log-level=info")
s.d.Stop()
content, _ = ioutil.ReadFile(s.d.logFile.Name())
if !strings.Contains(string(content), "....") {
c.Fatalf("Info level should have ....\n%s", string(content))
}
}
func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) {
dir, err := ioutil.TempDir("", "socket-cleanup-test")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(dir)
sockPath := filepath.Join(dir, "docker.sock")
if err := s.d.Start("--host", "unix://"+sockPath); err != nil {
c.Fatal(err)
}
if _, err := os.Stat(sockPath); err != nil {
c.Fatal("socket does not exist")
}
if err := s.d.Stop(); err != nil {
c.Fatal(err)
}
if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) {
c.Fatal("unix socket is not cleaned up")
}
}
func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) {
type Config struct {
Crv string `json:"crv"`
D string `json:"d"`
Kid string `json:"kid"`
Kty string `json:"kty"`
X string `json:"x"`
Y string `json:"y"`
}
os.Remove("/etc/docker/key.json")
if err := s.d.Start(); err != nil {
c.Fatalf("Failed to start daemon: %v", err)
}
if err := s.d.Stop(); err != nil {
c.Fatalf("Could not stop daemon: %v", err)
}
config := &Config{}
bytes, err := ioutil.ReadFile("/etc/docker/key.json")
if err != nil {
c.Fatalf("Error reading key.json file: %s", err)
}
// byte[] to Data-Struct
if err := json.Unmarshal(bytes, &config); err != nil {
c.Fatalf("Error Unmarshal: %s", err)
}
//replace config.Kid with the fake value
config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4"
// NEW Data-Struct to byte[]
newBytes, err := json.Marshal(&config)
if err != nil {
c.Fatalf("Error Marshal: %s", err)
}
// write back
if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil {
c.Fatalf("Error ioutil.WriteFile: %s", err)
}
defer os.Remove("/etc/docker/key.json")
if err := s.d.Start(); err == nil {
c.Fatalf("It should not be successful to start daemon with wrong key: %v", err)
}
content, _ := ioutil.ReadFile(s.d.logFile.Name())
if !strings.Contains(string(content), "Public Key ID does not match") {
c.Fatal("Missing KeyID message from daemon logs")
}
}
func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
c.Fatalf("Could not start daemon with busybox: %v", err)
}
out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat")
if err != nil {
c.Fatalf("Could not run /bin/cat: err=%v\n%s", err, out)
}
containerID := strings.TrimSpace(out)
if out, err := s.d.Cmd("kill", containerID); err != nil {
c.Fatalf("Could not kill %s: err=%v\n%s", containerID, err, out)
}
if err := s.d.Restart(); err != nil {
c.Fatalf("Could not restart daemon: %v", err)
}
errchan := make(chan error)
go func() {
if out, err := s.d.Cmd("wait", containerID); err != nil {
errchan <- fmt.Errorf("%v:\n%s", err, out)
}
close(errchan)
}()
select {
case <-time.After(5 * time.Second):
c.Fatal("Waiting on a stopped (killed) container timed out")
case err := <-errchan:
if err != nil {
c.Fatal(err)
}
}
}
// TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint
func (s *DockerDaemonSuite) TestHttpsInfo(c *check.C) {
const (
testDaemonHTTPSAddr = "tcp://localhost:4271"
)
if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem",
"--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil {
c.Fatalf("Could not start daemon with busybox: %v", err)
}
daemonArgs := []string{"--host", testDaemonHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-cert.pem", "--tlskey", "fixtures/https/client-key.pem"}
out, err := s.d.CmdWithArgs(daemonArgs, "info")
if err != nil {
c.Fatalf("Error Occurred: %s and output: %s", err, out)
}
}
// TestTlsVerify verifies that --tlsverify=false turns on tls
func (s *DockerDaemonSuite) TestTlsVerify(c *check.C) {
out, err := exec.Command(dockerBinary, "daemon", "--tlsverify=false").CombinedOutput()
if err == nil || !strings.Contains(string(out), "Could not load X509 key pair") {
c.Fatalf("Daemon should not have started due to missing certs: %v\n%s", err, string(out))
}
}
// TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint
// by using a rogue client certificate and checks that it fails with the expected error.
func (s *DockerDaemonSuite) TestHttpsInfoRogueCert(c *check.C) {
const (
errBadCertificate = "remote error: bad certificate"
testDaemonHTTPSAddr = "tcp://localhost:4271"
)
if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem",
"--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil {
c.Fatalf("Could not start daemon with busybox: %v", err)
}
daemonArgs := []string{"--host", testDaemonHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"}
out, err := s.d.CmdWithArgs(daemonArgs, "info")
if err == nil || !strings.Contains(out, errBadCertificate) {
c.Fatalf("Expected err: %s, got instead: %s and output: %s", errBadCertificate, err, out)
}
}
// TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint
// which provides a rogue server certificate and checks that it fails with the expected error
func (s *DockerDaemonSuite) TestHttpsInfoRogueServerCert(c *check.C) {
const (
errCaUnknown = "x509: certificate signed by unknown authority"
testDaemonRogueHTTPSAddr = "tcp://localhost:4272"
)
if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-rogue-cert.pem",
"--tlskey", "fixtures/https/server-rogue-key.pem", "-H", testDaemonRogueHTTPSAddr); err != nil {
c.Fatalf("Could not start daemon with busybox: %v", err)
}
daemonArgs := []string{"--host", testDaemonRogueHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"}
out, err := s.d.CmdWithArgs(daemonArgs, "info")
if err == nil || !strings.Contains(out, errCaUnknown) {
c.Fatalf("Expected err: %s, got instead: %s and output: %s", errCaUnknown, err, out)
}
}
func pingContainers(c *check.C, d *Daemon, expectFailure bool) {
var dargs []string
if d != nil {
dargs = []string{"--host", d.sock()}
}
args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top")
dockerCmd(c, args...)
args = append(dargs, "run", "--rm", "--link", "container1:alias1", "busybox", "sh", "-c")
pingCmd := "ping -c 1 %s -W 1"
args = append(args, fmt.Sprintf(pingCmd, "alias1"))
_, _, err := dockerCmdWithError(args...)
if expectFailure {
c.Assert(err, check.NotNil)
} else {
c.Assert(err, check.IsNil)
}
args = append(dargs, "rm", "-f", "container1")
dockerCmd(c, args...)
}
func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) {
c.Assert(s.d.StartWithBusybox(), check.IsNil)
socket := filepath.Join(s.d.folder, "docker.sock")
out, err := s.d.Cmd("run", "-d", "-v", socket+":/sock", "busybox")
c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
c.Assert(s.d.Restart(), check.IsNil)
}
func (s *DockerDaemonSuite) TestCleanupMountsAfterCrash(c *check.C) {
c.Assert(s.d.StartWithBusybox(), check.IsNil)
out, err := s.d.Cmd("run", "-d", "busybox", "top")
c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
id := strings.TrimSpace(out)
c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil)
c.Assert(s.d.Start(), check.IsNil)
mountOut, err := exec.Command("mount").CombinedOutput()
c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, check.Commentf("Something mounted from older daemon start: %s", mountOut))
}
func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) {
testRequires(c, NativeExecDriver)
c.Assert(s.d.StartWithBusybox("-b", "none"), check.IsNil)
out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l")
c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
c.Assert(strings.Contains(out, "eth0"), check.Equals, false,
check.Commentf("There shouldn't be eth0 in container in default(bridge) mode when bridge network is disabled: %s", out))
out, err = s.d.Cmd("run", "--rm", "--net=bridge", "busybox", "ip", "l")
c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
c.Assert(strings.Contains(out, "eth0"), check.Equals, false,
check.Commentf("There shouldn't be eth0 in container in bridge mode when bridge network is disabled: %s", out))
cmd := exec.Command("ip", "l")
stdout := bytes.NewBuffer(nil)
cmd.Stdout = stdout
if err := cmd.Run(); err != nil {
c.Fatal("Failed to get host network interface")
}
out, err = s.d.Cmd("run", "--rm", "--net=host", "busybox", "ip", "l")
c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
c.Assert(out, check.Equals, fmt.Sprintf("%s", stdout),
check.Commentf("The network interfaces in container should be the same with host when --net=host when bridge network is disabled: %s", out))
}
func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
t.Fatal(err)
}
if out, err := s.d.Cmd("run", "-ti", "-d", "--name", "test", "busybox"); err != nil {
t.Fatal(out, err)
}
if err := s.d.Restart(); err != nil {
t.Fatal(err)
}
// Container 'test' should be removed without error
if out, err := s.d.Cmd("rm", "test"); err != nil {
t.Fatal(out, err)
}
}
func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
c.Fatal(err)
}
out, err := s.d.Cmd("run", "--name", "netns", "-d", "busybox", "top")
if err != nil {
c.Fatal(out, err)
}
// Get sandbox key via inspect
out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.SandboxKey}}'", "netns")
if err != nil {
c.Fatalf("Error inspecting container: %s, %v", out, err)
}
fileName := strings.Trim(out, " \r\n'")
if out, err := s.d.Cmd("stop", "netns"); err != nil {
c.Fatal(out, err)
}
// Test if the file still exists
out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName))
out = strings.TrimSpace(out)
c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
c.Assert(out, check.Equals, fileName, check.Commentf("Output: %s", out))
// Remove the container and restart the daemon
if out, err := s.d.Cmd("rm", "netns"); err != nil {
c.Fatal(out, err)
}
if err := s.d.Restart(); err != nil {
c.Fatal(err)
}
// Test again and see now the netns file does not exist
out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName))
out = strings.TrimSpace(out)
c.Assert(err, check.Not(check.IsNil), check.Commentf("Output: %s", out))
}
// tests regression detailed in #13964 where DOCKER_TLS_VERIFY env is ignored
func (s *DockerDaemonSuite) TestDaemonNoTlsCliTlsVerifyWithEnv(c *check.C) {
host := "tcp://localhost:4271"
c.Assert(s.d.Start("-H", host), check.IsNil)
cmd := exec.Command(dockerBinary, "-H", host, "info")
cmd.Env = []string{"DOCKER_TLS_VERIFY=1", "DOCKER_CERT_PATH=fixtures/https"}
out, _, err := runCommandWithOutput(cmd)
c.Assert(err, check.Not(check.IsNil), check.Commentf("%s", out))
c.Assert(strings.Contains(out, "error occurred trying to connect"), check.Equals, true)
}
func setupV6() error {
// Hack to get the right IPv6 address on docker0, which has already been created
err := exec.Command("ip", "addr", "add", "fe80::1/64", "dev", "docker0").Run()
if err != nil {
return err
}
return nil
}
func teardownV6() error {
err := exec.Command("ip", "addr", "del", "fe80::1/64", "dev", "docker0").Run()
if err != nil {
return err
}
return nil
}
func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlways(c *check.C) {
c.Assert(s.d.StartWithBusybox(), check.IsNil)
out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top")
c.Assert(err, check.IsNil)
id := strings.TrimSpace(out)
_, err = s.d.Cmd("stop", id)
c.Assert(err, check.IsNil)
_, err = s.d.Cmd("wait", id)
c.Assert(err, check.IsNil)
out, err = s.d.Cmd("ps", "-q")
c.Assert(err, check.IsNil)
c.Assert(out, check.Equals, "")
c.Assert(s.d.Restart(), check.IsNil)
out, err = s.d.Cmd("ps", "-q")
c.Assert(err, check.IsNil)
c.Assert(strings.TrimSpace(out), check.Equals, id[:12])
}
func (s *DockerDaemonSuite) TestDaemonCorruptedSyslogAddress(c *check.C) {
c.Assert(s.d.Start("--log-driver=syslog", "--log-opt", "syslog-address=corrupted:1234"), check.NotNil)
runCmd := exec.Command("grep", "Failed to set log opts: syslog-address should be in form proto://address", s.d.LogfileName())
if out, _, err := runCommandWithOutput(runCmd); err != nil {
c.Fatalf("Expected 'Error starting daemon' message; but doesn't exist in log: %q, err: %v", out, err)
}
}
func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *check.C) {
if err := s.d.StartWithBusybox("--log-driver=json-file", "--log-opt=max-size=1k"); err != nil {
c.Fatal(err)
}
out, err := s.d.Cmd("run", "-d", "--name=logtest", "busybox", "top")
c.Assert(err, check.IsNil, check.Commentf("Output: %s, err: %v", out, err))
out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Config }}", "logtest")
c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
cfg := strings.TrimSpace(out)
if cfg != "map[max-size:1k]" {
c.Fatalf("Unexpected log-opt: %s, expected map[max-size:1k]", cfg)
}
}
func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
c.Fatal(err)
}
if out, err := s.d.Cmd("run", "-i", "-d", "--name", "test", "busybox", "top"); err != nil {
c.Fatal(err, out)
}
if out, err := s.d.Cmd("pause", "test"); err != nil {
c.Fatal(err, out)
}
if err := s.d.Restart(); err != nil {
c.Fatal(err)
}
errchan := make(chan error)
go func() {
out, err := s.d.Cmd("start", "test")
if err != nil {
errchan <- fmt.Errorf("%v:\n%s", err, out)
}
name := strings.TrimSpace(out)
if name != "test" {
errchan <- fmt.Errorf("Paused container start error on docker daemon restart, expected 'test' but got '%s'", name)
}
close(errchan)
}()
select {
case <-time.After(5 * time.Second):
c.Fatal("Waiting on start a container timed out")
case err := <-errchan:
if err != nil {
c.Fatal(err)
}
}
}
func (s *DockerDaemonSuite) TestDaemonRestartRmVolumeInUse(c *check.C) {
c.Assert(s.d.StartWithBusybox(), check.IsNil)
out, err := s.d.Cmd("create", "-v", "test:/foo", "busybox")
c.Assert(err, check.IsNil, check.Commentf(out))
c.Assert(s.d.Restart(), check.IsNil)
out, err = s.d.Cmd("volume", "rm", "test")
c.Assert(err, check.Not(check.IsNil), check.Commentf("should not be able to remove in use volume after daemon restart"))
c.Assert(strings.Contains(out, "in use"), check.Equals, true)
}
func (s *DockerDaemonSuite) TestDaemonRestartLocalVolumes(c *check.C) {
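// Locally created volumes must survive a daemon restart.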
c.Assert(s.d.Start(), check.IsNil)
_, err := s.d.Cmd("volume", "create", "--name", "test")
c.Assert(err, check.IsNil)
c.Assert(s.d.Restart(), check.IsNil)
_, err = s.d.Cmd("volume", "inspect", "test")
c.Assert(err, check.IsNil)
}
| ["\"HOME\"", "\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 |
test/e2e/catalog_e2e_test.go | // +build !bare
package e2e
import (
"context"
"fmt"
"net"
"os"
"reflect"
"strings"
"time"
"github.com/blang/semver"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
apierrors "k8s.io/apimachinery/pkg/api/errors"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/operator-framework/api/pkg/lib/version"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil"
)
var _ = Describe("Catalog", func() {
AfterEach(func() {
TearDown(testNamespace)
})
It("loading between restarts", func() {
// create a simple catalogsource
packageName := genName("nginx")
stableChannel := "stable"
packageStable := packageName + "-stable"
manifests := []registry.PackageManifest{
{
PackageName: packageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: packageStable},
},
DefaultChannelName: stableChannel,
},
}
crdPlural := genName("ins")
crd := newCRD(crdPlural)
namedStrategy := newNginxInstallStrategy(genName("dep-"), nil, nil)
csv := newCSV(packageStable, testNamespace, "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{crd}, nil, namedStrategy)
c := newKubeClient()
crc := newCRClient()
catalogSourceName := genName("mock-ocs-")
_, cleanupSource := createInternalCatalogSource(c, crc, catalogSourceName, operatorNamespace, manifests, []apiextensions.CustomResourceDefinition{crd}, []v1alpha1.ClusterServiceVersion{csv})
defer cleanupSource()
// ensure the mock catalog exists and has been synced by the catalog operator
catalogSource, err := fetchCatalogSourceOnStatus(crc, catalogSourceName, operatorNamespace, catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
// get catalog operator deployment
deployment, err := getOperatorDeployment(c, operatorNamespace, labels.Set{"app": "catalog-operator"})
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), deployment, "Could not find catalog operator deployment")
// rescale catalog operator
GinkgoT().Log("Rescaling catalog operator...")
err = rescaleDeployment(c, deployment)
require.NoError(GinkgoT(), err, "Could not rescale catalog operator")
GinkgoT().Log("Catalog operator rescaled")
// check for last synced update to catalogsource
GinkgoT().Log("Checking for catalogsource lastSync updates")
_, err = fetchCatalogSourceOnStatus(crc, catalogSourceName, operatorNamespace, func(cs *v1alpha1.CatalogSource) bool {
before := catalogSource.Status.GRPCConnectionState
after := cs.Status.GRPCConnectionState
if after != nil && after.LastConnectTime.After(before.LastConnectTime.Time) {
GinkgoT().Logf("lastSync updated: %s -> %s", before.LastConnectTime, after.LastConnectTime)
return true
}
return false
})
require.NoError(GinkgoT(), err, "Catalog source changed after rescale")
GinkgoT().Logf("Catalog source sucessfully loaded after rescale")
})
It("global update triggers subscription sync", func() {
globalNS := operatorNamespace
c := newKubeClient()
crc := newCRClient()
// Determine which namespace is global. Should be `openshift-marketplace` for OCP 4.2+.
// Locally it is `olm`
namespaces, _ := c.KubernetesInterface().CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
for _, ns := range namespaces.Items {
if ns.GetName() == "openshift-marketplace" {
globalNS = "openshift-marketplace"
}
}
mainPackageName := genName("nginx-")
mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
mainPackageReplacement := fmt.Sprintf("%s-replacement", mainPackageStable)
stableChannel := "stable"
mainNamedStrategy := newNginxInstallStrategy(genName("dep-"), nil, nil)
crdPlural := genName("ins-")
mainCRD := newCRD(crdPlural)
mainCSV := newCSV(mainPackageStable, testNamespace, "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{mainCRD}, nil, mainNamedStrategy)
replacementCSV := newCSV(mainPackageReplacement, testNamespace, mainPackageStable, semver.MustParse("0.2.0"), []apiextensions.CustomResourceDefinition{mainCRD}, nil, mainNamedStrategy)
mainCatalogName := genName("mock-ocs-main-")
// Create separate manifests for each CatalogSource
mainManifests := []registry.PackageManifest{
{
PackageName: mainPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: mainPackageStable},
},
DefaultChannelName: stableChannel,
},
}
// Create the initial catalog source
createInternalCatalogSource(c, crc, mainCatalogName, globalNS, mainManifests, []apiextensions.CustomResourceDefinition{mainCRD}, []v1alpha1.ClusterServiceVersion{mainCSV})
// Attempt to get the catalog source before creating install plan
_, err := fetchCatalogSourceOnStatus(crc, mainCatalogName, globalNS, catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
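// Use manual InstallPlan approval so the test can fetch and approve the plan explicitly before the CSV installs.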
subscriptionSpec := &v1alpha1.SubscriptionSpec{
CatalogSource: mainCatalogName,
CatalogSourceNamespace: globalNS,
Package: mainPackageName,
Channel: stableChannel,
StartingCSV: mainCSV.GetName(),
InstallPlanApproval: v1alpha1.ApprovalManual,
}
// Create Subscription
subscriptionName := genName("sub-")
createSubscriptionForCatalogWithSpec(GinkgoT(), crc, testNamespace, subscriptionName, subscriptionSpec)
subscription, err := fetchSubscription(crc, testNamespace, subscriptionName, subscriptionHasInstallPlanChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
installPlanName := subscription.Status.Install.Name
requiresApprovalChecker := buildInstallPlanPhaseCheckFunc(v1alpha1.InstallPlanPhaseRequiresApproval)
fetchedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, installPlanName, requiresApprovalChecker)
require.NoError(GinkgoT(), err)
fetchedInstallPlan.Spec.Approved = true
_, err = crc.OperatorsV1alpha1().InstallPlans(testNamespace).Update(context.TODO(), fetchedInstallPlan, metav1.UpdateOptions{})
require.NoError(GinkgoT(), err)
_, err = awaitCSV(GinkgoT(), crc, testNamespace, mainCSV.GetName(), csvSucceededChecker)
require.NoError(GinkgoT(), err)
// Update manifest
mainManifests = []registry.PackageManifest{
{
PackageName: mainPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: replacementCSV.GetName()},
},
DefaultChannelName: stableChannel,
},
}
// Update catalog configmap
updateInternalCatalog(GinkgoT(), c, crc, mainCatalogName, globalNS, []apiextensions.CustomResourceDefinition{mainCRD}, []v1alpha1.ClusterServiceVersion{mainCSV, replacementCSV}, mainManifests)
// Get updated catalogsource
fetchedUpdatedCatalog, err := fetchCatalogSourceOnStatus(crc, mainCatalogName, globalNS, catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
subscription, err = fetchSubscription(crc, testNamespace, subscriptionName, subscriptionStateUpgradePendingChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
// Ensure the subscription was resynced shortly (within 60 seconds) after the global catalog source reconnected
catalogConnState := fetchedUpdatedCatalog.Status.GRPCConnectionState
subUpdatedTime := subscription.Status.LastUpdated
timeLapse := subUpdatedTime.Sub(catalogConnState.LastConnectTime.Time).Seconds()
require.True(GinkgoT(), timeLapse < 60)
})
It("config map update triggers registry pod rollout", func() {
mainPackageName := genName("nginx-")
dependentPackageName := genName("nginxdep-")
mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
dependentPackageStable := fmt.Sprintf("%s-stable", dependentPackageName)
stableChannel := "stable"
mainNamedStrategy := newNginxInstallStrategy(genName("dep-"), nil, nil)
dependentNamedStrategy := newNginxInstallStrategy(genName("dep-"), nil, nil)
crdPlural := genName("ins-")
dependentCRD := newCRD(crdPlural)
mainCSV := newCSV(mainPackageStable, testNamespace, "", semver.MustParse("0.1.0"), nil, []apiextensions.CustomResourceDefinition{dependentCRD}, mainNamedStrategy)
dependentCSV := newCSV(dependentPackageStable, testNamespace, "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{dependentCRD}, nil, dependentNamedStrategy)
c := newKubeClient()
crc := newCRClient()
mainCatalogName := genName("mock-ocs-main-")
// Create separate manifests for each CatalogSource
mainManifests := []registry.PackageManifest{
{
PackageName: mainPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: mainPackageStable},
},
DefaultChannelName: stableChannel,
},
}
dependentManifests := []registry.PackageManifest{
{
PackageName: dependentPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: dependentPackageStable},
},
DefaultChannelName: stableChannel,
},
}
// Create the initial catalogsource
createInternalCatalogSource(c, crc, mainCatalogName, testNamespace, mainManifests, nil, []v1alpha1.ClusterServiceVersion{mainCSV})
// Attempt to get the catalog source before creating install plan
fetchedInitialCatalog, err := fetchCatalogSourceOnStatus(crc, mainCatalogName, testNamespace, catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
// Get initial configmap
configMap, err := c.KubernetesInterface().CoreV1().ConfigMaps(testNamespace).Get(context.TODO(), fetchedInitialCatalog.Spec.ConfigMap, metav1.GetOptions{})
require.NoError(GinkgoT(), err)
// Check pod created
initialPods, err := c.KubernetesInterface().CoreV1().Pods(testNamespace).List(context.TODO(), metav1.ListOptions{LabelSelector: "olm.configMapResourceVersion=" + configMap.ResourceVersion})
require.NoError(GinkgoT(), err)
require.Equal(GinkgoT(), 1, len(initialPods.Items))
// Update catalog configmap
updateInternalCatalog(GinkgoT(), c, crc, mainCatalogName, testNamespace, []apiextensions.CustomResourceDefinition{dependentCRD}, []v1alpha1.ClusterServiceVersion{mainCSV, dependentCSV}, append(mainManifests, dependentManifests...))
// Get updated configmap
updatedConfigMap, err := c.KubernetesInterface().CoreV1().ConfigMaps(testNamespace).Get(context.TODO(), fetchedInitialCatalog.Spec.ConfigMap, metav1.GetOptions{})
require.NoError(GinkgoT(), err)
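// Wait until the CatalogSource status reflects the newer ConfigMap resource version, i.e. the registry has picked up the update.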
fetchedUpdatedCatalog, err := fetchCatalogSourceOnStatus(crc, mainCatalogName, testNamespace, func(catalog *v1alpha1.CatalogSource) bool {
before := fetchedInitialCatalog.Status.ConfigMapResource
after := catalog.Status.ConfigMapResource
if after != nil && before.LastUpdateTime.Before(&after.LastUpdateTime) &&
after.ResourceVersion != before.ResourceVersion {
fmt.Println("catalog updated")
return true
}
fmt.Println("waiting for catalog pod to be available")
return false
})
require.NoError(GinkgoT(), err)
require.NotEqual(GinkgoT(), updatedConfigMap.ResourceVersion, configMap.ResourceVersion)
require.NotEqual(GinkgoT(), fetchedUpdatedCatalog.Status.ConfigMapResource.ResourceVersion, fetchedInitialCatalog.Status.ConfigMapResource.ResourceVersion)
require.Equal(GinkgoT(), updatedConfigMap.GetResourceVersion(), fetchedUpdatedCatalog.Status.ConfigMapResource.ResourceVersion)
// Await 1 CatalogSource registry pod matching the updated labels
singlePod := podCount(1)
selector := labels.SelectorFromSet(map[string]string{"olm.catalogSource": mainCatalogName, "olm.configMapResourceVersion": updatedConfigMap.GetResourceVersion()})
podList, err := awaitPods(GinkgoT(), c, testNamespace, selector.String(), singlePod)
require.NoError(GinkgoT(), err)
require.Equal(GinkgoT(), 1, len(podList.Items), "expected pod list not of length 1")
// Await 1 CatalogSource registry pod matching the catalog source label alone
selector = labels.SelectorFromSet(map[string]string{"olm.catalogSource": mainCatalogName})
podList, err = awaitPods(GinkgoT(), c, testNamespace, selector.String(), singlePod)
require.NoError(GinkgoT(), err)
require.Equal(GinkgoT(), 1, len(podList.Items), "expected pod list not of length 1")
// Create Subscription
subscriptionName := genName("sub-")
createSubscriptionForCatalog(crc, testNamespace, subscriptionName, fetchedUpdatedCatalog.GetName(), mainPackageName, stableChannel, "", v1alpha1.ApprovalAutomatic)
subscription, err := fetchSubscription(crc, testNamespace, subscriptionName, subscriptionStateAtLatestChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
_, err = fetchCSV(crc, subscription.Status.CurrentCSV, testNamespace, buildCSVConditionChecker(v1alpha1.CSVPhaseSucceeded))
require.NoError(GinkgoT(), err)
ipList, err := crc.OperatorsV1alpha1().InstallPlans(testNamespace).List(context.TODO(), metav1.ListOptions{})
require.NoError(GinkgoT(), err)
// Count the InstallPlans owned by the Subscription
ipCount := 0
for _, ip := range ipList.Items {
if ownerutil.IsOwnedBy(&ip, subscription) {
ipCount += 1
}
}
})
It("config map replace triggers registry pod rollout", func() {
mainPackageName := genName("nginx-")
dependentPackageName := genName("nginxdep-")
mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
dependentPackageStable := fmt.Sprintf("%s-stable", dependentPackageName)
stableChannel := "stable"
mainNamedStrategy := newNginxInstallStrategy(genName("dep-"), nil, nil)
dependentNamedStrategy := newNginxInstallStrategy(genName("dep-"), nil, nil)
crdPlural := genName("ins-")
dependentCRD := newCRD(crdPlural)
mainCSV := newCSV(mainPackageStable, testNamespace, "", semver.MustParse("0.1.0"), nil, []apiextensions.CustomResourceDefinition{dependentCRD}, mainNamedStrategy)
dependentCSV := newCSV(dependentPackageStable, testNamespace, "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{dependentCRD}, nil, dependentNamedStrategy)
c := newKubeClient()
crc := newCRClient()
mainCatalogName := genName("mock-ocs-main-")
// Create separate manifests for each CatalogSource
mainManifests := []registry.PackageManifest{
{
PackageName: mainPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: mainPackageStable},
},
DefaultChannelName: stableChannel,
},
}
dependentManifests := []registry.PackageManifest{
{
PackageName: dependentPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: dependentPackageStable},
},
DefaultChannelName: stableChannel,
},
}
// Create the initial catalogsource
_, cleanupSource := createInternalCatalogSource(c, crc, mainCatalogName, testNamespace, mainManifests, nil, []v1alpha1.ClusterServiceVersion{mainCSV})
// Attempt to get the catalog source before creating install plan
fetchedInitialCatalog, err := fetchCatalogSourceOnStatus(crc, mainCatalogName, testNamespace, catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
// Get initial configmap
configMap, err := c.KubernetesInterface().CoreV1().ConfigMaps(testNamespace).Get(context.TODO(), fetchedInitialCatalog.Spec.ConfigMap, metav1.GetOptions{})
require.NoError(GinkgoT(), err)
// Check pod created
initialPods, err := c.KubernetesInterface().CoreV1().Pods(testNamespace).List(context.TODO(), metav1.ListOptions{LabelSelector: "olm.configMapResourceVersion=" + configMap.ResourceVersion})
require.NoError(GinkgoT(), err)
require.Equal(GinkgoT(), 1, len(initialPods.Items))
// delete the first catalog
cleanupSource()
// create a catalog with the same name
createInternalCatalogSource(c, crc, mainCatalogName, testNamespace, append(mainManifests, dependentManifests...), []apiextensions.CustomResourceDefinition{dependentCRD}, []v1alpha1.ClusterServiceVersion{mainCSV, dependentCSV})
// Create Subscription
subscriptionName := genName("sub-")
createSubscriptionForCatalog(crc, testNamespace, subscriptionName, mainCatalogName, mainPackageName, stableChannel, "", v1alpha1.ApprovalAutomatic)
subscription, err := fetchSubscription(crc, testNamespace, subscriptionName, subscriptionStateAtLatestChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
_, err = fetchCSV(crc, subscription.Status.CurrentCSV, testNamespace, buildCSVConditionChecker(v1alpha1.CSVPhaseSucceeded))
require.NoError(GinkgoT(), err)
})
It("gRPC address catalog source", func() {
// Create an internal (configmap) CatalogSource with stable and dependency csv
// Create an internal (configmap) replacement CatalogSource with a stable, stable-replacement, and dependency csv
// Copy both configmap-server pods to the test namespace
// Delete both CatalogSources
// Create an "address" CatalogSource with a Spec.Address field set to the stable copied pod's PodIP
// Create a Subscription to the stable package
// Wait for the stable Subscription to be Successful
// Wait for the stable CSV to be Successful
// Update the "address" CatalogSources's Spec.Address field with the PodIP of the replacement copied pod's PodIP
// Wait for the replacement CSV to be Successful
mainPackageName := genName("nginx-")
dependentPackageName := genName("nginxdep-")
mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
mainPackageReplacement := fmt.Sprintf("%s-replacement", mainPackageStable)
dependentPackageStable := fmt.Sprintf("%s-stable", dependentPackageName)
stableChannel := "stable"
mainNamedStrategy := newNginxInstallStrategy(genName("dep-"), nil, nil)
dependentNamedStrategy := newNginxInstallStrategy(genName("dep-"), nil, nil)
crdPlural := genName("ins-")
dependentCRD := newCRD(crdPlural)
mainCSV := newCSV(mainPackageStable, testNamespace, "", semver.MustParse("0.1.0"), nil, []apiextensions.CustomResourceDefinition{dependentCRD}, mainNamedStrategy)
replacementCSV := newCSV(mainPackageReplacement, testNamespace, mainPackageStable, semver.MustParse("0.2.0"), nil, []apiextensions.CustomResourceDefinition{dependentCRD}, mainNamedStrategy)
dependentCSV := newCSV(dependentPackageStable, testNamespace, "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{dependentCRD}, nil, dependentNamedStrategy)
c := newKubeClient()
crc := newCRClient()
mainSourceName := genName("mock-ocs-main-")
replacementSourceName := genName("mock-ocs-main-with-replacement-")
// Create separate manifests for each CatalogSource
mainManifests := []registry.PackageManifest{
{
PackageName: mainPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: mainPackageStable},
},
DefaultChannelName: stableChannel,
},
}
replacementManifests := []registry.PackageManifest{
{
PackageName: mainPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: mainPackageReplacement},
},
DefaultChannelName: stableChannel,
},
}
dependentManifests := []registry.PackageManifest{
{
PackageName: dependentPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: dependentPackageStable},
},
DefaultChannelName: stableChannel,
},
}
// Create ConfigMap CatalogSources
createInternalCatalogSource(c, crc, mainSourceName, testNamespace, append(mainManifests, dependentManifests...), []apiextensions.CustomResourceDefinition{dependentCRD}, []v1alpha1.ClusterServiceVersion{mainCSV, dependentCSV})
createInternalCatalogSource(c, crc, replacementSourceName, testNamespace, append(replacementManifests, dependentManifests...), []apiextensions.CustomResourceDefinition{dependentCRD}, []v1alpha1.ClusterServiceVersion{replacementCSV, mainCSV, dependentCSV})
// Wait for ConfigMap CatalogSources to be ready
mainSource, err := fetchCatalogSourceOnStatus(crc, mainSourceName, testNamespace, catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
replacementSource, err := fetchCatalogSourceOnStatus(crc, replacementSourceName, testNamespace, catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
// Replicate catalog pods with no OwnerReferences
mainCopy := replicateCatalogPod(GinkgoT(), c, mainSource)
mainCopy = awaitPod(GinkgoT(), c, mainCopy.GetNamespace(), mainCopy.GetName(), hasPodIP)
replacementCopy := replicateCatalogPod(GinkgoT(), c, replacementSource)
replacementCopy = awaitPod(GinkgoT(), c, replacementCopy.GetNamespace(), replacementCopy.GetName(), hasPodIP)
addressSourceName := genName("address-catalog-")
// Create a CatalogSource pointing to the grpc pod
addressSource := &v1alpha1.CatalogSource{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.CatalogSourceKind,
APIVersion: v1alpha1.CatalogSourceCRDAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: addressSourceName,
Namespace: testNamespace,
},
Spec: v1alpha1.CatalogSourceSpec{
SourceType: v1alpha1.SourceTypeGrpc,
Address: net.JoinHostPort(mainCopy.Status.PodIP, "50051"),
},
}
addressSource, err = crc.OperatorsV1alpha1().CatalogSources(testNamespace).Create(context.TODO(), addressSource, metav1.CreateOptions{})
require.NoError(GinkgoT(), err)
defer func() {
err := crc.OperatorsV1alpha1().CatalogSources(testNamespace).Delete(context.TODO(), addressSourceName, metav1.DeleteOptions{})
require.NoError(GinkgoT(), err)
}()
// Delete CatalogSources
err = crc.OperatorsV1alpha1().CatalogSources(testNamespace).Delete(context.TODO(), mainSourceName, metav1.DeleteOptions{})
require.NoError(GinkgoT(), err)
err = crc.OperatorsV1alpha1().CatalogSources(testNamespace).Delete(context.TODO(), replacementSourceName, metav1.DeleteOptions{})
require.NoError(GinkgoT(), err)
// Create Subscription
subscriptionName := genName("sub-")
cleanupSubscription := createSubscriptionForCatalog(crc, testNamespace, subscriptionName, addressSourceName, mainPackageName, stableChannel, "", v1alpha1.ApprovalAutomatic)
defer cleanupSubscription()
subscription, err := fetchSubscription(crc, testNamespace, subscriptionName, subscriptionStateAtLatestChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
_, err = fetchCSV(crc, subscription.Status.CurrentCSV, testNamespace, csvSucceededChecker)
require.NoError(GinkgoT(), err)
// Update the catalog's address to point at the other registry pod's cluster ip
addressSource, err = crc.OperatorsV1alpha1().CatalogSources(testNamespace).Get(context.TODO(), addressSourceName, metav1.GetOptions{})
require.NoError(GinkgoT(), err)
addressSource.Spec.Address = net.JoinHostPort(replacementCopy.Status.PodIP, "50051")
_, err = crc.OperatorsV1alpha1().CatalogSources(testNamespace).Update(context.TODO(), addressSource, metav1.UpdateOptions{})
require.NoError(GinkgoT(), err)
// Wait for the replacement CSV to be installed
_, err = awaitCSV(GinkgoT(), crc, testNamespace, replacementCSV.GetName(), csvSucceededChecker)
require.NoError(GinkgoT(), err)
})
It("delete internal registry pod triggers recreation", func() {
// Create internal CatalogSource containing csv in package
// Wait for a registry pod to be created
// Create a Subscription for package
// Wait for the Subscription to succeed
// Wait for csv to succeed
// Delete the registry pod
// Wait for a new registry pod to be created
// Create internal CatalogSource containing csv in package
packageName := genName("nginx-")
packageStable := fmt.Sprintf("%s-stable", packageName)
stableChannel := "stable"
namedStrategy := newNginxInstallStrategy(genName("dep-"), nil, nil)
sourceName := genName("catalog-")
crd := newCRD(genName("ins-"))
csv := newCSV(packageStable, testNamespace, "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{crd}, nil, namedStrategy)
manifests := []registry.PackageManifest{
{
PackageName: packageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: packageStable},
},
DefaultChannelName: stableChannel,
},
}
c := newKubeClient()
crc := newCRClient()
_, cleanupSource := createInternalCatalogSource(c, crc, sourceName, testNamespace, manifests, []apiextensions.CustomResourceDefinition{crd}, []v1alpha1.ClusterServiceVersion{csv})
defer cleanupSource()
// Wait for a new registry pod to be created
selector := labels.SelectorFromSet(map[string]string{"olm.catalogSource": sourceName})
singlePod := podCount(1)
registryPods, err := awaitPods(GinkgoT(), c, testNamespace, selector.String(), singlePod)
require.NoError(GinkgoT(), err, "error awaiting registry pod")
require.NotNil(GinkgoT(), registryPods, "nil registry pods")
require.Equal(GinkgoT(), 1, len(registryPods.Items), "unexpected number of registry pods found")
// Store the UID for later comparison
uid := registryPods.Items[0].GetUID()
name := registryPods.Items[0].GetName()
// Create a Subscription for package
subscriptionName := genName("sub-")
cleanupSubscription := createSubscriptionForCatalog(crc, testNamespace, subscriptionName, sourceName, packageName, stableChannel, "", v1alpha1.ApprovalAutomatic)
defer cleanupSubscription()
// Wait for the Subscription to succeed
subscription, err := fetchSubscription(crc, testNamespace, subscriptionName, subscriptionStateAtLatestChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
// Wait for csv to succeed
_, err = fetchCSV(crc, subscription.Status.CurrentCSV, testNamespace, csvSucceededChecker)
require.NoError(GinkgoT(), err)
// Delete the registry pod
Eventually(func() error {
err := c.KubernetesInterface().CoreV1().Pods(testNamespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
if apierrors.IsNotFound(err) {
err = nil
}
return err
}).Should(Succeed())
// Wait for a new registry pod to be created
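// notUID reports whether none of the listed pods still carries the old pod's UID, i.e. the deleted pod has been replaced.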
notUID := func(pods *corev1.PodList) bool {
for _, pod := range pods.Items {
if pod.GetUID() == uid {
return false
}
}
return true
}
registryPods, err = awaitPods(GinkgoT(), c, testNamespace, selector.String(), unionPodsCheck(singlePod, notUID))
require.NoError(GinkgoT(), err, "error waiting for replacement registry pod")
require.NotNil(GinkgoT(), registryPods, "nil replacement registry pods")
require.Equal(GinkgoT(), 1, len(registryPods.Items), "unexpected number of replacement registry pods found")
})
It("delete gRPC registry pod triggers recreation", func() {
// Create gRPC CatalogSource using an external registry image (community-operators)
// Wait for a registry pod to be created
// Create a Subscription for package
// Wait for the Subscription to succeed
// Wait for csv to succeed
// Delete the registry pod
// Wait for a new registry pod to be created
sourceName := genName("catalog-")
packageName := "etcd"
channelName := "clusterwide-alpha"
// Create gRPC CatalogSource using an external registry image (community-operators)
source := &v1alpha1.CatalogSource{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.CatalogSourceKind,
APIVersion: v1alpha1.CatalogSourceCRDAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: sourceName,
Namespace: testNamespace,
},
Spec: v1alpha1.CatalogSourceSpec{
SourceType: v1alpha1.SourceTypeGrpc,
Image: communityOperatorsImage,
},
}
crc := newCRClient()
source, err := crc.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Create(context.TODO(), source, metav1.CreateOptions{})
require.NoError(GinkgoT(), err)
// Wait for a new registry pod to be created
c := newKubeClient()
selector := labels.SelectorFromSet(map[string]string{"olm.catalogSource": source.GetName()})
singlePod := podCount(1)
registryPods, err := awaitPods(GinkgoT(), c, source.GetNamespace(), selector.String(), singlePod)
require.NoError(GinkgoT(), err, "error awaiting registry pod")
require.NotNil(GinkgoT(), registryPods, "nil registry pods")
require.Equal(GinkgoT(), 1, len(registryPods.Items), "unexpected number of registry pods found")
// Store the UID for later comparison
uid := registryPods.Items[0].GetUID()
name := registryPods.Items[0].GetName()
// Create a Subscription for package
subscriptionName := genName("sub-")
_ = createSubscriptionForCatalog(crc, source.GetNamespace(), subscriptionName, source.GetName(), packageName, channelName, "", v1alpha1.ApprovalAutomatic)
// Wait for the Subscription to succeed
subscription, err := fetchSubscription(crc, testNamespace, subscriptionName, subscriptionStateAtLatestChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
// Wait for csv to succeed
_, err = fetchCSV(crc, subscription.Status.CurrentCSV, subscription.GetNamespace(), csvSucceededChecker)
require.NoError(GinkgoT(), err)
// Delete the registry pod
Eventually(func() error {
backgroundDeletion := metav1.DeletePropagationBackground
err = c.KubernetesInterface().CoreV1().Pods(testNamespace).DeleteCollection(context.TODO(), metav1.DeleteOptions{PropagationPolicy: &backgroundDeletion}, metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
if k8serrors.IsNotFound(err) {
return nil
}
return err
}
return nil
}).Should(Succeed())
Eventually(func() error {
_, err := c.KubernetesInterface().CoreV1().Pods(testNamespace).Get(context.TODO(), name, metav1.GetOptions{})
if k8serrors.IsNotFound(err) {
return nil
}
return err
}).Should(Succeed())
// Wait for a new registry pod to be created
notUID := func(pods *corev1.PodList) bool {
for _, pod := range pods.Items {
if pod.GetUID() == uid {
return false
}
}
return true
}
registryPods, err = awaitPods(GinkgoT(), c, testNamespace, selector.String(), unionPodsCheck(singlePod, notUID))
require.NoError(GinkgoT(), err, "error waiting for replacement registry pod")
require.NotNil(GinkgoT(), registryPods, "nil replacement registry pods")
require.Equal(GinkgoT(), 1, len(registryPods.Items), "unexpected number of replacement registry pods found")
})
It("image update", func() {
if os.Getenv("GITHUB_ACTIONS") == "true" {
Skip("This spec fails when run using KIND cluster. See https://github.com/operator-framework/operator-lifecycle-manager/issues/1380 for more details")
}
// Create an image based catalog source from public Quay image
// Use a unique tag as identifier
// See https://quay.io/repository/olmtest/catsrc-update-test?namespace=olmtest for registry
// Push an updated version of the image with the same identifier
// Confirm catalog source polling feature is working as expected: a newer version of the catalog source pod comes up
// busybox operator updated from 1.0.0 to 2.0.0
// Subscription should detect the latest version of the operator in the new catalog source and pull it
// create internal registry for purposes of pushing/pulling IF running e2e test locally
// registry is insecure and for purposes of this test only
c := newKubeClient()
crc := newCRClient()
local, err := Local(c)
if err != nil {
GinkgoT().Fatalf("cannot determine if test running locally or on CI: %s", err)
}
var registryURL string
var registryAuth string
if local {
registryURL, err = createDockerRegistry(c, testNamespace)
if err != nil {
GinkgoT().Fatalf("error creating container registry: %s", err)
}
defer deleteDockerRegistry(c, testNamespace)
// ensure registry pod is ready before attempting port-forwarding
_ = awaitPod(GinkgoT(), c, testNamespace, registryName, podReady)
err = registryPortForward(testNamespace)
if err != nil {
GinkgoT().Fatalf("port-forwarding local registry: %s", err)
}
} else {
registryURL = openshiftregistryFQDN
registryAuth, err = openshiftRegistryAuth(c, testNamespace)
if err != nil {
GinkgoT().Fatalf("error getting openshift registry authentication: %s", err)
}
}
// testImage is the name of the image used throughout the test - the image overwritten by skopeo
// the tag is generated randomly and appended to the end of the testImage
testImage := fmt.Sprint("docker://", registryURL, "/catsrc-update", ":")
tag := genName("x")
// 1. copy old catalog image into test-specific tag in internal docker registry
// create skopeo pod to actually do the work of copying (on openshift) or exec out to local skopeo
if local {
_, err := skopeoLocalCopy(testImage, tag, catsrcImage, "old")
if err != nil {
GinkgoT().Fatalf("error copying old registry file: %s", err)
}
} else {
skopeoArgs := skopeoCopyCmd(testImage, tag, catsrcImage, "old", registryAuth)
err = createSkopeoPod(c, skopeoArgs, testNamespace)
if err != nil {
GinkgoT().Fatalf("error creating skopeo pod: %s", err)
}
// wait for skopeo pod to exit successfully
awaitPod(GinkgoT(), c, testNamespace, skopeo, func(pod *corev1.Pod) bool {
return pod.Status.Phase == corev1.PodSucceeded
})
err = deleteSkopeoPod(c, testNamespace)
if err != nil {
GinkgoT().Fatalf("error deleting skopeo pod: %s", err)
}
}
// 2. setup catalog source
sourceName := genName("catalog-")
packageName := "busybox"
channelName := "alpha"
// Create gRPC CatalogSource using an external registry image and poll interval
// strip off the docker:// prefix and append the test-specific tag
image := testImage[9:] + tag
source := &v1alpha1.CatalogSource{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.CatalogSourceKind,
APIVersion: v1alpha1.CatalogSourceCRDAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: sourceName,
Namespace: testNamespace,
Labels: map[string]string{"olm.catalogSource": sourceName},
},
Spec: v1alpha1.CatalogSourceSpec{
SourceType: v1alpha1.SourceTypeGrpc,
Image: image,
UpdateStrategy: &v1alpha1.UpdateStrategy{
RegistryPoll: &v1alpha1.RegistryPoll{
Interval: &metav1.Duration{Duration: 1 * time.Minute},
},
},
},
}
source, err = crc.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Create(context.TODO(), source, metav1.CreateOptions{})
require.NoError(GinkgoT(), err)
defer func() {
require.NoError(GinkgoT(), crc.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Delete(context.TODO(), source.GetName(), metav1.DeleteOptions{}))
}()
// wait for new catalog source pod to be created
// Wait for a new registry pod to be created
selector := labels.SelectorFromSet(map[string]string{"olm.catalogSource": source.GetName()})
singlePod := podCount(1)
registryPods, err := awaitPods(GinkgoT(), c, source.GetNamespace(), selector.String(), singlePod)
require.NoError(GinkgoT(), err, "error awaiting registry pod")
require.NotNil(GinkgoT(), registryPods, "nil registry pods")
require.Equal(GinkgoT(), 1, len(registryPods.Items), "unexpected number of registry pods found")
// Create a Subscription for package
subscriptionName := genName("sub-")
cleanupSubscription := createSubscriptionForCatalog(crc, source.GetNamespace(), subscriptionName, source.GetName(), packageName, channelName, "", v1alpha1.ApprovalAutomatic)
defer cleanupSubscription()
// Wait for the Subscription to succeed
subscription, err := fetchSubscription(crc, testNamespace, subscriptionName, subscriptionStateAtLatestChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
// Wait for csv to succeed
_, err = fetchCSV(crc, subscription.Status.CurrentCSV, subscription.GetNamespace(), csvSucceededChecker)
require.NoError(GinkgoT(), err)
registryCheckFunc := func(podList *corev1.PodList) bool {
// expect exactly one registry pod with a populated image ID
if len(podList.Items) != 1 {
return false
}
return podList.Items[0].Status.ContainerStatuses[0].ImageID != ""
}
// get old catalog source pod
registryPod, err := awaitPods(GinkgoT(), c, source.GetNamespace(), selector.String(), registryCheckFunc)
require.NoError(GinkgoT(), err, "error awaiting old registry pod")
// 3. Update image on registry via skopeo: this should trigger a newly updated version of the catalog source pod
// to be deployed after some time
// Make another skopeo pod to do the work of copying the image
if local {
_, err := skopeoLocalCopy(testImage, tag, catsrcImage, "new")
if err != nil {
GinkgoT().Fatalf("error copying new registry file: %s", err)
}
} else {
skopeoArgs := skopeoCopyCmd(testImage, tag, catsrcImage, "new", registryAuth)
err = createSkopeoPod(c, skopeoArgs, testNamespace)
if err != nil {
GinkgoT().Fatalf("error creating skopeo pod: %s", err)
}
// wait for skopeo pod to exit successfully
awaitPod(GinkgoT(), c, testNamespace, skopeo, func(pod *corev1.Pod) bool {
return pod.Status.Phase == corev1.PodSucceeded
})
err = deleteSkopeoPod(c, testNamespace)
if err != nil {
GinkgoT().Fatalf("error deleting skopeo pod: %s", err)
}
}
// update catalog source with annotation (to kick resync)
source, err = crc.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Get(context.TODO(), source.GetName(), metav1.GetOptions{})
require.NoError(GinkgoT(), err, "error awaiting registry pod")
source.Annotations = make(map[string]string)
source.Annotations["testKey"] = "testValue"
_, err = crc.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Update(context.TODO(), source, metav1.UpdateOptions{})
require.NoError(GinkgoT(), err, "error awaiting registry pod")
time.Sleep(11 * time.Second)
// ensure new registry pod container image is as we expect
podCheckFunc := func(podList *corev1.PodList) bool {
fmt.Printf("pod list length %d\n", len(podList.Items))
for _, pod := range podList.Items {
fmt.Printf("pod list name %v\n", pod.Name)
}
for _, pod := range podList.Items {
fmt.Printf("old image id %s\n new image id %s\n", registryPod.Items[0].Status.ContainerStatuses[0].ImageID,
pod.Status.ContainerStatuses[0].ImageID)
if pod.Status.ContainerStatuses[0].ImageID != registryPod.Items[0].Status.ContainerStatuses[0].ImageID {
return true
}
}
// update catalog source with annotation (to kick resync)
source, err = crc.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Get(context.TODO(), source.GetName(), metav1.GetOptions{})
require.NoError(GinkgoT(), err, "error getting catalog source pod")
source.Annotations["testKey"] = genName("newValue")
_, err = crc.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Update(context.TODO(), source, metav1.UpdateOptions{})
require.NoError(GinkgoT(), err, "error updating catalog source pod with test annotation")
return false
}
// await new catalog source and ensure old one was deleted
registryPods, err = awaitPodsWithInterval(GinkgoT(), c, source.GetNamespace(), selector.String(), 30*time.Second, 10*time.Minute, podCheckFunc)
require.NoError(GinkgoT(), err, "error awaiting registry pod")
require.NotNil(GinkgoT(), registryPods, "nil registry pods")
require.Equal(GinkgoT(), 1, len(registryPods.Items), "unexpected number of registry pods found")
// update catalog source with annotation (to kick resync)
source, err = crc.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Get(context.TODO(), source.GetName(), metav1.GetOptions{})
require.NoError(GinkgoT(), err, "error awaiting registry pod")
source.Annotations["testKey"] = "newValue"
_, err = crc.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Update(context.TODO(), source, metav1.UpdateOptions{})
require.NoError(GinkgoT(), err, "error awaiting registry pod")
subChecker := func(sub *v1alpha1.Subscription) bool {
return sub.Status.InstalledCSV == "busybox.v2.0.0"
}
// Wait for the Subscription to succeed
subscription, err = fetchSubscription(crc, testNamespace, subscriptionName, subChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
// Wait for csv to succeed
csv, err := fetchCSV(crc, subscription.Status.CurrentCSV, subscription.GetNamespace(), csvSucceededChecker)
require.NoError(GinkgoT(), err)
// check version of running csv to ensure the latest version (2.0.0) was installed onto the cluster
v := csv.Spec.Version
busyboxVersion := semver.Version{
Major: 2,
Minor: 0,
Patch: 0,
}
if !reflect.DeepEqual(v, version.OperatorVersion{Version: busyboxVersion}) {
GinkgoT().Errorf("latest version of operator not installed: catalog souce update failed")
}
})
It("Dependency has correct replaces field", func() {
// Create a CatalogSource that contains the busybox v1 and busybox-dependency v1 images
// Create a Subscription for busybox v1, which has a dependency on busybox-dependency v1.
// Wait for the busybox and busybox2 Subscriptions to succeed
// Wait for the CSVs to succeed
// Update the catalog to point to an image that contains the busybox v2 and busybox-dependency v2 images.
// Wait for the new Subscriptions to succeed and check if they include the new CSVs
// Wait for the CSVs to succeed and confirm that they have the correct Spec.Replaces fields.
sourceName := genName("catalog-")
packageName := "busybox"
channelName := "alpha"
catSrcImage := "quay.io/olmtest/busybox-dependencies-index"
// Create gRPC CatalogSource
source := &v1alpha1.CatalogSource{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.CatalogSourceKind,
APIVersion: v1alpha1.CatalogSourceCRDAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: sourceName,
Namespace: testNamespace,
},
Spec: v1alpha1.CatalogSourceSpec{
SourceType: v1alpha1.SourceTypeGrpc,
Image: catSrcImage + ":1.0.0",
},
}
crc := newCRClient()
source, err := crc.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Create(context.TODO(), source, metav1.CreateOptions{})
require.NoError(GinkgoT(), err)
defer func() {
require.NoError(GinkgoT(), crc.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Delete(context.TODO(), source.GetName(), metav1.DeleteOptions{}))
}()
// Create a Subscription for busybox
subscriptionName := genName("sub-")
cleanupSubscription := createSubscriptionForCatalog(crc, source.GetNamespace(), subscriptionName, source.GetName(), packageName, channelName, "", v1alpha1.ApprovalAutomatic)
defer cleanupSubscription()
// Wait for the Subscription to succeed
subscription, err := fetchSubscription(crc, testNamespace, subscriptionName, subscriptionStateAtLatestChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
require.Equal(GinkgoT(), subscription.Status.InstalledCSV, "busybox.v1.0.0")
// Confirm that a subscription was created for busybox-dependency
subscriptionList, err := crc.OperatorsV1alpha1().Subscriptions(source.GetNamespace()).List(context.TODO(), metav1.ListOptions{})
require.NoError(GinkgoT(), err)
dependencySubscriptionName := ""
for _, sub := range subscriptionList.Items {
if strings.HasPrefix(sub.GetName(), "busybox-dependency") {
dependencySubscriptionName = sub.GetName()
}
}
require.NotEmpty(GinkgoT(), dependencySubscriptionName)
// Wait for the Subscription to succeed
subscription, err = fetchSubscription(crc, testNamespace, dependencySubscriptionName, subscriptionStateAtLatestChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
require.Equal(GinkgoT(), subscription.Status.InstalledCSV, "busybox-dependency.v1.0.0")
// Update the catalog image
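// Retry the update until it is accepted; the CatalogSource may be modified concurrently by the operator, causing conflicts.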
err = wait.PollImmediate(pollInterval, pollDuration, func() (bool, error) {
existingSource, err := crc.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Get(context.TODO(), sourceName, metav1.GetOptions{})
if err != nil {
return false, err
}
existingSource.Spec.Image = catSrcImage + ":2.0.0"
source, err = crc.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Update(context.TODO(), existingSource, metav1.UpdateOptions{})
if err == nil {
return true, nil
}
return false, nil
})
require.NoError(GinkgoT(), err)
// Wait for the busybox v2 Subscription to succeed
subChecker := func(sub *v1alpha1.Subscription) bool {
return sub.Status.InstalledCSV == "busybox.v2.0.0"
}
subscription, err = fetchSubscription(crc, testNamespace, subscriptionName, subChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
// Wait for busybox v2 csv to succeed and check the replaces field
csv, err := fetchCSV(crc, subscription.Status.CurrentCSV, subscription.GetNamespace(), csvSucceededChecker)
require.NoError(GinkgoT(), err)
require.Equal(GinkgoT(), "busybox.v1.0.0", csv.Spec.Replaces)
// Wait for the busybox-dependency v2 Subscription to succeed
subChecker = func(sub *v1alpha1.Subscription) bool {
return sub.Status.InstalledCSV == "busybox-dependency.v2.0.0"
}
subscription, err = fetchSubscription(crc, testNamespace, dependencySubscriptionName, subChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
// Wait for busybox-dependency v2 csv to succeed and check the replaces field
csv, err = fetchCSV(crc, subscription.Status.CurrentCSV, subscription.GetNamespace(), csvSucceededChecker)
require.NoError(GinkgoT(), err)
require.Equal(GinkgoT(), "busybox-dependency.v1.0.0", csv.Spec.Replaces)
})
})
const (
openshiftregistryFQDN = "image-registry.openshift-image-registry.svc:5000/openshift-operators"
catsrcImage = "docker://quay.io/olmtest/catsrc-update-test:"
)
func getOperatorDeployment(c operatorclient.ClientInterface, namespace string, operatorLabels labels.Set) (*appsv1.Deployment, error) {
deployments, err := c.ListDeploymentsWithLabels(namespace, operatorLabels)
if err != nil || deployments == nil || len(deployments.Items) != 1 {
return nil, fmt.Errorf("Error getting single operator deployment for label: %v", operatorLabels)
}
return &deployments.Items[0], nil
}
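// rescaleDeployment scales the given deployment down to zero replicas and back up to one, forcing its pods to be recreated.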
func rescaleDeployment(c operatorclient.ClientInterface, deployment *appsv1.Deployment) error {
// scale down
var replicas int32 = 0
deployment.Spec.Replicas = &replicas
deployment, updated, err := c.UpdateDeployment(deployment)
if err != nil || !updated || deployment == nil {
return fmt.Errorf("Failed to scale down deployment")
}
waitForScaleup := func() (bool, error) {
fetchedDeployment, err := c.GetDeployment(deployment.GetNamespace(), deployment.GetName())
if err != nil {
return true, err
}
if fetchedDeployment.Status.Replicas == replicas {
return true, nil
}
return false, nil
}
// wait for deployment to scale down
err = wait.Poll(pollInterval, pollDuration, waitForScaleup)
if err != nil {
return err
}
// scale up
replicas = 1
deployment.Spec.Replicas = &replicas
deployment, updated, err = c.UpdateDeployment(deployment)
if err != nil || !updated || deployment == nil {
return fmt.Errorf("Failed to scale up deployment")
}
// wait for deployment to scale up
err = wait.Poll(pollInterval, pollDuration, waitForScaleup)
return err
}
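// replicateCatalogPod creates a copy of the catalog source's registry pod with no OwnerReferences, so the copy survives deletion of the original CatalogSource.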
func replicateCatalogPod(t GinkgoTInterface, c operatorclient.ClientInterface, catalog *v1alpha1.CatalogSource) *corev1.Pod {
initialPods, err := c.KubernetesInterface().CoreV1().Pods(catalog.GetNamespace()).List(context.TODO(), metav1.ListOptions{LabelSelector: "olm.catalogSource=" + catalog.GetName()})
require.NoError(t, err)
require.Equal(t, 1, len(initialPods.Items))
pod := initialPods.Items[0]
copied := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: catalog.GetNamespace(),
Name: catalog.GetName() + "-copy",
},
Spec: pod.Spec,
}
copied, err = c.KubernetesInterface().CoreV1().Pods(catalog.GetNamespace()).Create(context.TODO(), copied, metav1.CreateOptions{})
require.NoError(t, err)
return copied
}
| ["\"GITHUB_ACTIONS\""] | [] | ["GITHUB_ACTIONS"] | [] | ["GITHUB_ACTIONS"] | go | 1 | 0 |