filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
train_deform.py
|
import os
import time
import argparse
import random
import numpy as np
import torch
import torch.nn.functional as F
import tensorflow as tf
from lib.network import DeformNet
from lib.loss import Loss
from data.pose_dataset import PoseDataset
from lib.utils import setup_logger, compute_sRT_errors
from lib.align import estimateSimilarityTransform
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='CAMERA', help='CAMERA or CAMERA+Real')
parser.add_argument('--data_dir', type=str, default='data', help='data directory')
parser.add_argument('--n_pts', type=int, default=1024, help='number of foreground points')
parser.add_argument('--n_cat', type=int, default=6, help='number of object categories')
parser.add_argument('--nv_prior', type=int, default=1024, help='number of vertices in shape priors')
parser.add_argument('--img_size', type=int, default=192, help='cropped image size')
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--num_workers', type=int, default=10, help='number of data loading workers')
parser.add_argument('--gpu', type=str, default='0', help='GPU to use')
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
parser.add_argument('--start_epoch', type=int, default=1, help='which epoch to start')
parser.add_argument('--max_epoch', type=int, default=50, help='max number of epochs to train')
parser.add_argument('--resume_model', type=str, default='', help='resume from saved model')
parser.add_argument('--result_dir', type=str, default='results/camera', help='directory to save train results')
opt = parser.parse_args()
opt.decay_epoch = [0, 10, 20, 30, 40]
opt.decay_rate = [1.0, 0.6, 0.3, 0.1, 0.01]
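# Stepped LR schedule: a new Adam optimizer is created whenever the epoch passes a decay_epoch
# boundary, using lr * decay_rate for that stage (epochs 1-10: 1.0x, 11-20: 0.6x, 21-30: 0.3x, 31-40: 0.1x, 41-50: 0.01x).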
opt.corr_wt = 1.0
opt.cd_wt = 5.0
opt.entropy_wt = 0.0001
opt.deform_wt = 0.01
def train_net():
# set result directory
if not os.path.exists(opt.result_dir):
os.makedirs(opt.result_dir)
tb_writer = tf.summary.FileWriter(opt.result_dir)
logger = setup_logger('train_log', os.path.join(opt.result_dir, 'log.txt'))
for key, value in vars(opt).items():
logger.info(key + ': ' + str(value))
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
# model & loss
estimator = DeformNet(opt.n_cat, opt.nv_prior)
estimator.cuda()
criterion = Loss(opt.corr_wt, opt.cd_wt, opt.entropy_wt, opt.deform_wt)
if opt.resume_model != '':
estimator.load_state_dict(torch.load(opt.resume_model))
# dataset
train_dataset = PoseDataset(opt.dataset, 'train', opt.data_dir, opt.n_pts, opt.img_size)
val_dataset = PoseDataset(opt.dataset, 'test', opt.data_dir, opt.n_pts, opt.img_size)
# start training
st_time = time.time()
train_steps = 1500
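# Each epoch runs a fixed budget of train_steps mini-batches; a shuffled index list is paged
# through below and refilled whenever fewer than train_size indices remain.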
global_step = train_steps * (opt.start_epoch - 1)
n_decays = len(opt.decay_epoch)
assert len(opt.decay_rate) == n_decays
for i in range(n_decays):
if opt.start_epoch > opt.decay_epoch[i]:
decay_count = i
train_size = train_steps * opt.batch_size
indices = []
page_start = -train_size
for epoch in range(opt.start_epoch, opt.max_epoch + 1):
# train one epoch
logger.info('Time {0}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) + \
', ' + 'Epoch %02d' % epoch + ', ' + 'Training started'))
# create optimizer and adjust learning rate if needed
if decay_count < len(opt.decay_rate):
if epoch > opt.decay_epoch[decay_count]:
current_lr = opt.lr * opt.decay_rate[decay_count]
optimizer = torch.optim.Adam(estimator.parameters(), lr=current_lr)
decay_count += 1
# sample train subset
page_start += train_size
len_last = len(indices) - page_start
if len_last < train_size:
indices = indices[page_start:]
if opt.dataset == 'CAMERA+Real':
# CAMERA : Real = 3 : 1
camera_len = train_dataset.subset_len[0]
real_len = train_dataset.subset_len[1]
real_indices = list(range(camera_len, camera_len+real_len))
camera_indices = list(range(camera_len))
n_repeat = (train_size - len_last) // (4 * real_len) + 1
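# enough repeats of the Real subset so that 3 parts CAMERA + 1 part Real cover the remaining budget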
data_list = random.sample(camera_indices, 3*n_repeat*real_len) + real_indices*n_repeat
random.shuffle(data_list)
indices += data_list
else:
data_list = list(range(train_dataset.length))
for i in range((train_size - len_last) // train_dataset.length + 1):
random.shuffle(data_list)
indices += data_list
page_start = 0
train_idx = indices[page_start:(page_start+train_size)]
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_idx)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batch_size, sampler=train_sampler,
num_workers=opt.num_workers, pin_memory=True)
estimator.train()
for i, data in enumerate(train_dataloader, 1):
points, rgb, choose, cat_id, model, prior, sRT, nocs = data
points = points.cuda()
rgb = rgb.cuda()
choose = choose.cuda()
cat_id = cat_id.cuda()
model = model.cuda()
prior = prior.cuda()
sRT = sRT.cuda()
nocs = nocs.cuda()
assign_mat, deltas = estimator(points, rgb, choose, cat_id, prior)
loss, corr_loss, cd_loss, entropy_loss, deform_loss = criterion(assign_mat, deltas, prior, nocs, model)
optimizer.zero_grad()
loss.backward()
optimizer.step()
global_step += 1
# write results to tensorboard
summary = tf.Summary(value=[tf.Summary.Value(tag='learning_rate', simple_value=current_lr),
tf.Summary.Value(tag='train_loss', simple_value=loss),
tf.Summary.Value(tag='corr_loss', simple_value=corr_loss),
tf.Summary.Value(tag='cd_loss', simple_value=cd_loss),
tf.Summary.Value(tag='entropy_loss', simple_value=entropy_loss),
tf.Summary.Value(tag='deform_loss', simple_value=deform_loss)])
tb_writer.add_summary(summary, global_step)
if i % 10 == 0:
logger.info('Batch {0} Loss:{1:f}, corr_loss:{2:f}, cd_loss:{3:f}, entropy_loss:{4:f}, deform_loss:{5:f}'.format(
i, loss.item(), corr_loss.item(), cd_loss.item(), entropy_loss.item(), deform_loss.item()))
logger.info('>>>>>>>>----------Epoch {:02d} train finish---------<<<<<<<<'.format(epoch))
# evaluate one epoch
logger.info('Time {0}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) +
', ' + 'Epoch %02d' % epoch + ', ' + 'Testing started'))
val_loss = 0.0
total_count = np.zeros((opt.n_cat,), dtype=int)
strict_success = np.zeros((opt.n_cat,), dtype=int) # 5 degree and 5 cm
easy_success = np.zeros((opt.n_cat,), dtype=int) # 10 degree and 5 cm
iou_success = np.zeros((opt.n_cat,), dtype=int) # relative scale error < 0.1
# sample validation subset
val_size = 1500
val_idx = random.sample(list(range(val_dataset.length)), val_size)
val_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_idx)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=1, sampler=val_sampler,
num_workers=opt.num_workers, pin_memory=True)
estimator.eval()
for i, data in enumerate(val_dataloader, 1):
points, rgb, choose, cat_id, model, prior, sRT, nocs = data
points = points.cuda()
rgb = rgb.cuda()
choose = choose.cuda()
cat_id = cat_id.cuda()
model = model.cuda()
prior = prior.cuda()
sRT = sRT.cuda()
nocs = nocs.cuda()
assign_mat, deltas = estimator(points, rgb, choose, cat_id, prior)
loss, _, _, _, _ = criterion(assign_mat, deltas, prior, nocs, model)
# estimate pose and scale
inst_shape = prior + deltas
assign_mat = F.softmax(assign_mat, dim=2)
nocs_coords = torch.bmm(assign_mat, inst_shape)
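# Each observed point's NOCS coordinate is a softmax-weighted average of the deformed
# shape prior vertices (prior + deltas); the similarity transform between these NOCS
# coordinates and the observed points then yields the predicted pose and scale.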
nocs_coords = nocs_coords.detach().cpu().numpy()[0]
points = points.cpu().numpy()[0]
# use choose to remove repeated points
choose = choose.cpu().numpy()[0]
_, choose = np.unique(choose, return_index=True)
nocs_coords = nocs_coords[choose, :]
points = points[choose, :]
_, _, _, pred_sRT = estimateSimilarityTransform(nocs_coords, points)
# evaluate pose
cat_id = cat_id.item()
if pred_sRT is not None:
sRT = sRT.detach().cpu().numpy()[0]
R_error, T_error, IoU = compute_sRT_errors(pred_sRT, sRT)
if R_error < 5 and T_error < 0.05:
strict_success[cat_id] += 1
if R_error < 10 and T_error < 0.05:
easy_success[cat_id] += 1
if IoU < 0.1:
iou_success[cat_id] += 1
total_count[cat_id] += 1
val_loss += loss.item()
if i % 100 == 0:
logger.info('Batch {0} Loss:{1:f}'.format(i, loss.item()))
# compute accuracy
strict_acc = 100 * (strict_success / total_count)
easy_acc = 100 * (easy_success / total_count)
iou_acc = 100 * (iou_success / total_count)
for i in range(opt.n_cat):
logger.info('{} accuracies:'.format(val_dataset.cat_names[i]))
logger.info('5^o 5cm: {:4f}'.format(strict_acc[i]))
logger.info('10^o 5cm: {:4f}'.format(easy_acc[i]))
logger.info('IoU < 0.1: {:4f}'.format(iou_acc[i]))
strict_acc = np.mean(strict_acc)
easy_acc = np.mean(easy_acc)
iou_acc = np.mean(iou_acc)
val_loss = val_loss / val_size
summary = tf.Summary(value=[tf.Summary.Value(tag='val_loss', simple_value=val_loss),
tf.Summary.Value(tag='5^o5cm_acc', simple_value=strict_acc),
tf.Summary.Value(tag='10^o5cm_acc', simple_value=easy_acc),
tf.Summary.Value(tag='iou_acc', simple_value=iou_acc)])
tb_writer.add_summary(summary, global_step)
logger.info('Epoch {0:02d} test average loss: {1:06f}'.format(epoch, val_loss))
logger.info('Overall accuracies:')
logger.info('5^o 5cm: {:4f} 10^o 5cm: {:4f} IoU: {:4f}'.format(strict_acc, easy_acc, iou_acc))
logger.info('>>>>>>>>----------Epoch {:02d} test finish---------<<<<<<<<'.format(epoch))
# save model after each epoch
torch.save(estimator.state_dict(), '{0}/model_{1:02d}.pth'.format(opt.result_dir, epoch))
if __name__ == '__main__':
train_net()
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
molecule/default/tests/test_installation.py
|
"""
Role tests
"""
import os
from testinfra.utils.ansible_runner import AnsibleRunner
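# Molecule exports MOLECULE_INVENTORY_FILE so testinfra can target the converged instance(s).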
testinfra_hosts = AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_package(host):
"""
Ensure package installed
"""
if host.system_info.distribution in ('debian', 'ubuntu'):
assert host.package('xvfb').is_installed
else:
assert host.package('xorg-x11-server-Xvfb').is_installed
def test_service(host):
"""
Ensure service running
"""
xvfb_service = host.service('xvfb')
assert xvfb_service.is_enabled
assert xvfb_service.is_running
def test_process(host):
"""
Ensure process running
"""
xvfb_process = host.process.get(user='root', comm='Xvfb')
assert ':99 -screen 0 1x1x24 -ac +extension GLX +render -noreset' in \
xvfb_process.args
assert len(host.process.filter(comm='Xvfb')) == 1
| [] | [] | ["MOLECULE_INVENTORY_FILE"] | [] | ["MOLECULE_INVENTORY_FILE"] | python | 1 | 0 | |
EasyStocks/wsgi.py
|
"""
WSGI config for EasyStocks project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'EasyStocks.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
litex_setup.py
|
#!/usr/bin/env python3
import os
import sys
import time
import subprocess
import shutil
import hashlib
import argparse
import urllib.request
start_time = time.time()
current_path = os.path.abspath(os.curdir)
# Helpers ------------------------------------------------------------------------------------------
def colorer(s, color="bright"): # FIXME: Move colorer to litex.common?
header = {
"bright" : "\x1b[1m",
"green" : "\x1b[1m\x1b[32m",
"cyan" : "\x1b[1m\x1b[36m",
"red" : "\x1b[1m\x1b[31m",
"yellow" : "\x1b[1m\x1b[33m",
"underline" : "\x1b[1m\x1b[4m"}[color]
trailer = "\x1b[0m"
return header + str(s) + trailer
def print_banner():
b = []
b.append(" __ _ __ _ __ ")
b.append(" / / (_) /____ | |/_/ ")
b.append(" / /__/ / __/ -_)> < ")
b.append(" /____/_/\\__/\\__/_/|_| ")
b.append(" Build your hardware, easily! ")
b.append(" LiteX Setup utility. ")
b.append("")
print("\n".join(b))
def print_status(status):
exec_time = (time.time() - start_time)
print(colorer(f"[{exec_time:8.3f}]", color="green") + " " + colorer(status))
def print_error(status):
exec_time = (time.time() - start_time)
print(colorer(f"[{exec_time:8.3f}]", color="red") + " " + colorer(status))
class SetupError(Exception):
def __init__(self):
sys.stderr = None # Error already described, avoid traceback/exception.
# Git repositories ---------------------------------------------------------------------------------
# Get SHA1: git rev-parse --short=7 HEAD
class GitRepo:
def __init__(self, url, clone="regular", develop=True, sha1=None, branch="master"):
assert clone in ["regular", "recursive"]
self.url = url
self.clone = clone
self.develop = develop
self.sha1 = sha1
self.branch = branch
git_repos = {
# HDL.
"migen": GitRepo(url="https://github.com/m-labs/", clone="recursive"),
"amaranth": GitRepo(url="https://github.com/amaranth-lang/", branch="main"),
# LiteX SoC builder
"pythondata-software-picolibc": GitRepo(url="https://github.com/litex-hub/", clone="recursive"),
"pythondata-software-compiler_rt": GitRepo(url="https://github.com/litex-hub/"),
"litex": GitRepo(url="https://github.com/enjoy-digital/"),
# LiteX Cores Ecosystem.
"liteeth": GitRepo(url="https://github.com/enjoy-digital/"),
"litedram": GitRepo(url="https://github.com/enjoy-digital/"),
"litepcie": GitRepo(url="https://github.com/enjoy-digital/"),
"litesata": GitRepo(url="https://github.com/enjoy-digital/"),
"litesdcard": GitRepo(url="https://github.com/enjoy-digital/"),
"liteiclink": GitRepo(url="https://github.com/enjoy-digital/"),
"litescope": GitRepo(url="https://github.com/enjoy-digital/"),
"litejesd204b": GitRepo(url="https://github.com/enjoy-digital/"),
"litespi": GitRepo(url="https://github.com/litex-hub/"),
"litehyperbus": GitRepo(url="https://github.com/litex-hub/"),
# LiteX Boards.
"litex-boards": GitRepo(url="https://github.com/litex-hub/", clone="regular"),
# LiteX pythondata.
"pythondata-misc-tapcfg": GitRepo(url="https://github.com/litex-hub/"),
"pythondata-misc-usb_ohci": GitRepo(url="https://github.com/litex-hub/"),
"pythondata-cpu-lm32": GitRepo(url="https://github.com/litex-hub/"),
"pythondata-cpu-mor1kx": GitRepo(url="https://github.com/litex-hub/"),
"pythondata-cpu-picorv32": GitRepo(url="https://github.com/litex-hub/"),
"pythondata-cpu-serv": GitRepo(url="https://github.com/litex-hub/"),
"pythondata-cpu-vexriscv": GitRepo(url="https://github.com/litex-hub/"),
"pythondata-cpu-vexriscv-smp": GitRepo(url="https://github.com/litex-hub/", clone="recursive"),
"pythondata-cpu-rocket": GitRepo(url="https://github.com/litex-hub/"),
"pythondata-cpu-minerva": GitRepo(url="https://github.com/litex-hub/"),
"pythondata-cpu-microwatt": GitRepo(url="https://github.com/litex-hub/", sha1=0xdad611c),
"pythondata-cpu-blackparrot": GitRepo(url="https://github.com/litex-hub/"),
"pythondata-cpu-cv32e40p": GitRepo(url="https://github.com/litex-hub/", clone="recursive"),
"pythondata-cpu-ibex": GitRepo(url="https://github.com/litex-hub/", clone="recursive", sha1=0xd3d53df),
}
# Installs -----------------------------------------------------------------------------------------
# Minimal: Only Migen + LiteX.
minimal_repos = ["migen", "litex"]
# Standard: Migen + LiteX + Cores + Software + Popular CPUs (LM32, Mor1kx, SERV, VexRiscv).
standard_repos = list(git_repos.keys())
standard_repos.remove("amaranth")
standard_repos.remove("pythondata-cpu-picorv32")
standard_repos.remove("pythondata-cpu-rocket")
standard_repos.remove("pythondata-cpu-minerva")
standard_repos.remove("pythondata-cpu-microwatt")
standard_repos.remove("pythondata-cpu-blackparrot")
standard_repos.remove("pythondata-cpu-cv32e40p")
standard_repos.remove("pythondata-cpu-ibex")
# Full: Migen + LiteX + Cores + Software + All CPUs.
full_repos = list(git_repos.keys())
# Installs:
install_configs = {
"minimal" : minimal_repos,
"standard" : standard_repos,
"full" : full_repos,
}
# Script location / auto-update --------------------------------------------------------------------
def litex_setup_location_check():
# Check whether the script is being executed from inside a cloned LiteX repository or alongside it.
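# If a .gitignore is present in the current directory, assume we are inside the LiteX checkout
# and clone/update the other repositories one directory above, next to it.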
if os.path.exists(".gitignore"):
global current_path
current_path = os.path.join(current_path, "../")
def litex_setup_auto_update():
litex_setup_url = "https://raw.githubusercontent.com/enjoy-digital/litex/master/litex_setup.py"
current_sha1 = hashlib.sha1(open(os.path.realpath(__file__)).read().encode("utf-8")).hexdigest()
print_status("LiteX Setup auto-update...")
try:
import requests
r = requests.get(litex_setup_url)
if r.status_code != 404:
upstream_sha1 = hashlib.sha1(r.content).hexdigest()
if current_sha1 != upstream_sha1:
print_status("LiteX Setup is obsolete, updating.")
with open(os.path.realpath(__file__), "wb") as f:
f.write(r.content)
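# Re-exec the freshly written script with the same interpreter and arguments.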
os.execl(sys.executable, sys.executable, *sys.argv)
else:
print_status("LiteX Setup is up to date.")
except:
pass
# Git repositories initialization ------------------------------------------------------------------
def litex_setup_init_repos(config="standard", dev_mode=False):
print_status("Initializing Git repositories...")
print_status("--------------------------------")
for name in install_configs[config]:
repo = git_repos[name]
os.chdir(os.path.join(current_path))
if not os.path.exists(name):
# Clone Repo.
print_status(f"Cloning {name} Git repository...")
repo_url = repo.url
if dev_mode:
repo_url = repo_url.replace("https://github.com/", "git@github.com:")
subprocess.check_call("git clone {url} {options}".format(
url = repo_url + name + ".git",
options = "--recursive" if repo.clone == "recursive" else ""
), shell=True)
# Use specific SHA1 (Optional).
if repo.sha1 is not None:
os.chdir(os.path.join(current_path, name))
os.system(f"git checkout {repo.sha1:07x}")
else:
print_status(f"{name} Git Repo already present.")
# Git repositories update --------------------------------------------------------------------------
def litex_setup_update_repos(config="standard"):
print_status("Updating Git repositories...")
print_status("----------------------------")
for name in install_configs[config]:
repo = git_repos[name]
os.chdir(os.path.join(current_path))
# Check if Repo is present.
if not os.path.exists(name):
print_error(f"{name} Git repository is not initialized, please run --init first.")
raise SetupError
# Update Repo.
print_status(f"Updating {name} Git repository...")
os.chdir(os.path.join(current_path, name))
subprocess.check_call("git checkout " + repo.branch, shell=True)
subprocess.check_call("git pull --ff-only", shell=True)
# Recursive Update (Optional).
if repo.clone == "recursive":
subprocess.check_call("git submodule update --init --recursive", shell=True)
# Use specific SHA1 (Optional).
if repo.sha1 is not None:
os.chdir(os.path.join(current_path, name))
os.system(f"git checkout {repo.sha1:07x}")
# Git repositories install -------------------------------------------------------------------------
def litex_setup_install_repos(config="standard", user_mode=False):
print_status("Installing Git repositories...")
print_status("------------------------------")
for name in install_configs[config]:
repo = git_repos[name]
os.chdir(os.path.join(current_path))
# Install Repo.
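# Repositories flagged develop=True are installed with "setup.py develop" (editable mode),
# so local changes take effect without reinstalling.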
if repo.develop:
print_status(f"Installing {name} Git repository...")
os.chdir(os.path.join(current_path, name))
subprocess.check_call("python3 setup.py develop {options}".format(
options="--user" if user_mode else "",
), shell=True)
if user_mode:
if ".local/bin" not in os.environ.get("PATH", ""):
print_status("Make sure that ~/.local/bin is in your PATH")
print_status("export PATH=$PATH:~/.local/bin")
# GCC toolchains download --------------------------------------------------------------------------
def gcc_toolchain_download(url, filename):
print_status("Downloading GCC toolchain...")
print_status("----------------------------")
if not os.path.exists(filename):
full_url = url + filename
print_status(f"Downloading {full_url} to {filename}...")
urllib.request.urlretrieve(full_url, filename)
else:
print_status(f"Using existing file {filename}.")
print_status(f"Extracting {filename}...")
shutil.unpack_archive(filename)
# RISC-V toolchain.
# -----------------
def riscv_gcc_toolchain_download():
base_url = "https://static.dev.sifive.com/dev-tools/"
base_file = "riscv64-unknown-elf-gcc-8.3.0-2019.08.0-x86_64-"
# Windows
if (sys.platform.startswith("win") or sys.platform.startswith("cygwin")):
end_file = "w64-mingw32.zip"
# Linux
elif sys.platform.startswith("linux"):
os_release = (open("/etc/os-release").read()).lower()
if "fedora" in os_release:
end_file = "linux-centos6.tar.gz"
else:
end_file = "linux-ubuntu14.tar.gz"
# Mac OS
elif sys.platform.startswith("darwin"):
end_file = "apple-darwin.tar.gz"
else:
raise NotImplementedError(sys.platform)
# Download/Extract.
gcc_toolchain_download(url=base_url, filename=base_file + end_file)
# PowerPC toolchain download.
# ---------------------------
def powerpc_gcc_toolchain_download():
base_url = "https://toolchains.bootlin.com/downloads/releases/toolchains/powerpc64le-power8/tarballs/"
base_file = "powerpc64le-power8--musl--stable-2020.08-1.tar.bz2"
# Download/Extract.
gcc_toolchain_download(url=base_url, filename=base_file)
# OpenRISC toolchain download.
# ----------------------------
def openrisc_gcc_toolchain_download():
base_url = "https://toolchains.bootlin.com/downloads/releases/toolchains/openrisc/tarballs/"
base_file = "openrisc--musl--stable-2020.08-1.tar.bz2"
# Download/Extract.
gcc_toolchain_download(url=base_url, filename=base_file)
# LM32 toolchain download.
def lm32_gcc_toolchain_download():
base_url = ""
base_file = ""
raise NotImplementedError
# Run ----------------------------------------------------------------------------------------------
def main():
print_banner()
parser = argparse.ArgumentParser()
# Git Repositories.
parser.add_argument("--init", action="store_true", help="Initialize Git repositories.")
parser.add_argument("--update", action="store_true", help="Update Git repositories.")
parser.add_argument("--install", action="store_true", help="Install Git repositories.")
parser.add_argument("--user", action="store_true", help="Install in User-Mode.")
parser.add_argument("--config", default="standard", help="Install config (minimal, standard, full).")
# GCC toolchains.
parser.add_argument("--gcc", default=None, help="Download/Extract GCC Toolchain (riscv, powerpc, openrisc or lm32).")
# Development mode.
parser.add_argument("--dev", action="store_true", help="Development-Mode (no Auto-Update of litex_setup.py / Switch to [email protected] URLs).")
# Retro-compatibility.
parser.add_argument("compat_args", nargs="*", help="Retro-Compatibility arguments (init, update, install or gcc).")
args = parser.parse_args()
# Handle compat_args.
if args.compat_args is not None:
for arg in args.compat_args:
if arg in ["init", "update", "install"]:
setattr(args, arg, True)
if arg in ["gcc"]:
args.gcc = "riscv"
# Location/Auto-Update.
litex_setup_location_check()
if not args.dev:
litex_setup_auto_update()
# Init.
if args.init:
litex_setup_init_repos(config=args.config, dev_mode=args.dev)
# Update.
if args.update:
litex_setup_update_repos(config=args.config)
# Install.
if args.install:
litex_setup_install_repos(config=args.config, user_mode=args.user)
# GCC.
os.chdir(os.path.join(current_path))
if args.gcc == "riscv":
riscv_gcc_toolchain_download()
if args.gcc == "powerpc":
powerpc_gcc_toolchain_download()
if args.gcc == "openrisc":
openrisc_gcc_toolchain_download()
if args.gcc == "lm32":
lm32_gcc_toolchain_download()
if __name__ == "__main__":
main()
| [] | [] | ["PATH"] | [] | ["PATH"] | python | 1 | 0 | |
workflow/executor/src/test/java/com/asakusafw/workflow/executor/basic/BasicDeleteTaskExecutorTest.java
|
/**
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.workflow.executor.basic;
import static org.hamcrest.Matchers.*;
import static org.junit.Assert.*;
import java.io.File;
import java.util.Collections;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import com.asakusafw.workflow.executor.TaskExecutionContext;
import com.asakusafw.workflow.executor.TaskExecutor;
import com.asakusafw.workflow.model.DeleteTaskInfo.PathKind;
import com.asakusafw.workflow.model.basic.BasicDeleteTaskInfo;
/**
* Test for {@link BasicDeleteTaskExecutor}.
*/
public class BasicDeleteTaskExecutorTest {
/**
* temporary folder.
*/
@Rule
public final TemporaryFolder temporary = new TemporaryFolder();
private final TaskExecutionContext context = new BasicTaskExecutionContext(
new BasicExecutionContext()
.withEnvironmentVariables(m -> m.putAll(System.getenv())),
"b", "f", "e",
Collections.singletonMap("testing", "OK"));
/**
* simple case.
* @throws Exception if failed
*/
@Test
public void simple() throws Exception {
TaskExecutor executor = new BasicDeleteTaskExecutor();
temporary.newFile();
executor.execute(context, new BasicDeleteTaskInfo(
"testing",
PathKind.LOCAL_FILE_SYSTEM,
temporary.getRoot().getAbsolutePath()));
assertThat(temporary.getRoot().exists(), is(false));
}
/**
* attempt to delete missing files.
* @throws Exception if failed
*/
@Test
public void missing() throws Exception {
TaskExecutor executor = new BasicDeleteTaskExecutor();
executor.execute(context, new BasicDeleteTaskInfo(
"testing",
PathKind.LOCAL_FILE_SYSTEM,
new File(temporary.getRoot(), "__MISSING__").getAbsolutePath()));
assertThat(temporary.getRoot().exists(), is(true));
}
/**
* w/ variables.
* @throws Exception if failed
*/
@Test
public void vars() throws Exception {
TaskExecutor executor = new BasicDeleteTaskExecutor();
File batch = temporary.newFile(context.getBatchId());
File flow = temporary.newFile(context.getFlowId());
File exec = temporary.newFile(context.getExecutionId());
assertThat(batch.isFile(), is(true));
assertThat(flow.isFile(), is(true));
assertThat(exec.isFile(), is(true));
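// Paths containing ${batch_id}, ${flow_id} and ${execution_id} are expanded from the
// execution context before deletion (${user} presumably resolves to the current OS user).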
executor.execute(context, new BasicDeleteTaskInfo(
"testing",
PathKind.LOCAL_FILE_SYSTEM,
new File(temporary.getRoot(), "${batch_id}").getAbsolutePath()));
assertThat(batch.isFile(), is(false));
assertThat(flow.isFile(), is(true));
assertThat(exec.isFile(), is(true));
executor.execute(context, new BasicDeleteTaskInfo(
"testing",
PathKind.LOCAL_FILE_SYSTEM,
new File(temporary.getRoot(), "${flow_id}").getAbsolutePath()));
assertThat(batch.isFile(), is(false));
assertThat(flow.isFile(), is(false));
assertThat(exec.isFile(), is(true));
executor.execute(context, new BasicDeleteTaskInfo(
"testing",
PathKind.LOCAL_FILE_SYSTEM,
new File(temporary.getRoot(), "${execution_id}").getAbsolutePath()));
assertThat(batch.isFile(), is(false));
assertThat(flow.isFile(), is(false));
assertThat(exec.isFile(), is(false));
executor.execute(context, new BasicDeleteTaskInfo(
"testing",
PathKind.LOCAL_FILE_SYSTEM,
new File(temporary.getRoot(), "${user}").getAbsolutePath()));
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
_examples/configuration.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//go:build ignore
// +build ignore
package main
import (
"crypto/tls"
"log"
"net"
"net/http"
"time"
"github.com/elastic/go-elasticsearch/v8"
)
func main() {
log.SetFlags(0)
// This example demonstrates how to configure the client's Transport.
//
// NOTE: These values are for illustrative purposes only, and not suitable
// for any production use. The default transport is sufficient.
//
cfg := elasticsearch.Config{
Addresses: []string{"http://localhost:9200"},
Transport: &http.Transport{
MaxIdleConnsPerHost: 10,
ResponseHeaderTimeout: time.Millisecond,
DialContext: (&net.Dialer{Timeout: time.Nanosecond}).DialContext,
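// The tiny response/dial timeouts above make the Info() request below fail almost immediately.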
TLSClientConfig: &tls.Config{
MinVersion: tls.VersionTLS12,
// ...
},
},
}
es, err := elasticsearch.NewClient(cfg)
if err != nil {
log.Printf("Error creating the client: %s", err)
} else {
log.Println(es.Info())
// => dial tcp: i/o timeout
}
}
| [] | [] | [] | [] | [] | go | null | null | null |
Chapter10/Files/lambda/registryCreator/index.py
|
import os
import logging
import boto3
from botocore.exceptions import ClientError
sm = boto3.client("sagemaker")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
logger.debug("## Environment Variables ##")
logger.debug(os.environ)
logger.debug("## Event ##")
logger.debug(event)
props = event["ResourceProperties"]
group_name = props["GroupName"]
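# CloudFormation custom resources invoke this handler with a RequestType of Create, Update
# or Delete; only Create and Delete are handled below.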
if event["RequestType"] == "Create":
try:
response = sm.create_model_package_group(
ModelPackageGroupName=group_name,
ModelPackageGroupDescription="Models Package Group for Production Models",
Tags=[
{
"Key": "Name",
"Value": group_name
}
]
)
package_arn = response["ModelPackageGroupArn"]
logger.info(f"Created Model Model Package Group: {package_arn}")
return {
"PhysicalResourceId": group_name,
"Data": {
"ModelPackageArn": package_arn
}
}
except ClientError as e:
error_message = e.response["Error"]["Message"]
logging.error(f"Failed to create Model Package Group: {error_message}")
raise Exception(error_message)
elif event["RequestType"] == "Delete":
try:
response = sm.list_model_packages(
ModelPackageGroupName=group_name,
ModelApprovalStatus="Approved",
SortBy="CreationTime",
MaxResults=100
)
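# A model package group can only be deleted after its model packages have been removed.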
for model_package in response["ModelPackageSummaryList"]:
sm.delete_model_package(ModelPackageName=model_package["ModelPackageArn"])
sm.delete_model_package_group(ModelPackageGroupName=group_name)
logger.info(f"Deleted Model Package Group: {group_name}")
return {
"PhysicalResourceId": group_name,
"Data":{}
}
except ClientError as e:
error_message = e.response["Error"]["Message"]
logger.error(f"Failed to delete Model Package Group: {error_message}")
raise Exception(error_message)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
tests/sources/test_header_version.py
|
#!/usr/bin/env python
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test header version."""
import unittest
import os
import fnmatch
ignoredProtos = [
'projects/robots/mobsya/thymio/controllers/thymio2_aseba/aseba/clients/studio/plugins/ThymioVPL/UsageProfile.proto',
'projects/samples/tutorials/protos/FourWheelsRobot.proto'
]
skippedDirectories = [
'dependencies',
'distribution',
'.git'
]
class TestHeaderVersion(unittest.TestCase):
"""Unit test of the PROTO and world headers."""
def setUp(self):
"""Get all the PROTO files to be tested."""
# 1. Get Webots version (without revision)
self.version = None
with open(os.environ['WEBOTS_HOME'] + os.sep + 'resources' + os.sep + 'version.txt') as file:
content = file.read()
self.version = content.splitlines()[0].strip().split()[0]
# 2. Get all the PROTO files
self.files = []
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']):
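# Prune skipped directories in place so os.walk does not descend into them.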
dirNames[:] = [d for d in dirNames if d not in skippedDirectories]
for fileName in fnmatch.filter(fileNames, '*.proto'):
proto = os.path.join(rootPath, fileName)
shouldIgnore = False
for ignoredProto in ignoredProtos:
path = os.environ['WEBOTS_HOME'] + os.sep + ignoredProto.replace('/', os.sep)
if proto == path:
shouldIgnore = True
break
if not shouldIgnore:
self.files.append((proto, '#VRML_SIM %s utf8' % self.version))
# 3. Get all the world files
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']):
dirNames[:] = [d for d in dirNames if d not in skippedDirectories]
for fileName in fnmatch.filter(fileNames, '*.wbt'):
world = os.path.join(rootPath, fileName)
self.files.append((world, '#VRML_SIM %s utf8' % self.version))
# 4. Get all the .wbproj files
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']):
dirNames[:] = [d for d in dirNames if d not in skippedDirectories]
for fileName in fnmatch.filter(fileNames, '*.wbproj'):
projFile = os.path.join(rootPath, fileName)
self.files.append((projFile, 'Webots Project File version %s' % self.version))
def test_header_version(self):
"""Test that the PROTO and world files have the correct header."""
for currentFile in self.files:
fileToTest = currentFile[0]
with open(fileToTest) as file:
content = file.read()
if content == '':
continue
line = content.splitlines()[0].strip()
self.assertTrue(
line.startswith(currentFile[1]),
msg='Wrong header in file: "%s"' % fileToTest
)
if __name__ == '__main__':
unittest.main()
| [] | [] | ["WEBOTS_HOME"] | [] | ["WEBOTS_HOME"] | python | 1 | 0 | |
dependencies/panda/direct/dist/FreezeTool.py
|
""" This module contains code to freeze a number of Python modules
into a single (mostly) standalone DLL or EXE. """
import modulefinder
import sys
import os
import marshal
import imp
import platform
import struct
import io
import distutils.sysconfig as sysconf
import zipfile
import importlib
from . import pefile
# Temporary (?) try..except to protect against unbuilt p3extend_frozen.
try:
import p3extend_frozen
except ImportError:
p3extend_frozen = None
from panda3d.core import *
# Check to see if we are running python_d, which implies we have a
# debug build, and we have to build the module with debug options.
# This is only relevant on Windows.
# I wonder if there's a better way to determine this?
python = os.path.splitext(os.path.split(sys.executable)[1])[0]
isDebugBuild = (python.lower().endswith('_d'))
# These are modules that Python always tries to import up-front. They
# must be frozen in any main.exe.
# NB. if encodings are removed, be sure to remove them from the shortcut in
# deploy-stub.c.
startupModules = [
'imp', 'encodings', 'encodings.*',
]
if sys.version_info >= (3, 0):
# Modules specific to Python 3
startupModules += ['io', 'marshal', 'importlib.machinery', 'importlib.util']
else:
# Modules specific to Python 2
startupModules += []
# These are some special init functions for some built-in Python modules that
# deviate from the standard naming convention. A value of None means that a
# dummy entry should be written to the inittab.
builtinInitFuncs = {
'builtins': None,
'__builtin__': None,
'sys': None,
'exceptions': None,
'_warnings': '_PyWarnings_Init',
'marshal': 'PyMarshal_Init',
}
if sys.version_info < (3, 7):
builtinInitFuncs['_imp'] = 'PyInit_imp'
# These are hidden imports: modules that are not discovered automatically when the listed
# modules are frozen. Add them to an include list so users do not have to do this manually.
try:
from pytest import freeze_includes as pytest_imports
except ImportError:
def pytest_imports():
return []
hiddenImports = {
'pytest': pytest_imports(),
'pkg_resources': [
'pkg_resources.*.*',
],
'xml.etree.cElementTree': ['xml.etree.ElementTree'],
'datetime': ['_strptime'],
'keyring.backends': ['keyring.backends.*'],
'matplotlib.font_manager': ['encodings.mac_roman'],
'direct.particles': ['direct.particles.ParticleManagerGlobal'],
'numpy.core._multiarray_umath': [
'numpy.core._internal',
'numpy.core._dtype_ctypes',
'numpy.core._methods',
],
}
if sys.version_info >= (3,):
hiddenImports['matplotlib.backends._backend_tk'] = ['tkinter']
else:
hiddenImports['matplotlib.backends._backend_tk'] = ['Tkinter']
# These are overrides for specific modules.
overrideModules = {
# Used by the warnings module, among others, to get line numbers. Since
# we set __file__, this would cause it to try and extract Python code
# lines from the main executable, which we don't want.
'linecache': """__all__ = ["getline", "clearcache", "checkcache"]
cache = {}
def getline(filename, lineno, module_globals=None):
return ''
def clearcache():
global cache
cache = {}
def getlines(filename, module_globals=None):
return []
def checkcache(filename=None):
pass
def updatecache(filename, module_globals=None):
pass
def lazycache(filename, module_globals):
pass
""",
}
# These are missing modules that we've reported already this session.
reportedMissing = {}
class CompilationEnvironment:
""" Create an instance of this class to record the commands to
invoke the compiler on a given platform. If needed, the caller
can create a custom instance of this class (or simply set the
compile strings directly) to customize the build environment. """
def __init__(self, platform):
self.platform = platform
# The command to compile a c to an object file. Replace %(basename)s
# with the basename of the source file, and an implicit .c extension.
self.compileObj = 'error'
# The command to link a single object file into an executable. As
# above, replace $(basename)s with the basename of the original source
# file, and of the target executable.
self.linkExe = 'error'
# The command to link a single object file into a shared library.
self.linkDll = 'error'
# Paths to Python stuff.
self.Python = None
self.PythonIPath = sysconf.get_python_inc()
self.PythonVersion = sysconf.get_config_var("LDVERSION") or sysconf.get_python_version()
# The VC directory of Microsoft Visual Studio (if relevant)
self.MSVC = None
# Directory to Windows Platform SDK (if relevant)
self.PSDK = None
# The setting to control release vs. debug builds. Only relevant on
# Windows.
self.MD = None
# Added to the path to the MSVC bin and lib directories on 64-bits Windows.
self.suffix64 = ''
# The _d extension to add to dll filenames on Windows in debug builds.
self.dllext = ''
# Any architecture-specific string.
self.arch = ''
self.determineStandardSetup()
def determineStandardSetup(self):
if self.platform.startswith('win'):
self.Python = sysconf.PREFIX
if ('VCINSTALLDIR' in os.environ):
self.MSVC = os.environ['VCINSTALLDIR']
elif (Filename('/c/Program Files/Microsoft Visual Studio 9.0/VC').exists()):
self.MSVC = Filename('/c/Program Files/Microsoft Visual Studio 9.0/VC').toOsSpecific()
elif (Filename('/c/Program Files (x86)/Microsoft Visual Studio 9.0/VC').exists()):
self.MSVC = Filename('/c/Program Files (x86)/Microsoft Visual Studio 9.0/VC').toOsSpecific()
elif (Filename('/c/Program Files/Microsoft Visual Studio .NET 2003/Vc7').exists()):
self.MSVC = Filename('/c/Program Files/Microsoft Visual Studio .NET 2003/Vc7').toOsSpecific()
else:
print('Could not locate Microsoft Visual C++ Compiler! Try running from the Visual Studio Command Prompt.')
sys.exit(1)
if ('WindowsSdkDir' in os.environ):
self.PSDK = os.environ['WindowsSdkDir']
elif (platform.architecture()[0] == '32bit' and Filename('/c/Program Files/Microsoft Platform SDK for Windows Server 2003 R2').exists()):
self.PSDK = Filename('/c/Program Files/Microsoft Platform SDK for Windows Server 2003 R2').toOsSpecific()
elif (os.path.exists(os.path.join(self.MSVC, 'PlatformSDK'))):
self.PSDK = os.path.join(self.MSVC, 'PlatformSDK')
else:
print('Could not locate the Microsoft Windows Platform SDK! Try running from the Visual Studio Command Prompt.')
sys.exit(1)
# We need to use the correct compiler setting for debug vs. release builds.
self.MD = '/MD'
if isDebugBuild:
self.MD = '/MDd'
self.dllext = '_d'
# MSVC/bin and /lib directories have a different location
# for win64.
if self.platform == 'win_amd64':
self.suffix64 = '\\amd64'
# If it is run by makepanda, it handles the MSVC and PlatformSDK paths itself.
if ('MAKEPANDA' in os.environ):
self.compileObjExe = 'cl /wd4996 /Fo%(basename)s.obj /nologo /c %(MD)s /Zi /O2 /Ob2 /EHsc /Zm300 /W3 /I"%(pythonIPath)s" %(filename)s'
self.compileObjDll = self.compileObjExe
self.linkExe = 'link /nologo /MAP:NUL /FIXED:NO /OPT:REF /STACK:4194304 /INCREMENTAL:NO /LIBPATH:"%(python)s\\libs" /out:%(basename)s.exe %(basename)s.obj'
self.linkDll = 'link /nologo /DLL /MAP:NUL /FIXED:NO /OPT:REF /INCREMENTAL:NO /LIBPATH:"%(python)s\\libs" /out:%(basename)s%(dllext)s.pyd %(basename)s.obj'
else:
os.environ['PATH'] += ';' + self.MSVC + '\\bin' + self.suffix64 + ';' + self.MSVC + '\\Common7\\IDE;' + self.PSDK + '\\bin'
self.compileObjExe = 'cl /wd4996 /Fo%(basename)s.obj /nologo /c %(MD)s /Zi /O2 /Ob2 /EHsc /Zm300 /W3 /I"%(pythonIPath)s" /I"%(PSDK)s\\include" /I"%(MSVC)s\\include" %(filename)s'
self.compileObjDll = self.compileObjExe
self.linkExe = 'link /nologo /MAP:NUL /FIXED:NO /OPT:REF /STACK:4194304 /INCREMENTAL:NO /LIBPATH:"%(PSDK)s\\lib" /LIBPATH:"%(MSVC)s\\lib%(suffix64)s" /LIBPATH:"%(python)s\\libs" /out:%(basename)s.exe %(basename)s.obj'
self.linkDll = 'link /nologo /DLL /MAP:NUL /FIXED:NO /OPT:REF /INCREMENTAL:NO /LIBPATH:"%(PSDK)s\\lib" /LIBPATH:"%(MSVC)s\\lib%(suffix64)s" /LIBPATH:"%(python)s\\libs" /out:%(basename)s%(dllext)s.pyd %(basename)s.obj'
elif self.platform.startswith('osx_'):
# OSX
proc = self.platform.split('_', 1)[1]
if proc == 'i386':
self.arch = '-arch i386'
elif proc == 'ppc':
self.arch = '-arch ppc'
elif proc == 'amd64':
self.arch = '-arch x86_64'
self.compileObjExe = "gcc -c %(arch)s -o %(basename)s.o -O2 -I%(pythonIPath)s %(filename)s"
self.compileObjDll = "gcc -fPIC -c %(arch)s -o %(basename)s.o -O2 -I%(pythonIPath)s %(filename)s"
self.linkExe = "gcc %(arch)s -o %(basename)s %(basename)s.o -framework Python"
self.linkDll = "gcc %(arch)s -undefined dynamic_lookup -bundle -o %(basename)s.so %(basename)s.o"
else:
# Unix
lib_dir = sysconf.get_python_lib(plat_specific=1, standard_lib=1)
#python_a = os.path.join(lib_dir, "config", "libpython%(pythonVersion)s.a")
self.compileObjExe = "%(CC)s %(CFLAGS)s -c -o %(basename)s.o -pthread -O2 %(filename)s -I%(pythonIPath)s"
self.compileObjDll = "%(CC)s %(CFLAGS)s %(CCSHARED)s -c -o %(basename)s.o -O2 %(filename)s -I%(pythonIPath)s"
self.linkExe = "%(CC)s -o %(basename)s %(basename)s.o -L/usr/local/lib -lpython%(pythonVersion)s"
self.linkDll = "%(LDSHARED)s -o %(basename)s.so %(basename)s.o -L/usr/local/lib -lpython%(pythonVersion)s"
if (os.path.isdir("/usr/PCBSD/local/lib")):
self.linkExe += " -L/usr/PCBSD/local/lib"
self.linkDll += " -L/usr/PCBSD/local/lib"
def compileExe(self, filename, basename, extraLink=[]):
compile = self.compileObjExe % dict({
'python' : self.Python,
'MSVC' : self.MSVC,
'PSDK' : self.PSDK,
'suffix64' : self.suffix64,
'MD' : self.MD,
'pythonIPath' : self.PythonIPath,
'pythonVersion' : self.PythonVersion,
'arch' : self.arch,
'filename' : filename,
'basename' : basename,
}, **sysconf.get_config_vars())
sys.stderr.write(compile + '\n')
if os.system(compile) != 0:
raise Exception('failed to compile %s.' % basename)
link = self.linkExe % dict({
'python' : self.Python,
'MSVC' : self.MSVC,
'PSDK' : self.PSDK,
'suffix64' : self.suffix64,
'pythonIPath' : self.PythonIPath,
'pythonVersion' : self.PythonVersion,
'arch' : self.arch,
'filename' : filename,
'basename' : basename,
}, **sysconf.get_config_vars())
link += ' ' + ' '.join(extraLink)
sys.stderr.write(link + '\n')
if os.system(link) != 0:
raise Exception('failed to link %s.' % basename)
def compileDll(self, filename, basename, extraLink=[]):
compile = self.compileObjDll % dict({
'python' : self.Python,
'MSVC' : self.MSVC,
'PSDK' : self.PSDK,
'suffix64' : self.suffix64,
'MD' : self.MD,
'pythonIPath' : self.PythonIPath,
'pythonVersion' : self.PythonVersion,
'arch' : self.arch,
'filename' : filename,
'basename' : basename,
}, **sysconf.get_config_vars())
sys.stderr.write(compile + '\n')
if os.system(compile) != 0:
raise Exception('failed to compile %s.' % basename)
link = self.linkDll % dict({
'python' : self.Python,
'MSVC' : self.MSVC,
'PSDK' : self.PSDK,
'suffix64' : self.suffix64,
'pythonIPath' : self.PythonIPath,
'pythonVersion' : self.PythonVersion,
'arch' : self.arch,
'filename' : filename,
'basename' : basename,
'dllext' : self.dllext,
}, **sysconf.get_config_vars())
link += ' ' + ' '.join(extraLink)
sys.stderr.write(link + '\n')
if os.system(link) != 0:
raise Exception('failed to link %s.' % basename)
# The code from frozenmain.c in the Python source repository.
frozenMainCode = """
/* Python interpreter main program for frozen scripts */
#include <Python.h>
#if PY_MAJOR_VERSION >= 3
#include <locale.h>
#if PY_MINOR_VERSION < 5
#define Py_DecodeLocale _Py_char2wchar
#endif
#endif
#ifdef MS_WINDOWS
extern void PyWinFreeze_ExeInit(void);
extern void PyWinFreeze_ExeTerm(void);
extern PyAPI_FUNC(int) PyImport_ExtendInittab(struct _inittab *newtab);
#endif
/* Main program */
int
Py_FrozenMain(int argc, char **argv)
{
char *p;
int n, sts = 1;
int inspect = 0;
int unbuffered = 0;
#if PY_MAJOR_VERSION >= 3
int i;
char *oldloc;
wchar_t **argv_copy = NULL;
/* We need a second copy, as Python might modify the first one. */
wchar_t **argv_copy2 = NULL;
if (argc > 0) {
argv_copy = (wchar_t **)alloca(sizeof(wchar_t *) * argc);
argv_copy2 = (wchar_t **)alloca(sizeof(wchar_t *) * argc);
}
#endif
Py_FrozenFlag = 1; /* Suppress errors from getpath.c */
Py_NoSiteFlag = 1;
Py_NoUserSiteDirectory = 1;
if ((p = Py_GETENV("PYTHONINSPECT")) && *p != '\\0')
inspect = 1;
if ((p = Py_GETENV("PYTHONUNBUFFERED")) && *p != '\\0')
unbuffered = 1;
if (unbuffered) {
setbuf(stdin, (char *)NULL);
setbuf(stdout, (char *)NULL);
setbuf(stderr, (char *)NULL);
}
#if PY_MAJOR_VERSION >= 3
oldloc = setlocale(LC_ALL, NULL);
setlocale(LC_ALL, \"\");
for (i = 0; i < argc; i++) {
argv_copy[i] = Py_DecodeLocale(argv[i], NULL);
argv_copy2[i] = argv_copy[i];
if (!argv_copy[i]) {
fprintf(stderr, \"Unable to decode the command line argument #%i\\n\",
i + 1);
argc = i;
goto error;
}
}
setlocale(LC_ALL, oldloc);
#endif
#ifdef MS_WINDOWS
PyImport_ExtendInittab(extensions);
#endif /* MS_WINDOWS */
if (argc >= 1) {
#if PY_MAJOR_VERSION >= 3
Py_SetProgramName(argv_copy[0]);
#else
Py_SetProgramName(argv[0]);
#endif
}
Py_Initialize();
#ifdef MS_WINDOWS
PyWinFreeze_ExeInit();
#endif
if (Py_VerboseFlag)
fprintf(stderr, "Python %s\\n%s\\n",
Py_GetVersion(), Py_GetCopyright());
#if PY_MAJOR_VERSION >= 3
PySys_SetArgv(argc, argv_copy);
#else
PySys_SetArgv(argc, argv);
#endif
n = PyImport_ImportFrozenModule("__main__");
if (n == 0)
Py_FatalError("__main__ not frozen");
if (n < 0) {
PyErr_Print();
sts = 1;
}
else
sts = 0;
if (inspect && isatty((int)fileno(stdin)))
sts = PyRun_AnyFile(stdin, "<stdin>") != 0;
#ifdef MS_WINDOWS
PyWinFreeze_ExeTerm();
#endif
Py_Finalize();
#if PY_MAJOR_VERSION >= 3
error:
if (argv_copy2) {
for (i = 0; i < argc; i++) {
#if PY_MINOR_VERSION >= 4
PyMem_RawFree(argv_copy2[i]);
#else
PyMem_Free(argv_copy2[i]);
#endif
}
}
#endif
return sts;
}
"""
# The code from frozen_dllmain.c in the Python source repository.
# Windows only.
frozenDllMainCode = """
#include <windows.h>
static char *possibleModules[] = {
"pywintypes",
"pythoncom",
"win32ui",
NULL,
};
BOOL CallModuleDllMain(char *modName, DWORD dwReason);
/*
Called by a frozen .EXE only, so that built-in extension
modules are initialized correctly
*/
void PyWinFreeze_ExeInit(void)
{
char **modName;
for (modName = possibleModules;*modName;*modName++) {
/* printf("Initialising '%s'\\n", *modName); */
CallModuleDllMain(*modName, DLL_PROCESS_ATTACH);
}
}
/*
Called by a frozen .EXE only, so that built-in extension
modules are cleaned up
*/
void PyWinFreeze_ExeTerm(void)
{
// Must go backwards
char **modName;
for (modName = possibleModules+(sizeof(possibleModules) / sizeof(char *))-2;
modName >= possibleModules;
*modName--) {
/* printf("Terminating '%s'\\n", *modName);*/
CallModuleDllMain(*modName, DLL_PROCESS_DETACH);
}
}
BOOL WINAPI DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
BOOL ret = TRUE;
switch (dwReason) {
case DLL_PROCESS_ATTACH:
{
char **modName;
for (modName = possibleModules;*modName;*modName++) {
BOOL ok = CallModuleDllMain(*modName, dwReason);
if (!ok)
ret = FALSE;
}
break;
}
case DLL_PROCESS_DETACH:
{
// Must go backwards
char **modName;
for (modName = possibleModules+(sizeof(possibleModules) / sizeof(char *))-2;
modName >= possibleModules;
*modName--)
CallModuleDllMain(*modName, DLL_PROCESS_DETACH);
break;
}
}
return ret;
}
BOOL CallModuleDllMain(char *modName, DWORD dwReason)
{
BOOL (WINAPI * pfndllmain)(HINSTANCE, DWORD, LPVOID);
char funcName[255];
HMODULE hmod = GetModuleHandle(NULL);
strcpy(funcName, "_DllMain");
strcat(funcName, modName);
strcat(funcName, "@12"); // stdcall convention.
pfndllmain = (BOOL (WINAPI *)(HINSTANCE, DWORD, LPVOID))GetProcAddress(hmod, funcName);
if (pfndllmain==NULL) {
/* No function by that name exported - then that module does
not appear in our frozen program - return OK
*/
return TRUE;
}
return (*pfndllmain)(hmod, dwReason, NULL);
}
"""
# Our own glue code to start up a Python executable.
mainInitCode = """
%(frozenMainCode)s
int
main(int argc, char *argv[]) {
PyImport_FrozenModules = _PyImport_FrozenModules;
return Py_FrozenMain(argc, argv);
}
"""
# Our own glue code to start up a Python shared library.
dllInitCode = """
/*
* Call this function to extend the frozen modules array with a new
* array of frozen modules, provided in a C-style array, at runtime.
* Returns the total number of frozen modules.
*/
static int
extend_frozen_modules(const struct _frozen *new_modules, int new_count) {
int orig_count;
struct _frozen *realloc_FrozenModules;
/* First, count the number of frozen modules we had originally. */
orig_count = 0;
while (PyImport_FrozenModules[orig_count].name != NULL) {
++orig_count;
}
if (new_count == 0) {
/* Trivial no-op. */
return orig_count;
}
/* Reallocate the PyImport_FrozenModules array bigger to make room
for the additional frozen modules. We just leak the original
array; it's too risky to try to free it. */
realloc_FrozenModules = (struct _frozen *)malloc((orig_count + new_count + 1) * sizeof(struct _frozen));
/* The new frozen modules go at the front of the list. */
memcpy(realloc_FrozenModules, new_modules, new_count * sizeof(struct _frozen));
/* Then the original set of frozen modules. */
memcpy(realloc_FrozenModules + new_count, PyImport_FrozenModules, orig_count * sizeof(struct _frozen));
/* Finally, a single 0-valued entry marks the end of the array. */
memset(realloc_FrozenModules + orig_count + new_count, 0, sizeof(struct _frozen));
/* Assign the new pointer. */
PyImport_FrozenModules = realloc_FrozenModules;
return orig_count + new_count;
}
#if PY_MAJOR_VERSION >= 3
static PyModuleDef mdef = {
PyModuleDef_HEAD_INIT,
"%(moduleName)s",
"",
-1,
NULL, NULL, NULL, NULL, NULL
};
%(dllexport)sPyObject *PyInit_%(moduleName)s(void) {
extend_frozen_modules(_PyImport_FrozenModules, sizeof(_PyImport_FrozenModules) / sizeof(struct _frozen));
return PyModule_Create(&mdef);
}
#else
static PyMethodDef nullMethods[] = {
{NULL, NULL}
};
%(dllexport)svoid init%(moduleName)s(void) {
extend_frozen_modules(_PyImport_FrozenModules, sizeof(_PyImport_FrozenModules) / sizeof(struct _frozen));
Py_InitModule("%(moduleName)s", nullMethods);
}
#endif
"""
programFile = """
#include <Python.h>
#ifdef _WIN32
#include <malloc.h>
#endif
%(moduleDefs)s
struct _frozen _PyImport_FrozenModules[] = {
%(moduleList)s
{NULL, NULL, 0}
};
"""
okMissing = [
'__main__', '_dummy_threading', 'Carbon', 'Carbon.Files',
'Carbon.Folder', 'Carbon.Folders', 'HouseGlobals', 'Carbon.File',
'MacOS', '_emx_link', 'ce', 'mac', 'org.python.core', 'os.path',
'os2', 'posix', 'pwd', 'readline', 'riscos', 'riscosenviron',
'riscospath', 'dbm', 'fcntl', 'win32api', 'win32pipe', 'usercustomize',
'_winreg', 'winreg', 'ctypes', 'ctypes.wintypes', 'nt','msvcrt',
'EasyDialogs', 'SOCKS', 'ic', 'rourl2path', 'termios', 'vms_lib',
'OverrideFrom23._Res', 'email', 'email.Utils', 'email.Generator',
'email.Iterators', '_subprocess', 'gestalt', 'java.lang',
'direct.extensions_native.extensions_darwin',
]
class Freezer:
class ModuleDef:
def __init__(self, moduleName, filename = None,
implicit = False, guess = False,
exclude = False, forbid = False,
allowChildren = False, fromSource = None,
text = None):
# The Python module name.
self.moduleName = moduleName
# The file on disk it was loaded from, if any.
self.filename = filename
if filename is not None and not isinstance(filename, Filename):
self.filename = Filename(filename)
# True if the module was found via the modulefinder.
self.implicit = implicit
# True if the moduleName might refer to some Python object
# other than a module, in which case the module should be
# ignored.
self.guess = guess
# True if the module should *not* be included in the
# generated output.
self.exclude = exclude
# True if the module should never be allowed, even if it
# exists at runtime.
self.forbid = forbid
# True if excluding the module still allows its children
# to be included. This only makes sense if the module
# will exist at runtime through some other means
# (e.g. from another package).
self.allowChildren = allowChildren
# Additional black-box information about where this module
# record came from, supplied by the caller.
self.fromSource = fromSource
# If this is set, it contains Python code of the module.
self.text = text
# Some sanity checks.
if not self.exclude:
self.allowChildren = True
if self.forbid:
self.exclude = True
self.allowChildren = False
def __repr__(self):
args = [repr(self.moduleName), repr(self.filename)]
if self.implicit:
args.append('implicit = True')
if self.guess:
args.append('guess = True')
if self.exclude:
args.append('exclude = True')
if self.forbid:
args.append('forbid = True')
if self.allowChildren:
args.append('allowChildren = True')
return 'ModuleDef(%s)' % (', '.join(args))
def __init__(self, previous = None, debugLevel = 0,
platform = None, path=None):
# Normally, we are freezing for our own platform. Change this
# if untrue.
self.platform = platform or PandaSystem.getPlatform()
# This is the compilation environment. Fill in your own
# object here if you have custom needs (for instance, for a
# cross-compiler or something). If this is None, then a
# default object will be created when it is needed.
self.cenv = None
# This is the search path to use for Python modules. Leave it
# to the default value of None to use sys.path.
self.path = path
# The filename extension to append to the source file before
# compiling.
self.sourceExtension = '.c'
# The filename extension to append to the object file.
self.objectExtension = '.o'
if self.platform.startswith('win'):
self.objectExtension = '.obj'
self.keepTemporaryFiles = False
# Change any of these to change the generated startup and glue
# code.
self.frozenMainCode = frozenMainCode
self.frozenDllMainCode = frozenDllMainCode
self.mainInitCode = mainInitCode
# Set this true to encode Python files in a Multifile as their
# original source if possible, or false to encode them as
# compiled pyc or pyo files. This has no effect on frozen exe
# or dll's; those are always stored with compiled code.
self.storePythonSource = False
# This list will be filled in by generateCode() or
# addToMultifile(). It contains a list of all the extension
# modules that were discovered, which have not been added to
# the output. The list is a list of tuples of the form
# (moduleName, filename). filename will be None for built-in
# modules.
self.extras = []
# Set this to true if extension modules should be linked in to
# the resulting executable.
self.linkExtensionModules = False
# End of public interface. These remaining members should not
# be directly manipulated by callers.
self.previousModules = {}
self.modules = {}
if previous:
self.previousModules = dict(previous.modules)
self.modules = dict(previous.modules)
# Exclude doctest by default; it is not very useful in production
# builds. It can be explicitly included if desired.
self.modules['doctest'] = self.ModuleDef('doctest', exclude = True)
self.mf = None
# Actually, make sure we know how to find all of the
# already-imported modules. (Some of them might do their own
# special path mangling.)
for moduleName, module in list(sys.modules.items()):
if module and getattr(module, '__path__', None) is not None:
path = list(getattr(module, '__path__'))
if path:
modulefinder.AddPackagePath(moduleName, path[0])
# Suffix/extension for Python C extension modules
if self.platform == PandaSystem.getPlatform():
self.moduleSuffixes = imp.get_suffixes()
# Set extension for Python files to binary mode
for i, suffix in enumerate(self.moduleSuffixes):
if suffix[2] == imp.PY_SOURCE:
self.moduleSuffixes[i] = (suffix[0], 'rb', imp.PY_SOURCE)
else:
self.moduleSuffixes = [('.py', 'rb', 1), ('.pyc', 'rb', 2)]
abi_version = '{0}{1}'.format(*sys.version_info)
abi_flags = ''
if sys.version_info < (3, 8):
abi_flags += 'm'
if 'linux' in self.platform:
self.moduleSuffixes += [
('.cpython-{0}{1}-x86_64-linux-gnu.so'.format(abi_version, abi_flags), 'rb', 3),
('.cpython-{0}{1}-i686-linux-gnu.so'.format(abi_version, abi_flags), 'rb', 3),
('.abi{0}.so'.format(sys.version_info[0]), 'rb', 3),
('.so', 'rb', 3),
]
elif 'win' in self.platform:
# ABI flags are not appended on Windows.
self.moduleSuffixes += [
('.cp{0}-win_amd64.pyd'.format(abi_version), 'rb', 3),
('.cp{0}-win32.pyd'.format(abi_version), 'rb', 3),
('.pyd', 'rb', 3),
]
elif 'mac' in self.platform:
self.moduleSuffixes += [
('.cpython-{0}{1}-darwin.so'.format(abi_version, abi_flags), 'rb', 3),
('.abi{0}.so'.format(sys.version_info[0]), 'rb', 3),
('.so', 'rb', 3),
]
else: # FreeBSD et al.
self.moduleSuffixes += [
('.cpython-{0}{1}.so'.format(abi_version, abi_flags), 'rb', 3),
('.abi{0}.so'.format(sys.version_info[0]), 'rb', 3),
('.so', 'rb', 3),
]
def excludeFrom(self, freezer):
""" Excludes all modules that have already been processed by
the indicated FreezeTool. This is equivalent to passing the
indicated FreezeTool object as previous to this object's
constructor, but it may be called at any point during
processing. """
for key, value in list(freezer.modules.items()):
self.previousModules[key] = value
self.modules[key] = value
def excludeModule(self, moduleName, forbid = False, allowChildren = False,
fromSource = None):
""" Adds a module to the list of modules not to be exported by
this tool. If forbid is true, the module is furthermore
forbidden to be imported, even if it exists on disk. If
allowChildren is true, the children of the indicated module
may still be included."""
assert self.mf is None
self.modules[moduleName] = self.ModuleDef(
moduleName, exclude = True,
forbid = forbid, allowChildren = allowChildren,
fromSource = fromSource)
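# Illustrative usage sketch (not part of the original source; 'freezer' stands
# for an instance of this class and the module names are hypothetical):
#   freezer.excludeModule('doctest')                        # leave it out of the output
#   freezer.excludeModule('mygame.devtools', forbid=True)   # also fail imports at runtime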
def handleCustomPath(self, moduleName):
""" Indicates a module that may perform runtime manipulation
of its __path__ variable, and which must therefore be actually
imported at runtime in order to determine the true value of
__path__. """
str = 'import %s' % (moduleName)
exec(str)
module = sys.modules[moduleName]
for path in module.__path__:
modulefinder.AddPackagePath(moduleName, path)
def getModulePath(self, moduleName):
""" Looks for the indicated directory module and returns the
__path__ member: the list of directories in which its python
files can be found. If the module is a .py file and not a
directory, returns None. """
# First, try to import the module directly. That's the most
# reliable answer, if it works.
try:
module = __import__(moduleName)
except:
print("couldn't import %s" % (moduleName))
module = None
if module is not None:
for symbol in moduleName.split('.')[1:]:
module = getattr(module, symbol)
if hasattr(module, '__path__'):
return module.__path__
# If it didn't work--maybe the module is unimportable because
# it makes certain assumptions about the builtins, or
# whatever--then just look for file on disk. That's usually
# good enough.
path = None
baseName = moduleName
if '.' in baseName:
parentName, baseName = moduleName.rsplit('.', 1)
path = self.getModulePath(parentName)
if path is None:
return None
try:
file, pathname, description = imp.find_module(baseName, path)
except ImportError:
return None
if not os.path.isdir(pathname):
return None
return [pathname]
def getModuleStar(self, moduleName):
""" Looks for the indicated directory module and returns the
__all__ member: the list of symbols within the module. """
# First, try to import the module directly. That's the most
# reliable answer, if it works.
try:
module = __import__(moduleName)
except:
print("couldn't import %s" % (moduleName))
module = None
if module is not None:
for symbol in moduleName.split('.')[1:]:
module = getattr(module, symbol)
if hasattr(module, '__all__'):
return module.__all__
# If it didn't work, just open the directory and scan for *.py
# files.
path = None
baseName = moduleName
if '.' in baseName:
parentName, baseName = moduleName.rsplit('.', 1)
path = self.getModulePath(parentName)
if path is None:
return None
try:
file, pathname, description = imp.find_module(baseName, path)
except ImportError:
return None
if not os.path.isdir(pathname):
return None
# Scan the directory, looking for .py files.
modules = []
for basename in os.listdir(pathname):
if basename.endswith('.py') and basename != '__init__.py':
modules.append(basename[:-3])
return modules
def _gatherSubmodules(self, moduleName, implicit = False, newName = None,
filename = None, guess = False, fromSource = None,
text = None):
if not newName:
newName = moduleName
assert(moduleName.endswith('.*'))
assert(newName.endswith('.*'))
mdefs = {}
# Find the parent module, so we can get its directory.
parentName = moduleName[:-2]
newParentName = newName[:-2]
parentNames = [(parentName, newParentName)]
if parentName.endswith('.*'):
assert(newParentName.endswith('.*'))
# Another special case. The parent name "*" means to
# return all possible directories within a particular
# directory.
topName = parentName[:-2]
newTopName = newParentName[:-2]
parentNames = []
modulePath = self.getModulePath(topName)
if modulePath:
for dirname in modulePath:
for basename in os.listdir(dirname):
if os.path.exists(os.path.join(dirname, basename, '__init__.py')):
parentName = '%s.%s' % (topName, basename)
newParentName = '%s.%s' % (newTopName, basename)
if self.getModulePath(parentName):
parentNames.append((parentName, newParentName))
for parentName, newParentName in parentNames:
modules = self.getModuleStar(parentName)
if modules is None:
# It's actually a regular module.
mdefs[newParentName] = self.ModuleDef(
parentName, implicit = implicit, guess = guess,
fromSource = fromSource, text = text)
else:
# Now get all the py files in the parent directory.
for basename in modules:
moduleName = '%s.%s' % (parentName, basename)
newName = '%s.%s' % (newParentName, basename)
mdefs[newName] = self.ModuleDef(
moduleName, implicit = implicit, guess = True,
fromSource = fromSource)
return mdefs
def addModule(self, moduleName, implicit = False, newName = None,
filename = None, guess = False, fromSource = None,
text = None):
""" Adds a module to the list of modules to be exported by
this tool. If implicit is true, it is OK if the module does
not actually exist.
newName is the name to call the module when it appears in the
output. The default is the same name it had in the original.
Use caution when renaming a module; if another module imports
this module by its original name, you will also need to
explicitly add the module under its original name, duplicating
the module in the output.
The module name may end in ".*", which means to add all of the
.py files (other than __init__.py) in a particular directory.
It may also end in ".*.*", which means to cycle through all
directories within a particular directory.
"""
assert self.mf is None
if not newName:
newName = moduleName
if moduleName.endswith('.*'):
self.modules.update(self._gatherSubmodules(
moduleName, implicit, newName, filename,
guess, fromSource, text))
else:
# A normal, explicit module name.
self.modules[newName] = self.ModuleDef(
moduleName, filename = filename, implicit = implicit,
guess = guess, fromSource = fromSource, text = text)
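# Illustrative usage sketch (not part of the original source; 'freezer' and the
# module names are hypothetical):
#   freezer.addModule('mygame.main')      # one explicit module
#   freezer.addModule('mygame.levels.*')  # every .py file in mygame/levels/
#   freezer.addModule('mygame.*.*')       # every .py file in every package under mygame/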
def done(self, addStartupModules = False):
""" Call this method after you have added all modules with
addModule(). You may then call generateCode() or
writeMultifile() to dump the resulting output. After a call
to done(), you may not add any more modules until you call
reset(). """
assert self.mf is None
# If we are building an exe, we also need to implicitly
# bring in Python's startup modules.
if addStartupModules:
self.modules['_frozen_importlib'] = self.ModuleDef('importlib._bootstrap', implicit = True)
self.modules['_frozen_importlib_external'] = self.ModuleDef('importlib._bootstrap_external', implicit = True)
for moduleName in startupModules:
if moduleName not in self.modules:
self.addModule(moduleName, implicit = True)
# Excluding a parent module also excludes all its
# (non-explicit) children, unless the parent has allowChildren
# set.
# Walk through the list in sorted order, so we reach parents
# before children.
names = list(self.modules.items())
names.sort()
excludeDict = {}
implicitParentDict = {}
includes = []
autoIncludes = []
origToNewName = {}
for newName, mdef in names:
moduleName = mdef.moduleName
origToNewName[moduleName] = newName
if mdef.implicit and '.' in newName:
# For implicit modules, check if the parent is excluded.
parentName, baseName = newName.rsplit('.', 1)
if parentName in excludeDict:
mdef = excludeDict[parentName]
if mdef.exclude:
if not mdef.allowChildren:
excludeDict[moduleName] = mdef
elif mdef.implicit or mdef.guess:
autoIncludes.append(mdef)
else:
includes.append(mdef)
self.mf = PandaModuleFinder(excludes=list(excludeDict.keys()), suffixes=self.moduleSuffixes, path=self.path)
# Attempt to import the explicit modules into the modulefinder.
# First, ensure the includes are sorted in order so that
# packages appear before the modules they contain. This
# resolves potential ordering issues, especially with modules
# that are discovered by filename rather than through import
# statements.
includes.sort(key = self.__sortModuleKey)
# Now walk through the list and import them all.
for mdef in includes:
try:
self.__loadModule(mdef)
except ImportError as ex:
message = "Unknown module: %s" % (mdef.moduleName)
if str(ex) != "No module named " + str(mdef.moduleName):
message += " (%s)" % (ex)
print(message)
# Also attempt to import any implicit modules. If any of
# these fail to import, we don't really care.
for mdef in autoIncludes:
try:
self.__loadModule(mdef)
# Since it successfully loaded, it's no longer a guess.
mdef.guess = False
except:
# Something went wrong, guess it's not an importable
# module.
pass
# Check if any new modules we found have "hidden" imports
for origName in list(self.mf.modules.keys()):
hidden = hiddenImports.get(origName, [])
for modname in hidden:
if modname.endswith('.*'):
mdefs = self._gatherSubmodules(modname, implicit = True)
for mdef in mdefs.values():
try:
self.__loadModule(mdef)
except ImportError:
pass
else:
self.__loadModule(self.ModuleDef(modname, implicit = True))
# Now, any new modules we found get added to the export list.
for origName in list(self.mf.modules.keys()):
if origName not in origToNewName:
self.modules[origName] = self.ModuleDef(origName, implicit = True)
missing = []
for origName in self.mf.any_missing_maybe()[0]:
if origName in startupModules:
continue
if origName in self.previousModules:
continue
if origName in self.modules:
continue
# This module is missing. Let it be missing in the
# runtime also.
self.modules[origName] = self.ModuleDef(origName, exclude = True,
implicit = True)
if origName in okMissing:
# If it's listed in okMissing, don't even report it.
continue
prefix = origName.split('.')[0]
if origName not in reportedMissing:
missing.append(origName)
reportedMissing[origName] = True
if missing:
missing.sort()
print("There are some missing modules: %r" % missing)
def __sortModuleKey(self, mdef):
""" A sort key function to sort a list of mdef's into order,
primarily to ensure that packages precede their modules. """
if mdef.moduleName:
# If we have a moduleName, the key consists of the split
# tuple of packages names. That way, parents always sort
# before children.
return ('a', mdef.moduleName.split('.'))
else:
# If we don't have a moduleName, the key doesn't really
# matter--we use filename--but we start with 'b' to ensure
# that all of the non-named modules appear after all of
# the named modules.
return ('b', mdef.filename)
def __loadModule(self, mdef):
""" Adds the indicated module to the modulefinder. """
if mdef.filename:
# If it has a filename, then we found it as a file on
# disk. In this case, the moduleName may not be accurate
# and useful, so load it as a file instead.
tempPath = None
if '.' not in mdef.moduleName:
# If we loaded a python file from the root, we need to
# temporarily add its directory to the module search
# path, so the modulefinder can find any sibling
# python files it imports as well.
tempPath = Filename(mdef.filename.getDirname()).toOsSpecific()
self.mf.path.append(tempPath)
pathname = mdef.filename.toOsSpecific()
ext = mdef.filename.getExtension()
if ext == 'pyc' or ext == 'pyo':
fp = open(pathname, 'rb')
stuff = ("", "rb", imp.PY_COMPILED)
self.mf.load_module(mdef.moduleName, fp, pathname, stuff)
else:
stuff = ("", "rb", imp.PY_SOURCE)
if mdef.text:
fp = io.StringIO(mdef.text)
else:
fp = open(pathname, 'rb')
self.mf.load_module(mdef.moduleName, fp, pathname, stuff)
if tempPath:
del self.mf.path[-1]
else:
# Otherwise, we can just import it normally.
self.mf.import_hook(mdef.moduleName)
def reset(self):
""" After a previous call to done(), this resets the
FreezeTool object for a new pass. More modules may be added
and dumped to a new target. Previously-added modules are
remembered and will not be dumped again. """
self.mf = None
self.previousModules = dict(self.modules)
def mangleName(self, moduleName):
return 'M_' + moduleName.replace('.', '__').replace('-', '_')
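# Illustrative examples (not from the original source):
#   mangleName('panda3d.core')  -> 'M_panda3d__core'
#   mangleName('my-pkg.module') -> 'M_my_pkg__module'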
def getAllModuleNames(self):
""" Return a list of all module names that have been included
or forbidden, either in this current pass or in a previous
pass. Module names that have been excluded are not included
in this list. """
moduleNames = []
for newName, mdef in list(self.modules.items()):
if mdef.guess:
# Not really a module.
pass
elif mdef.exclude and not mdef.forbid:
# An excluded (but not forbidden) file.
pass
else:
moduleNames.append(newName)
moduleNames.sort()
return moduleNames
def getModuleDefs(self):
""" Return a list of all of the modules we will be explicitly
or implicitly including. The return value is actually a list
of tuples: (moduleName, moduleDef)."""
moduleDefs = []
for newName, mdef in list(self.modules.items()):
prev = self.previousModules.get(newName, None)
if not mdef.exclude:
# Include this module (even if a previous pass
# excluded it). But don't bother if we exported it
# previously.
if prev and not prev.exclude:
# Previously exported.
pass
elif mdef.moduleName in self.mf.modules or \
mdef.moduleName in startupModules or \
mdef.filename:
moduleDefs.append((newName, mdef))
elif mdef.forbid:
if not prev or not prev.forbid:
moduleDefs.append((newName, mdef))
moduleDefs.sort()
return moduleDefs
def __replacePaths(self):
# Build up the replacement pathname table, so we can eliminate
# the personal information in the frozen pathnames. The
# actual filename we put in there is meaningful only for stack
# traces, so we'll just use the module name.
replace_paths = []
for moduleName, module in list(self.mf.modules.items()):
if module.__code__:
origPathname = module.__code__.co_filename
replace_paths.append((origPathname, moduleName))
self.mf.replace_paths = replace_paths
# Now that we have built up the replacement mapping, go back
# through and actually replace the paths.
for moduleName, module in list(self.mf.modules.items()):
if module.__code__:
co = self.mf.replace_paths_in_code(module.__code__)
module.__code__ = co
def __addPyc(self, multifile, filename, code, compressionLevel):
if code:
data = imp.get_magic() + b'\0\0\0\0'
if sys.version_info >= (3, 0):
data += b'\0\0\0\0'
data += marshal.dumps(code)
stream = StringStream(data)
multifile.addSubfile(filename, stream, compressionLevel)
multifile.flush()
def __addPythonDirs(self, multifile, moduleDirs, dirnames, compressionLevel):
""" Adds all of the names on dirnames as a module directory. """
if not dirnames:
return
str = '.'.join(dirnames)
if str not in moduleDirs:
# Add an implicit __init__.py file (but only if there's
# not already a legitimate __init__.py file).
moduleName = '.'.join(dirnames)
filename = '/'.join(dirnames) + '/__init__'
if self.storePythonSource:
filename += '.py'
stream = StringStream(b'')
if multifile.findSubfile(filename) < 0:
multifile.addSubfile(filename, stream, 0)
multifile.flush()
else:
if __debug__:
filename += '.pyc'
else:
filename += '.pyo'
if multifile.findSubfile(filename) < 0:
code = compile('', moduleName, 'exec')
self.__addPyc(multifile, filename, code, compressionLevel)
moduleDirs[str] = True
self.__addPythonDirs(multifile, moduleDirs, dirnames[:-1], compressionLevel)
def __addPythonFile(self, multifile, moduleDirs, moduleName, mdef,
compressionLevel):
""" Adds the named module to the multifile as a .pyc file. """
# First, split the module into its subdirectory names.
dirnames = moduleName.split('.')
if len(dirnames) > 1 and dirnames[-1] == '__init__':
# The "module" may end in __init__, but that really means
# the parent directory.
dirnames = dirnames[:-1]
self.__addPythonDirs(multifile, moduleDirs, dirnames[:-1], compressionLevel)
filename = '/'.join(dirnames)
module = self.mf.modules.get(mdef.moduleName, None)
if getattr(module, '__path__', None) is not None or \
(getattr(module, '__file__', None) is not None and getattr(module, '__file__').endswith('/__init__.py')):
# It's actually a package. In this case, we really write
# the file moduleName/__init__.py.
filename += '/__init__'
moduleDirs[moduleName] = True
# Ensure we don't have an implicit filename from above.
multifile.removeSubfile(filename + '.py')
if __debug__:
multifile.removeSubfile(filename + '.pyc')
else:
multifile.removeSubfile(filename + '.pyo')
# Attempt to add the original source file if we can.
sourceFilename = None
if mdef.filename and mdef.filename.getExtension() == "py":
sourceFilename = mdef.filename
elif getattr(module, '__file__', None):
sourceFilename = Filename.fromOsSpecific(module.__file__)
sourceFilename.setExtension("py")
sourceFilename.setText()
if self.storePythonSource:
if sourceFilename and sourceFilename.exists():
filename += '.py'
multifile.addSubfile(filename, sourceFilename, compressionLevel)
return
# If we can't find the source file, add the compiled pyc instead.
if __debug__:
filename += '.pyc'
else:
filename += '.pyo'
code = None
if module:
# Get the compiled code directly from the module object.
code = getattr(module, "__code__", None)
if not code:
# This is a module with no associated Python
# code. It must be an extension module. Get the
# filename.
extensionFilename = getattr(module, '__file__', None)
if extensionFilename:
self.extras.append((moduleName, extensionFilename))
else:
# It doesn't even have a filename; it must
# be a built-in module. No worries about
# this one, then.
pass
else:
# Read the code from the source file and compile it on-the-fly.
if sourceFilename and sourceFilename.exists():
source = open(sourceFilename.toOsSpecific(), 'r').read()
if source and source[-1] != '\n':
source = source + '\n'
code = compile(source, str(sourceFilename), 'exec')
self.__addPyc(multifile, filename, code, compressionLevel)
def addToMultifile(self, multifile, compressionLevel = 0):
""" After a call to done(), this stores all of the accumulated
python code into the indicated Multifile. Additional
extension modules are listed in self.extras. """
moduleDirs = {}
for moduleName, mdef in self.getModuleDefs():
if not mdef.exclude:
self.__addPythonFile(multifile, moduleDirs, moduleName, mdef,
compressionLevel)
def writeMultifile(self, mfname):
""" After a call to done(), this stores all of the accumulated
python code into a Multifile with the indicated filename,
including the extension. Additional extension modules are
listed in self.extras."""
self.__replacePaths()
Filename(mfname).unlink()
multifile = Multifile()
if not multifile.openReadWrite(mfname):
raise Exception
self.addToMultifile(multifile)
multifile.flush()
multifile.repack()
def writeCode(self, filename, initCode = ""):
""" After a call to done(), this freezes all of the accumulated
Python code into a C source file. """
self.__replacePaths()
# Now generate the actual export table.
moduleDefs = []
moduleList = []
for moduleName, mdef in self.getModuleDefs():
origName = mdef.moduleName
if mdef.forbid:
# Explicitly disallow importing this module.
moduleList.append(self.makeForbiddenModuleListEntry(moduleName))
continue
assert not mdef.exclude
# Allow importing this module.
module = self.mf.modules.get(origName, None)
code = getattr(module, "__code__", None)
if code:
code = marshal.dumps(code)
mangledName = self.mangleName(moduleName)
moduleDefs.append(self.makeModuleDef(mangledName, code))
moduleList.append(self.makeModuleListEntry(mangledName, code, moduleName, module))
continue
#if moduleName in startupModules:
# # Forbid the loading of this startup module.
# moduleList.append(self.makeForbiddenModuleListEntry(moduleName))
# continue
# This is a module with no associated Python code. It is either
# an extension module or a builtin module. Get the filename, if
# it is the former.
extensionFilename = getattr(module, '__file__', None)
if extensionFilename or self.linkExtensionModules:
self.extras.append((moduleName, extensionFilename))
# If it is a submodule of a frozen module, Python will have
# trouble importing it as a builtin module. Synthesize a frozen
# module that loads it as builtin.
if '.' in moduleName and self.linkExtensionModules:
code = compile('import sys;del sys.modules["%s"];import imp;imp.init_builtin("%s")' % (moduleName, moduleName), moduleName, 'exec')
code = marshal.dumps(code)
mangledName = self.mangleName(moduleName)
moduleDefs.append(self.makeModuleDef(mangledName, code))
moduleList.append(self.makeModuleListEntry(mangledName, code, moduleName, None))
elif '.' in moduleName:
# Nothing we can do about this case except warn the user they
# are in for some trouble.
print('WARNING: Python cannot import extension modules under '
      'frozen Python packages; %s will be inaccessible. '
      'Consider passing -l to link in extension modules, or use '
      '-x %s to exclude the entire package.' % (moduleName, moduleName.split('.')[0]))
text = programFile % {
'moduleDefs': '\n'.join(moduleDefs),
'moduleList': '\n'.join(moduleList),
}
if self.linkExtensionModules and self.extras:
# Should we link in extension modules? If so, we write out a new
# built-in module table that directly hooks up with the init
# functions. On Linux, we completely override Python's own
# built-in module table; on Windows, we can't do this, so we
# instead use PyImport_ExtendInittab to add to it.
# Python 3 case.
text += '#if PY_MAJOR_VERSION >= 3\n'
for module, fn in self.extras:
if sys.platform != "win32" or fn:
libName = module.split('.')[-1]
initFunc = builtinInitFuncs.get(module, 'PyInit_' + libName)
if initFunc:
text += 'extern PyAPI_FUNC(PyObject) *%s(void);\n' % (initFunc)
text += '\n'
if sys.platform == "win32":
text += 'static struct _inittab extensions[] = {\n'
else:
text += 'struct _inittab _PyImport_Inittab[] = {\n'
for module, fn in self.extras:
if sys.platform != "win32" or fn:
libName = module.split('.')[-1]
initFunc = builtinInitFuncs.get(module, 'PyInit_' + libName) or 'NULL'
text += ' {"%s", %s},\n' % (module, initFunc)
text += ' {0, 0},\n'
text += '};\n\n'
# Python 2 case.
text += '#else\n'
for module, fn in self.extras:
if sys.platform != "win32" or fn:
libName = module.split('.')[-1]
initFunc = builtinInitFuncs.get(module, 'init' + libName)
if initFunc:
text += 'extern PyAPI_FUNC(void) %s(void);\n' % (initFunc)
text += '\n'
if sys.platform == "win32":
text += 'static struct _inittab extensions[] = {\n'
else:
text += 'struct _inittab _PyImport_Inittab[] = {\n'
for module, fn in self.extras:
if sys.platform != "win32" or fn:
libName = module.split('.')[-1]
initFunc = builtinInitFuncs.get(module, 'init' + libName) or 'NULL'
text += ' {"%s", %s},\n' % (module, initFunc)
text += ' {0, 0},\n'
text += '};\n'
text += '#endif\n\n'
elif sys.platform == "win32":
text += 'static struct _inittab extensions[] = {\n'
text += ' {0, 0},\n'
text += '};\n\n'
text += initCode
if filename is not None:
file = open(filename, 'w')
file.write(text)
file.close()
def generateCode(self, basename, compileToExe = False):
""" After a call to done(), this freezes all of the
accumulated python code into either an executable program (if
compileToExe is true) or a dynamic library (if compileToExe is
false). The basename is the name of the file to write,
without the extension.
The return value is the newly-generated filename, including
the filename extension. Additional extension modules are
listed in self.extras. """
if compileToExe:
# We must have a __main__ module to make an exe file.
if not self.__writingModule('__main__'):
message = "Can't generate an executable without a __main__ module."
raise Exception(message)
filename = basename + self.sourceExtension
dllexport = ''
dllimport = ''
if self.platform.startswith('win'):
dllexport = '__declspec(dllexport) '
dllimport = '__declspec(dllimport) '
if not self.cenv:
self.cenv = CompilationEnvironment(platform = self.platform)
if compileToExe:
code = self.frozenMainCode
if self.platform.startswith('win'):
code += self.frozenDllMainCode
initCode = self.mainInitCode % {
'frozenMainCode' : code,
'programName' : os.path.basename(basename),
'dllexport' : dllexport,
'dllimport' : dllimport,
}
if self.platform.startswith('win'):
target = basename + '.exe'
else:
target = basename
compileFunc = self.cenv.compileExe
else:
if self.platform.startswith('win'):
target = basename + self.cenv.dllext + '.pyd'
else:
target = basename + '.so'
initCode = dllInitCode % {
'moduleName' : os.path.basename(basename),
'dllexport' : dllexport,
'dllimport' : dllimport,
}
compileFunc = self.cenv.compileDll
self.writeCode(filename, initCode=initCode)
# Keep track of the files we should clean up after use.
cleanFiles = [filename, basename + self.objectExtension]
extraLink = []
if self.linkExtensionModules:
for mod, fn in self.extras:
if not fn:
continue
if sys.platform == 'win32':
# We can't link with a .pyd directly on Windows. Check
# if there is a corresponding .lib file in the Python libs
# directory.
libsdir = os.path.join(sys.exec_prefix, 'libs')
libfile = os.path.join(libsdir, mod + '.lib')
if os.path.isfile(libfile):
extraLink.append(mod + '.lib')
continue
# No, so we have to generate a .lib file. This is pretty
# easy given that we know the only symbol we need is a
# initmodule or PyInit_module function.
modname = mod.split('.')[-1]
libfile = modname + '.lib'
if sys.version_info >= (3, 0):
symbolName = 'PyInit_' + modname
else:
symbolName = 'init' + modname
os.system('lib /nologo /def /export:%s /name:%s.pyd /out:%s' % (symbolName, modname, libfile))
extraLink.append(libfile)
cleanFiles += [libfile, modname + '.exp']
else:
extraLink.append(fn)
try:
compileFunc(filename, basename, extraLink=extraLink)
finally:
if not self.keepTemporaryFiles:
for file in cleanFiles:
if os.path.exists(file):
os.unlink(file)
return target
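# Illustrative end-to-end sketch (assumption, not from the original source):
# 'freezer' stands for an instance of this class, 'app' for a hypothetical
# package on sys.path, and Filename is the Panda3D filename class used above.
#   freezer.addModule('__main__', filename=Filename.fromOsSpecific('main.py'))
#   freezer.addModule('app.*')
#   freezer.done(addStartupModules=True)
#   target = freezer.generateCode('app', compileToExe=True)
#   # 'target' is e.g. 'app' on POSIX systems or 'app.exe' on Windows.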
def generateRuntimeFromStub(self, target, stub_file, use_console, fields={},
log_append=False):
# We must have a __main__ module to make an exe file.
if not self.__writingModule('__main__'):
message = "Can't generate an executable without a __main__ module."
raise Exception(message)
if self.platform.startswith('win'):
modext = '.pyd'
else:
modext = '.so'
# First gather up the strings and code for all the module names, and
# put those in a string pool.
pool = b""
strings = set()
for moduleName, mdef in self.getModuleDefs():
strings.add(moduleName.encode('ascii'))
for value in fields.values():
if value is not None:
strings.add(value.encode('utf-8'))
# Sort by length descending, allowing reuse of partial strings.
strings = sorted(strings, key=lambda str:-len(str))
string_offsets = {}
# Now add the strings to the pool, and collect the offsets relative to
# the beginning of the pool.
for string in strings:
# First check whether it's already in there; it could be part of
# a longer string.
offset = pool.find(string + b'\0')
if offset < 0:
offset = len(pool)
pool += string + b'\0'
string_offsets[string] = offset
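# Illustrative note (not from the original source): because the lookup
# searches for string + b'\0' and longer strings were added first, a shorter
# name that is a suffix of a longer one reuses its tail.  For example, once
# b'panda3d.core\0' is in the pool, b'core' resolves to an offset inside it
# instead of growing the pool.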
# Now go through the modules and add them to the pool as well. These
# are not 0-terminated, but we later record their sizes and names in
# a table after the blob header.
moduleList = []
for moduleName, mdef in self.getModuleDefs():
origName = mdef.moduleName
if mdef.forbid:
# Explicitly disallow importing this module.
moduleList.append((moduleName, 0, 0))
continue
# For whatever it's worth, align the code blocks.
if len(pool) & 3 != 0:
pad = (4 - (len(pool) & 3))
pool += b'\0' * pad
assert not mdef.exclude
# Allow importing this module.
module = self.mf.modules.get(origName, None)
code = getattr(module, "__code__", None)
if code:
code = marshal.dumps(code)
size = len(code)
if getattr(module, "__path__", None):
# Indicate package by negative size
size = -size
moduleList.append((moduleName, len(pool), size))
pool += code
continue
# This is a module with no associated Python code. It is either
# an extension module or a builtin module. Get the filename, if
# it is the former.
extensionFilename = getattr(module, '__file__', None)
if extensionFilename:
self.extras.append((moduleName, extensionFilename))
# If it is a submodule of a frozen module, Python will have
# trouble importing it as a builtin module. Synthesize a frozen
# module that loads it dynamically.
if '.' in moduleName:
if self.platform.startswith("macosx") and not use_console:
# We write the Frameworks directory to sys.path[0].
code = 'import sys;del sys.modules["%s"];import sys,os,imp;imp.load_dynamic("%s",os.path.join(sys.path[0], "%s%s"))' % (moduleName, moduleName, moduleName, modext)
else:
code = 'import sys;del sys.modules["%s"];import sys,os,imp;imp.load_dynamic("%s",os.path.join(os.path.dirname(sys.executable), "%s%s"))' % (moduleName, moduleName, moduleName, modext)
if sys.version_info >= (3, 2):
code = compile(code, moduleName, 'exec', optimize=2)
else:
code = compile(code, moduleName, 'exec')
code = marshal.dumps(code)
moduleList.append((moduleName, len(pool), len(code)))
pool += code
# Determine the format of the header and module list entries depending
# on the platform.
num_pointers = 12
stub_data = bytearray(stub_file.read())
bitnesses = self._get_executable_bitnesses(stub_data)
header_layouts = {
32: '<QQHHHH8x%dII' % num_pointers,
64: '<QQHHHH8x%dQQ' % num_pointers,
}
entry_layouts = {
32: '<IIi',
64: '<QQixxxx',
}
# Calculate the size of the module tables, so that we can determine
# the proper offset for the string pointers. There can be more than
# one module table for macOS executables. Sort the bitnesses so that
# the alignment is correct.
bitnesses = sorted(bitnesses, reverse=True)
pool_offset = 0
for bitness in bitnesses:
pool_offset += (len(moduleList) + 1) * struct.calcsize(entry_layouts[bitness])
# Now we can determine the offset of the blob.
if self.platform.startswith('win'):
# We don't use mmap on Windows. Align just for good measure.
blob_align = 32
else:
# Align to page size, so that it can be mmapped.
blob_align = 4096
# Add padding before the blob if necessary.
blob_offset = len(stub_data)
if (blob_offset & (blob_align - 1)) != 0:
pad = (blob_align - (blob_offset & (blob_align - 1)))
stub_data += (b'\0' * pad)
blob_offset += pad
assert (blob_offset % blob_align) == 0
assert blob_offset == len(stub_data)
# Also determine the total blob size now. Add padding to the end.
blob_size = pool_offset + len(pool)
if blob_size & 31 != 0:
pad = (32 - (blob_size & 31))
blob_size += pad
# Calculate the offsets for the variables. These are pointers,
# relative to the beginning of the blob.
field_offsets = {}
for key, value in fields.items():
if value is not None:
encoded = value.encode('utf-8')
field_offsets[key] = pool_offset + string_offsets[encoded]
# OK, now go and write the blob. This consists of the module table
# (there may be two in the case of a macOS universal (fat) binary).
blob = b""
append_offset = False
for bitness in bitnesses:
entry_layout = entry_layouts[bitness]
header_layout = header_layouts[bitness]
table_offset = len(blob)
for moduleName, offset, size in moduleList:
encoded = moduleName.encode('ascii')
string_offset = pool_offset + string_offsets[encoded]
if size != 0:
offset += pool_offset
blob += struct.pack(entry_layout, string_offset, offset, size)
# A null entry marks the end of the module table.
blob += struct.pack(entry_layout, 0, 0, 0)
flags = 0
if log_append:
flags |= 1
# Compose the header we will be writing to the stub, to tell it
# where to find the module data blob, as well as other variables.
header = struct.pack(header_layout,
blob_offset,
blob_size,
1, # Version number
num_pointers, # Number of pointers that follow
0, # Codepage, not yet used
flags,
table_offset, # Module table pointer.
# The following variables need to be set before static init
# time. See configPageManager.cxx, where they are read.
field_offsets.get('prc_data', 0),
field_offsets.get('default_prc_dir', 0),
field_offsets.get('prc_dir_envvars', 0),
field_offsets.get('prc_path_envvars', 0),
field_offsets.get('prc_patterns', 0),
field_offsets.get('prc_encrypted_patterns', 0),
field_offsets.get('prc_encryption_key', 0),
field_offsets.get('prc_executable_patterns', 0),
field_offsets.get('prc_executable_args_envvar', 0),
field_offsets.get('main_dir', 0),
field_offsets.get('log_filename', 0),
0)
# Now, find the location of the 'blobinfo' symbol in the binary,
# to which we will write our header.
if not self._replace_symbol(stub_data, b'blobinfo', header, bitness=bitness):
# This must be a legacy deploy-stub, which requires the offset to
# be appended to the end.
append_offset = True
# Add the string/code pool.
assert len(blob) == pool_offset
blob += pool
del pool
# Now pad out the blob to the calculated blob size.
if len(blob) < blob_size:
blob += b'\0' * (blob_size - len(blob))
assert len(blob) == blob_size
if append_offset:
# This is for legacy deploy-stub.
print("WARNING: Could not find blob header. Is deploy-stub outdated?")
blob += struct.pack('<Q', blob_offset)
with open(target, 'wb') as f:
f.write(stub_data)
assert f.tell() == blob_offset
f.write(blob)
os.chmod(target, 0o755)
return target
def _get_executable_bitnesses(self, data):
"""Returns the bitnesses (32 or 64) of the given executable data.
This will contain 1 element for non-fat executables."""
if data.startswith(b'MZ'):
# A Windows PE file.
offset, = struct.unpack_from('<I', data, 0x3c)
assert data[offset:offset+4] == b'PE\0\0'
magic, = struct.unpack_from('<H', data, offset + 24)
assert magic in (0x010b, 0x020b)
if magic == 0x020b:
return (64,)
else:
return (32,)
elif data.startswith(b"\177ELF"):
# A Linux/FreeBSD ELF executable.
elfclass = ord(data[4:5])
assert elfclass in (1, 2)
return (elfclass * 32,)
elif data[:4] in (b'\xFE\xED\xFA\xCE', b'\xCE\xFA\xED\xFE'):
# 32-bit Mach-O file, as used on macOS.
return (32,)
elif data[:4] in (b'\xFE\xED\xFA\xCF', b'\xCF\xFA\xED\xFE'):
# 64-bit Mach-O file, as used on macOS.
return (64,)
elif data[:4] in (b'\xCA\xFE\xBA\xBE', b'\xBE\xBA\xFE\xCA'):
# Universal binary with 32-bit offsets.
num_fat, = struct.unpack_from('>I', data, 4)
bitnesses = set()
ptr = 8
for i in range(num_fat):
cputype, cpusubtype, offset, size, align = \
struct.unpack_from('>IIIII', data, ptr)
ptr += 20
if (cputype & 0x1000000) != 0:
bitnesses.add(64)
else:
bitnesses.add(32)
return tuple(bitnesses)
elif data[:4] in (b'\xCA\xFE\xBA\xBF', b'\xBF\xBA\xFE\xCA'):
# Universal binary with 64-bit offsets.
num_fat, = struct.unpack_from('>I', data, 4)
bitnesses = set()
ptr = 8
for i in range(num_fat):
cputype, cpusubtype, offset, size, align = \
struct.unpack_from('>QQQQQ', data, ptr)
ptr += 40
if (cputype & 0x1000000) != 0:
bitnesses.add(64)
else:
bitnesses.add(32)
return tuple(bitnesses)
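# Illustrative examples (magic-number sketches, not from the original source):
#   a PE file whose optional header magic is 0x020b   -> (64,)
#   an ELF file starting with b'\x7fELF\x02'          -> (64,)
#   a universal Mach-O with i386 and x86_64 slices    -> both 32 and 64 reported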
def _replace_symbol(self, data, symbol_name, replacement, bitness=None):
"""We store a custom section in the binary file containing a header
with offsets to the binary data.
If bitness is set, and the binary in question is a macOS universal
binary, it only replaces for binaries with the given bitness. """
if data.startswith(b'MZ'):
# A Windows PE file.
pe = pefile.PEFile()
pe.read(io.BytesIO(data))
addr = pe.get_export_address(symbol_name)
if addr is not None:
# We found it, return its offset in the file.
offset = pe.get_address_offset(addr)
if offset is not None:
data[offset:offset+len(replacement)] = replacement
return True
elif data.startswith(b"\177ELF"):
return self._replace_symbol_elf(data, symbol_name, replacement)
elif data[:4] in (b'\xFE\xED\xFA\xCE', b'\xCE\xFA\xED\xFE',
b'\xFE\xED\xFA\xCF', b'\xCF\xFA\xED\xFE'):
off = self._find_symbol_macho(data, symbol_name)
if off is not None:
data[off:off+len(replacement)] = replacement
return True
return False
elif data[:4] in (b'\xCA\xFE\xBA\xBE', b'\xBE\xBA\xFE\xCA'):
# Universal binary with 32-bit offsets.
num_fat, = struct.unpack_from('>I', data, 4)
replaced = False
ptr = 8
for i in range(num_fat):
cputype, cpusubtype, offset, size, align = \
struct.unpack_from('>IIIII', data, ptr)
ptr += 20
# Does this match the requested bitness?
if bitness is not None and ((cputype & 0x1000000) != 0) != (bitness == 64):
continue
macho_data = data[offset:offset+size]
off = self._find_symbol_macho(macho_data, symbol_name)
if off is not None:
off += offset
data[off:off+len(replacement)] = replacement
replaced = True
return replaced
elif data[:4] in (b'\xCA\xFE\xBA\xBF', b'\xBF\xBA\xFE\xCA'):
# Universal binary with 64-bit offsets.
num_fat, = struct.unpack_from('>I', data, 4)
replaced = False
ptr = 8
for i in range(num_fat):
cputype, cpusubtype, offset, size, align = \
struct.unpack_from('>QQQQQ', data, ptr)
ptr += 40
# Does this match the requested bitness?
if bitness is not None and ((cputype & 0x1000000) != 0) != (bitness == 64):
continue
macho_data = data[offset:offset+size]
off = self._find_symbol_macho(macho_data, symbol_name)
if off is not None:
off += offset
data[off:off+len(replacement)] = replacement
replaced = True
return replaced
# We don't know what kind of file this is.
return False
def _replace_symbol_elf(self, elf_data, symbol_name, replacement):
""" The Linux/FreeBSD implementation of _replace_symbol. """
replaced = False
# Make sure we read in the correct endianness and integer size
endian = "<>"[ord(elf_data[5:6]) - 1]
is_64bit = ord(elf_data[4:5]) - 1 # 0 = 32-bits, 1 = 64-bits
header_struct = endian + ("HHIIIIIHHHHHH", "HHIQQQIHHHHHH")[is_64bit]
section_struct = endian + ("4xI4xIIII8xI", "4xI8xQQQI12xQ")[is_64bit]
symbol_struct = endian + ("IIIBBH", "IBBHQQ")[is_64bit]
header_size = struct.calcsize(header_struct)
type, machine, version, entry, phoff, shoff, flags, ehsize, phentsize, phnum, shentsize, shnum, shstrndx \
= struct.unpack_from(header_struct, elf_data, 16)
section_offsets = []
symbol_tables = []
string_tables = {}
# Seek to the section header table and find the symbol tables.
ptr = shoff
for i in range(shnum):
type, addr, offset, size, link, entsize = struct.unpack_from(section_struct, elf_data[ptr:ptr+shentsize])
ptr += shentsize
section_offsets.append(offset - addr)
if type == 0x0B and link != 0: # SHT_DYNSYM, links to string table
symbol_tables.append((offset, size, link, entsize))
string_tables[link] = None
# Read the relevant string tables.
for idx in list(string_tables.keys()):
ptr = shoff + idx * shentsize
type, addr, offset, size, link, entsize = struct.unpack_from(section_struct, elf_data[ptr:ptr+shentsize])
if type == 3:
string_tables[idx] = elf_data[offset:offset+size]
# Loop through to find the offset of the "blobinfo" symbol.
for offset, size, link, entsize in symbol_tables:
entries = size // entsize
for i in range(entries):
ptr = offset + i * entsize
fields = struct.unpack_from(symbol_struct, elf_data[ptr:ptr+entsize])
if is_64bit:
name, info, other, shndx, value, size = fields
else:
name, value, size, info, other, shndx = fields
if not name:
continue
name = string_tables[link][name : string_tables[link].find(b'\0', name)]
if name == symbol_name:
if shndx == 0: # SHN_UNDEF
continue
elif shndx >= 0xff00 and shndx <= 0xffff:
assert False
else:
# Got it. Make the replacement.
off = section_offsets[shndx] + value
elf_data[off:off+len(replacement)] = replacement
replaced = True
return replaced
def _find_symbol_macho(self, macho_data, symbol_name):
""" Returns the offset of the given symbol in the binary file. """
if macho_data[:4] in (b'\xCE\xFA\xED\xFE', b'\xCF\xFA\xED\xFE'):
endian = '<'
else:
endian = '>'
cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = \
struct.unpack_from(endian + 'IIIIII', macho_data, 4)
is_64bit = (cputype & 0x1000000) != 0
segments = []
cmd_ptr = 28
nlist_struct = endian + 'IBBHI'
if is_64bit:
nlist_struct = endian + 'IBBHQ'
cmd_ptr += 4
nlist_size = struct.calcsize(nlist_struct)
for i in range(ncmds):
cmd, cmd_size = struct.unpack_from(endian + 'II', macho_data, cmd_ptr)
cmd_data = macho_data[cmd_ptr+8:cmd_ptr+cmd_size]
cmd_ptr += cmd_size
cmd &= ~0x80000000
if cmd == 0x01: # LC_SEGMENT
segname, vmaddr, vmsize, fileoff, filesize, maxprot, initprot, nsects, flags = \
struct.unpack_from(endian + '16sIIIIIIII', cmd_data)
segments.append((vmaddr, vmsize, fileoff))
elif cmd == 0x19: # LC_SEGMENT_64
segname, vmaddr, vmsize, fileoff, filesize, maxprot, initprot, nsects, flags = \
struct.unpack_from(endian + '16sQQQQIIII', cmd_data)
segments.append((vmaddr, vmsize, fileoff))
elif cmd == 0x2: # LC_SYMTAB
symoff, nsyms, stroff, strsize = \
struct.unpack_from(endian + 'IIII', cmd_data)
strings = macho_data[stroff:stroff+strsize]
for i in range(nsyms):
strx, type, sect, desc, value = struct.unpack_from(nlist_struct, macho_data, symoff)
symoff += nlist_size
name = strings[strx : strings.find(b'\0', strx)]
if name == b'_' + symbol_name:
# Find out in which segment this is.
for vmaddr, vmsize, fileoff in segments:
# Is it defined in this segment?
rel = value - vmaddr
if rel >= 0 and rel < vmsize:
# Yes, so return the symbol offset.
return fileoff + rel
print("Could not find memory address for symbol %s" % (symbol_name))
def makeModuleDef(self, mangledName, code):
result = ''
result += 'static unsigned char %s[] = {' % (mangledName)
for i in range(0, len(code), 16):
result += '\n '
for c in code[i:i+16]:
if isinstance(c, int): # Python 3
result += ('%d,' % c)
else: # Python 2
result += ('%d,' % ord(c))
result += '\n};\n'
return result
def makeModuleListEntry(self, mangledName, code, moduleName, module):
size = len(code)
if getattr(module, "__path__", None):
# Indicate package by negative size
size = -size
return ' {"%s", %s, %s},' % (moduleName, mangledName, size)
def makeForbiddenModuleListEntry(self, moduleName):
return ' {"%s", NULL, 0},' % (moduleName)
def __writingModule(self, moduleName):
""" Returns true if we are outputting the named module in this
pass, false if we have already output in a previous pass, or
if it is not yet on the output table. """
mdef = self.modules.get(moduleName, None)
if mdef is None or mdef.exclude:
    return False
if moduleName in self.previousModules:
return False
return True
_PKG_NAMESPACE_DIRECTORY = object()
class PandaModuleFinder(modulefinder.ModuleFinder):
def __init__(self, *args, **kw):
"""
:param path: search path to look on, defaults to sys.path
:param suffixes: defaults to imp.get_suffixes()
:param excludes: a list of modules to exclude
:param debug: an integer indicating the level of verbosity
"""
self.suffixes = kw.pop('suffixes', imp.get_suffixes())
modulefinder.ModuleFinder.__init__(self, *args, **kw)
# Make sure we don't open a .whl/.zip file more than once.
self._zip_files = {}
def _open_file(self, path, mode):
""" Opens a module at the given path, which may contain a zip file.
Returns None if the module could not be found. """
if os.path.isfile(path):
if 'b' not in mode:
return io.open(path, mode, encoding='utf8')
else:
return open(path, mode)
# Is there a zip file along the path?
dir, dirname = os.path.split(path)
fn = dirname
while dirname:
if os.path.isfile(dir):
# Okay, this is actually a file. Is it a zip file?
if dir in self._zip_files:
# Yes, and we've previously opened this.
zip = self._zip_files[dir]
elif zipfile.is_zipfile(dir):
zip = zipfile.ZipFile(dir)
self._zip_files[dir] = zip
else:
# It's a different kind of file. Stop looking.
return None
try:
fp = zip.open(fn.replace(os.path.sep, '/'), 'r')
except KeyError:
return None
if sys.version_info >= (3, 0) and 'b' not in mode:
return io.TextIOWrapper(fp, encoding='utf8')
return fp
# Look at the parent directory.
dir, dirname = os.path.split(dir)
fn = os.path.join(dirname, fn)
return None
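# Illustrative example (hypothetical path, not from the original source): for
# path = '/deps/requests-2.31.0-py3-none-any.whl/requests/__init__.py' the loop
# above walks up the path until the .whl turns out to be a file on disk, opens
# it with zipfile, and then reads 'requests/__init__.py' from inside the archive.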
def _dir_exists(self, path):
"""Returns True if the given directory exists, either on disk or inside
a wheel."""
if os.path.isdir(path):
return True
# Is there a zip file along the path?
dir, dirname = os.path.split(path.rstrip(os.path.sep + '/'))
fn = dirname
while dirname:
if os.path.isfile(dir):
# Okay, this is actually a file. Is it a zip file?
if dir in self._zip_files:
# Yes, and we've previously opened this.
zip = self._zip_files[dir]
elif zipfile.is_zipfile(dir):
zip = zipfile.ZipFile(dir)
self._zip_files[dir] = zip
else:
# It's a different kind of file. Stop looking.
return None
# (Most) zip files do not store directories; check instead for a
# file whose path starts with this directory name.
prefix = fn.replace(os.path.sep, '/') + '/'
for name in zip.namelist():
if name.startswith(prefix):
return True
return False
# Look at the parent directory.
dir, dirname = os.path.split(dir)
fn = os.path.join(dirname, fn)
return False
def load_module(self, fqname, fp, pathname, file_info):
"""Copied from ModuleFinder.load_module with fixes to handle sending bytes
to compile() for PY_SOURCE types. Sending bytes to compile allows it to
handle file encodings."""
suffix, mode, type = file_info
self.msgin(2, "load_module", fqname, fp and "fp", pathname)
if type == imp.PKG_DIRECTORY:
m = self.load_package(fqname, pathname)
self.msgout(2, "load_module ->", m)
return m
if type is _PKG_NAMESPACE_DIRECTORY:
m = self.add_module(fqname)
m.__code__ = compile('', '', 'exec')
m.__path__ = pathname
return m
if type == imp.PY_SOURCE:
if fqname in overrideModules:
# This module has a custom override.
code = overrideModules[fqname]
else:
code = fp.read()
code += b'\n' if isinstance(code, bytes) else '\n'
co = compile(code, pathname, 'exec')
elif type == imp.PY_COMPILED:
if sys.version_info >= (3, 7):
try:
data = fp.read()
importlib._bootstrap_external._classify_pyc(data, fqname, {})
except ImportError as exc:
self.msgout(2, "raise ImportError: " + str(exc), pathname)
raise
co = marshal.loads(memoryview(data)[16:])
elif sys.version_info >= (3, 4):
try:
if sys.version_info >= (3, 5):
marshal_data = importlib._bootstrap_external._validate_bytecode_header(fp.read())
else:
marshal_data = importlib._bootstrap._validate_bytecode_header(fp.read())
except ImportError as exc:
self.msgout(2, "raise ImportError: " + str(exc), pathname)
raise
co = marshal.loads(marshal_data)
else:
if fp.read(4) != imp.get_magic():
self.msgout(2, "raise ImportError: Bad magic number", pathname)
raise ImportError("Bad magic number in %s" % pathname)
fp.read(4)
if sys.version_info >= (3, 3):
fp.read(4)
co = marshal.load(fp)
else:
co = None
m = self.add_module(fqname)
m.__file__ = pathname
if co:
if self.replace_paths:
co = self.replace_paths_in_code(co)
m.__code__ = co
self.scan_code(co, m)
self.msgout(2, "load_module ->", m)
return m
# This function is provided here since the Python library version has a bug
# (see bpo-35376)
def _safe_import_hook(self, name, caller, fromlist, level=-1):
# wrapper for self.import_hook() that won't raise ImportError
if name in self.badmodules:
self._add_badmodule(name, caller)
return
try:
self.import_hook(name, caller, level=level)
except ImportError as msg:
self.msg(2, "ImportError:", str(msg))
self._add_badmodule(name, caller)
else:
if fromlist:
for sub in fromlist:
fullname = name + "." + sub
if fullname in self.badmodules:
self._add_badmodule(fullname, caller)
continue
try:
self.import_hook(name, caller, [sub], level=level)
except ImportError as msg:
self.msg(2, "ImportError:", str(msg))
self._add_badmodule(fullname, caller)
def find_module(self, name, path=None, parent=None):
""" Finds a module with the indicated name on the given search path
(or self.path if None). Returns a tuple like (fp, path, stuff), where
stuff is a tuple like (suffix, mode, type). """
if imp.is_frozen(name):
# Don't pick up modules that are frozen into p3dpython.
raise ImportError("'%s' is a frozen module" % (name))
if parent is not None:
fullname = parent.__name__+'.'+name
else:
fullname = name
if fullname in self.excludes:
raise ImportError(name)
# If we have a custom override for this module, we know we have it.
if fullname in overrideModules:
return (None, '', ('.py', 'r', imp.PY_SOURCE))
# If no search path is given, look for a built-in module.
if path is None:
if name in sys.builtin_module_names:
return (None, None, ('', '', imp.C_BUILTIN))
path = self.path
if fullname == 'distutils' and hasattr(sys, 'real_prefix'):
# The PyPI version of virtualenv inserts a special version of
# distutils that does some bizarre stuff that won't work in our
# deployed application. Force it to find the regular one.
try:
fp, fn, stuff = self.find_module('opcode')
if fn:
path = [os.path.dirname(fn)] + path
except ImportError:
pass
# Look for the module on the search path.
ns_dirs = []
for dir_path in path:
basename = os.path.join(dir_path, name.split('.')[-1])
# Look for recognized extensions.
for stuff in self.suffixes:
suffix, mode, _ = stuff
fp = self._open_file(basename + suffix, mode)
if fp:
return (fp, basename + suffix, stuff)
# Consider a package, i.e. a directory containing __init__.py.
for suffix, mode, _ in self.suffixes:
init = os.path.join(basename, '__init__' + suffix)
if self._open_file(init, mode):
return (None, basename, ('', '', imp.PKG_DIRECTORY))
# This may be a namespace package.
if self._dir_exists(basename):
ns_dirs.append(basename)
# It wasn't found through the normal channels. Maybe it's one of
# ours, or maybe it's frozen?
if not path:
# Only if we're not looking on a particular path, though.
if p3extend_frozen and p3extend_frozen.is_frozen_module(name):
# It's a frozen module.
return (None, name, ('', '', imp.PY_FROZEN))
# If we found folders on the path with this module name without an
# __init__.py file, we should consider this a namespace package.
if ns_dirs and sys.version_info >= (3, 3):
return (None, ns_dirs, ('', '', _PKG_NAMESPACE_DIRECTORY))
raise ImportError(name)
def find_all_submodules(self, m):
# Overridden so that we can define our own suffixes.
if not m.__path__:
return
modules = {}
for dir in m.__path__:
try:
names = os.listdir(dir)
except OSError:
self.msg(2, "can't list directory", dir)
continue
for name in names:
mod = None
for suff, _, _ in self.suffixes:
n = len(suff)
if name[-n:] == suff:
mod = name[:-n]
break
if mod and mod != "__init__":
modules[mod] = mod
return modules.keys()
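# Illustrative usage sketch (assumption, not part of the original module):
#   mf = PandaModuleFinder(excludes=['doctest'], suffixes=imp.get_suffixes())
#   mf.import_hook('json')
#   print(sorted(mf.modules.keys()))   # includes 'json', 'json.decoder', ...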
|
[] | [] | ["WindowsSdkDir", "PATH", "VCINSTALLDIR"] | [] | ["WindowsSdkDir", "PATH", "VCINSTALLDIR"] | python | 3 | 0 | |
example/main.go
|
package main
import (
"log"
"net/http"
"os"
red "github.com/achilles42/red"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
func HandleHelloRequest() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Hello, World!"))
})
}
func main() {
addr := os.Getenv("ADDR")
mux := http.NewServeMux()
mux.Handle("/v1/hello", red.InstrumentationMiddleware(HandleHelloRequest()))
mux.Handle("/metrics", promhttp.Handler())
log.Printf("server is listening at %s", addr)
log.Fatal(http.ListenAndServe(addr, mux))
}
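// Illustrative usage note (assumption, not part of the original example): the
// listen address is read from the ADDR environment variable, so a local run
// might look like
//
//	ADDR=":8080" go run .
//	curl localhost:8080/v1/hello
//	curl localhost:8080/metrics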
|
["\"ADDR\""] | [] | ["ADDR"] | [] | ["ADDR"] | go | 1 | 0 | |
real_estate/real_estate_deploy/real_estate/wsgi.py
|
"""
WSGI config for real_estate project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'real_estate.settings')
application = get_wsgi_application()
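# Illustrative deployment sketch (assumption, not part of the original file):
# a WSGI server imports the module-level ``application`` defined above, e.g.
#   gunicorn real_estate.wsgi:application --bind 0.0.0.0:8000
# DJANGO_SETTINGS_MODULE can still be overridden in the environment, since
# setdefault() only fills in the value when it is not already set.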
|
[] | [] | [] | [] | [] | python | 0 | 0 | |
Cython/Compiler/Symtab.py
|
#
# Symbol Table
#
from __future__ import absolute_import
import re
import copy
import operator
try:
import __builtin__ as builtins
except ImportError: # Py3
import builtins
from .Errors import warning, error, InternalError
from .StringEncoding import EncodedString
from . import Options, Naming
from . import PyrexTypes
from .PyrexTypes import py_object_type, unspecified_type
from .TypeSlots import (
pyfunction_signature, pymethod_signature, richcmp_special_methods,
get_special_method_signature, get_property_accessor_signature)
from . import Future
from . import Code
iso_c99_keywords = set(
['auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',
'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if',
'int', 'long', 'register', 'return', 'short', 'signed', 'sizeof',
'static', 'struct', 'switch', 'typedef', 'union', 'unsigned', 'void',
'volatile', 'while',
'_Bool', '_Complex', '_Imaginary', 'inline', 'restrict'])
def c_safe_identifier(cname):
# There are some C limitations on struct entry names.
if ((cname[:2] == '__' and not (cname.startswith(Naming.pyrex_prefix)
or cname in ('__weakref__', '__dict__')))
or cname in iso_c99_keywords):
cname = Naming.pyrex_prefix + cname
return cname
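# Illustrative examples (assuming Naming.pyrex_prefix == "__pyx_"; not from
# the original source):
#   c_safe_identifier('switch')      -> '__pyx_switch'    (ISO C99 keyword)
#   c_safe_identifier('__handle')    -> '__pyx___handle'  (reserved '__' prefix)
#   c_safe_identifier('__weakref__') -> '__weakref__'     (explicitly allowed)
#   c_safe_identifier('count')       -> 'count'           (already safe)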
def punycodify_name(cname, mangle_with=None):
# if passed, mangle_with should be a byte string
# modified from PEP489
try:
cname.encode('ascii')
except UnicodeEncodeError:
cname = cname.encode('punycode').replace(b'-', b'_').decode('ascii')
if mangle_with:
# sometimes it is necessary to mangle unicode names alone where
# they'll be inserted directly into C, because the punycode
# transformation can turn them into invalid identifiers
cname = "%s_%s" % (mangle_with, cname)
elif cname.startswith(Naming.pyrex_prefix):
# a punycode name could also be a valid ascii variable name so
# change the prefix to distinguish
cname = cname.replace(Naming.pyrex_prefix,
Naming.pyunicode_identifier_prefix, 1)
return cname
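# Illustrative sketch (assumption, not from the original source): a non-ASCII
# name is punycode-encoded with '-' rewritten to '_', e.g.
#   punycodify_name('größe')                       -> 'gre_<punycode digits>'
#   punycodify_name('größe', mangle_with='__pyx')  -> '__pyx_gre_<punycode digits>'
# ASCII names pass through unchanged.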
class BufferAux(object):
writable_needed = False
def __init__(self, buflocal_nd_var, rcbuf_var):
self.buflocal_nd_var = buflocal_nd_var
self.rcbuf_var = rcbuf_var
def __repr__(self):
return "<BufferAux %r>" % self.__dict__
class Entry(object):
# A symbol table entry in a Scope or ModuleNamespace.
#
# name string Python name of entity
# cname string C name of entity
# type PyrexType Type of entity
# doc string Doc string
# annotation ExprNode PEP 484/526 annotation
# init string Initial value
# visibility 'private' or 'public' or 'extern'
# is_builtin boolean Is an entry in the Python builtins dict
# is_cglobal boolean Is a C global variable
# is_pyglobal boolean Is a Python module-level variable
# or class attribute during
# class construction
# is_member boolean Is an assigned class member
# is_pyclass_attr boolean Is a name in a Python class namespace
# is_variable boolean Is a variable
# is_cfunction boolean Is a C function
# is_cmethod boolean Is a C method of an extension type
# is_builtin_cmethod boolean Is a C method of a builtin type (implies is_cmethod)
# is_unbound_cmethod boolean Is an unbound C method of an extension type
# is_final_cmethod boolean Is non-overridable C method
# is_inline_cmethod boolean Is inlined C method
# is_anonymous boolean Is an anonymous pyfunction entry
# is_type boolean Is a type definition
# is_cclass boolean Is an extension class
# is_cpp_class boolean Is a C++ class
# is_const boolean Is a constant
# is_property boolean Is a property of an extension type:
# doc_cname string or None C const holding the docstring
# getter_cname string C func for getting property
# setter_cname string C func for setting or deleting property
# is_cproperty boolean Is an inline property of an external type
# is_self_arg boolean Is the "self" arg of an exttype method
# is_arg boolean Is the arg of a method
# is_local boolean Is a local variable
# in_closure boolean Is referenced in an inner scope
# in_subscope boolean Belongs to a generator expression scope
# is_readonly boolean Can't be assigned to
# func_cname string C func implementing Python func
# func_modifiers [string] C function modifiers ('inline')
# pos position Source position where declared
# namespace_cname string If is_pyglobal, the C variable
# holding its home namespace
# pymethdef_cname string PyMethodDef structure
# signature Signature Arg & return types for Python func
# as_variable Entry Alternative interpretation of extension
# type name or builtin C function as a variable
# xdecref_cleanup boolean Use Py_XDECREF for error cleanup
# in_cinclude boolean Suppress C declaration code
# enum_values [Entry] For enum types, list of values
# qualified_name string "modname.funcname" or "modname.classname"
# or "modname.classname.funcname"
# is_declared_generic boolean Is declared as PyObject * even though its
# type is an extension type
# as_module None Module scope, if a cimported module
# is_inherited boolean Is an inherited attribute of an extension type
# pystring_cname string C name of Python version of string literal
# is_interned boolean For string const entries, value is interned
# is_identifier boolean For string const entries, value is an identifier
# used boolean
# is_special boolean Is a special method or property accessor
# of an extension type
# defined_in_pxd boolean Is defined in a .pxd file (not just declared)
# api boolean Generate C API for C class or function
# utility_code string Utility code needed when this entry is used
#
# buffer_aux BufferAux or None Extra information needed for buffer variables
# inline_func_in_pxd boolean Hacky special case for inline function in pxd file.
# Ideally this should not be necessary.
# might_overflow boolean In an arithmetic expression that could cause
# overflow (used for type inference).
# utility_code_definition For some Cython builtins, the utility code
# which contains the definition of the entry.
# Currently only supported for CythonScope entries.
# error_on_uninitialized Have Control Flow issue an error when this entry is
# used uninitialized
# cf_used boolean Entry is used
# is_fused_specialized boolean Whether this entry of a cdef or def function
# is a specialization
# is_cgetter boolean Is a c-level getter function
    # TODO: utility_code and utility_code_definition serve the same purpose...
inline_func_in_pxd = False
borrowed = 0
init = ""
annotation = None
visibility = 'private'
is_builtin = 0
is_cglobal = 0
is_pyglobal = 0
is_member = 0
is_pyclass_attr = 0
is_variable = 0
is_cfunction = 0
is_cmethod = 0
is_builtin_cmethod = False
is_unbound_cmethod = 0
is_final_cmethod = 0
is_inline_cmethod = 0
is_anonymous = 0
is_type = 0
is_cclass = 0
is_cpp_class = 0
is_const = 0
is_property = 0
is_cproperty = 0
doc_cname = None
getter_cname = None
setter_cname = None
is_self_arg = 0
is_arg = 0
is_local = 0
in_closure = 0
from_closure = 0
in_subscope = 0
is_declared_generic = 0
is_readonly = 0
pyfunc_cname = None
func_cname = None
func_modifiers = []
final_func_cname = None
doc = None
as_variable = None
xdecref_cleanup = 0
in_cinclude = 0
as_module = None
is_inherited = 0
pystring_cname = None
is_identifier = 0
is_interned = 0
used = 0
is_special = 0
defined_in_pxd = 0
is_implemented = 0
api = 0
utility_code = None
is_overridable = 0
buffer_aux = None
prev_entry = None
might_overflow = 0
fused_cfunction = None
is_fused_specialized = False
utility_code_definition = None
needs_property = False
in_with_gil_block = 0
from_cython_utility_code = None
error_on_uninitialized = False
cf_used = True
outer_entry = None
is_cgetter = False
def __init__(self, name, cname, type, pos = None, init = None):
self.name = name
self.cname = cname
self.type = type
self.pos = pos
self.init = init
self.overloaded_alternatives = []
self.cf_assignments = []
self.cf_references = []
self.inner_entries = []
self.defining_entry = self
def __repr__(self):
return "%s(<%x>, name=%s, type=%s)" % (type(self).__name__, id(self), self.name, self.type)
def already_declared_here(self):
error(self.pos, "Previous declaration is here")
def redeclared(self, pos):
error(pos, "'%s' does not match previous declaration" % self.name)
self.already_declared_here()
def all_alternatives(self):
return [self] + self.overloaded_alternatives
def all_entries(self):
return [self] + self.inner_entries
def __lt__(left, right):
if isinstance(left, Entry) and isinstance(right, Entry):
return (left.name, left.cname) < (right.name, right.cname)
else:
return NotImplemented
@property
def cf_is_reassigned(self):
return len(self.cf_assignments) > 1
class InnerEntry(Entry):
"""
An entry in a closure scope that represents the real outer Entry.
"""
from_closure = True
def __init__(self, outer_entry, scope):
Entry.__init__(self, outer_entry.name,
outer_entry.cname,
outer_entry.type,
outer_entry.pos)
self.outer_entry = outer_entry
self.scope = scope
# share state with (outermost) defining entry
outermost_entry = outer_entry
while outermost_entry.outer_entry:
outermost_entry = outermost_entry.outer_entry
self.defining_entry = outermost_entry
self.inner_entries = outermost_entry.inner_entries
self.cf_assignments = outermost_entry.cf_assignments
self.cf_references = outermost_entry.cf_references
self.overloaded_alternatives = outermost_entry.overloaded_alternatives
self.inner_entries.append(self)
def __getattr__(self, name):
if name.startswith('__'):
# we wouldn't have been called if it was there
raise AttributeError(name)
return getattr(self.defining_entry, name)
def all_entries(self):
return self.defining_entry.all_entries()
class Scope(object):
# name string Unqualified name
# outer_scope Scope or None Enclosing scope
# entries {string : Entry} Python name to entry, non-types
# const_entries [Entry] Constant entries
# type_entries [Entry] Struct/union/enum/typedef/exttype entries
# sue_entries [Entry] Struct/union/enum entries
# arg_entries [Entry] Function argument entries
# var_entries [Entry] User-defined variable entries
# pyfunc_entries [Entry] Python function entries
# cfunc_entries [Entry] C function entries
# c_class_entries [Entry] All extension type entries
# cname_to_entry {string : Entry} Temp cname to entry mapping
# return_type PyrexType or None Return type of function owning scope
# is_builtin_scope boolean Is the builtin scope of Python/Cython
# is_py_class_scope boolean Is a Python class scope
# is_c_class_scope boolean Is an extension type scope
# is_closure_scope boolean Is a closure scope
# is_passthrough boolean Outer scope is passed directly
# is_cpp_class_scope boolean Is a C++ class scope
    # is_property_scope boolean Is an extension type property scope
# scope_prefix string Disambiguator for C names
# in_cinclude boolean Suppress C declaration code
# qualified_name string "modname" or "modname.classname"
    # pystring_entries [Entry] String const entries newly used as
    #     Python strings in this scope
# nogil boolean In a nogil section
# directives dict Helper variable for the recursive
# analysis, contains directive values.
# is_internal boolean Is only used internally (simpler setup)
is_builtin_scope = 0
is_py_class_scope = 0
is_c_class_scope = 0
is_closure_scope = 0
is_genexpr_scope = 0
is_passthrough = 0
is_cpp_class_scope = 0
is_property_scope = 0
is_module_scope = 0
is_internal = 0
scope_prefix = ""
in_cinclude = 0
nogil = 0
fused_to_specific = None
return_type = None
def __init__(self, name, outer_scope, parent_scope):
# The outer_scope is the next scope in the lookup chain.
# The parent_scope is used to derive the qualified name of this scope.
self.name = name
self.outer_scope = outer_scope
self.parent_scope = parent_scope
mangled_name = "%d%s_" % (len(name), name.replace('.', '_dot_'))
qual_scope = self.qualifying_scope()
if qual_scope:
self.qualified_name = qual_scope.qualify_name(name)
self.scope_prefix = qual_scope.scope_prefix + mangled_name
else:
self.qualified_name = EncodedString(name)
self.scope_prefix = mangled_name
self.entries = {}
self.subscopes = set()
self.const_entries = []
self.type_entries = []
self.sue_entries = []
self.arg_entries = []
self.var_entries = []
self.pyfunc_entries = []
self.cfunc_entries = []
self.c_class_entries = []
self.defined_c_classes = []
self.imported_c_classes = {}
self.cname_to_entry = {}
self.identifier_to_entry = {}
self.num_to_entry = {}
self.obj_to_entry = {}
self.buffer_entries = []
self.lambda_defs = []
self.id_counters = {}
def __deepcopy__(self, memo):
return self
def merge_in(self, other, merge_unused=True, whitelist=None):
# Use with care...
entries = []
for name, entry in other.entries.items():
if not whitelist or name in whitelist:
if entry.used or merge_unused:
entries.append((name, entry))
self.entries.update(entries)
for attr in ('const_entries',
'type_entries',
'sue_entries',
'arg_entries',
'var_entries',
'pyfunc_entries',
'cfunc_entries',
'c_class_entries'):
self_entries = getattr(self, attr)
names = set(e.name for e in self_entries)
for entry in getattr(other, attr):
if (entry.used or merge_unused) and entry.name not in names:
self_entries.append(entry)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.qualified_name)
def qualifying_scope(self):
return self.parent_scope
def mangle(self, prefix, name = None):
if name:
return punycodify_name("%s%s%s" % (prefix, self.scope_prefix, name))
else:
return self.parent_scope.mangle(prefix, self.name)
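    # Hedged example: for a top-level module scope named "m" (scope_prefix
    # "1m_"), mangle("__pyx_v_", "x") yields "__pyx_v_1m_x" -- the prefix,
    # this scope's scope_prefix and the Python name, punycodified if needed.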
def mangle_internal(self, name):
# Mangle an internal name so as not to clash with any
# user-defined name in this scope.
prefix = "%s%s_" % (Naming.pyrex_prefix, name)
return self.mangle(prefix)
#return self.parent_scope.mangle(prefix, self.name)
def mangle_class_private_name(self, name):
if self.parent_scope:
return self.parent_scope.mangle_class_private_name(name)
return name
def next_id(self, name=None):
# Return a cname fragment that is unique for this module
counters = self.global_scope().id_counters
try:
count = counters[name] + 1
except KeyError:
count = 0
counters[name] = count
if name:
if not count:
# unique names don't need a suffix, reoccurrences will get one
return name
return '%s%d' % (name, count)
else:
return '%d' % count
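    # Counter behaviour, informally:
    #   next_id("genexpr") -> "genexpr"     (first use keeps the bare name)
    #   next_id("genexpr") -> "genexpr1"    (later uses get a numeric suffix)
    #   next_id()          -> "0", "1", ... (anonymous ids are purely numeric)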
def global_scope(self):
""" Return the module-level scope containing this scope. """
return self.outer_scope.global_scope()
def builtin_scope(self):
""" Return the module-level scope containing this scope. """
return self.outer_scope.builtin_scope()
def iter_local_scopes(self):
yield self
if self.subscopes:
for scope in sorted(self.subscopes, key=operator.attrgetter('scope_prefix')):
yield scope
def declare(self, name, cname, type, pos, visibility, shadow = 0, is_type = 0, create_wrapper = 0):
# Create new entry, and add to dictionary if
        # name is not None. Reports a warning or error if already
        # declared.
if type.is_buffer and not isinstance(self, LocalScope): # and not is_type:
error(pos, 'Buffer types only allowed as function local variables')
if not self.in_cinclude and cname and re.match("^_[_A-Z]+$", cname):
# See https://www.gnu.org/software/libc/manual/html_node/Reserved-Names.html#Reserved-Names
warning(pos, "'%s' is a reserved name in C." % cname, -1)
entries = self.entries
if name and name in entries and not shadow:
old_entry = entries[name]
# Reject redeclared C++ functions only if they have the same type signature.
cpp_override_allowed = False
if type.is_cfunction and old_entry.type.is_cfunction and self.is_cpp():
for alt_entry in old_entry.all_alternatives():
if type == alt_entry.type:
if name == '<init>' and not type.args:
# Cython pre-declares the no-args constructor - allow later user definitions.
cpp_override_allowed = True
break
else:
cpp_override_allowed = True
if cpp_override_allowed:
# C++ function/method overrides with different signatures are ok.
pass
elif self.is_cpp_class_scope and entries[name].is_inherited:
# Likewise ignore inherited classes.
pass
elif visibility == 'extern':
# Silenced outside of "cdef extern" blocks, until we have a safe way to
# prevent pxd-defined cpdef functions from ending up here.
warning(pos, "'%s' redeclared " % name, 1 if self.in_cinclude else 0)
elif visibility != 'ignore':
error(pos, "'%s' redeclared " % name)
entries[name].already_declared_here()
entry = Entry(name, cname, type, pos = pos)
entry.in_cinclude = self.in_cinclude
entry.create_wrapper = create_wrapper
if name:
entry.qualified_name = self.qualify_name(name)
# if name in entries and self.is_cpp():
# entries[name].overloaded_alternatives.append(entry)
# else:
# entries[name] = entry
if not shadow:
entries[name] = entry
if type.is_memoryviewslice:
entry.init = type.default_value
entry.scope = self
entry.visibility = visibility
return entry
def qualify_name(self, name):
return EncodedString("%s.%s" % (self.qualified_name, name))
def declare_const(self, name, type, value, pos, cname = None, visibility = 'private', api = 0, create_wrapper = 0):
# Add an entry for a named constant.
if not cname:
if self.in_cinclude or (visibility == 'public' or api):
cname = name
else:
cname = self.mangle(Naming.enum_prefix, name)
entry = self.declare(name, cname, type, pos, visibility, create_wrapper = create_wrapper)
entry.is_const = 1
entry.value_node = value
return entry
def declare_type(self, name, type, pos,
cname = None, visibility = 'private', api = 0, defining = 1,
shadow = 0, template = 0):
# Add an entry for a type definition.
if not cname:
cname = name
entry = self.declare(name, cname, type, pos, visibility, shadow,
is_type=True)
entry.is_type = 1
entry.api = api
if defining:
self.type_entries.append(entry)
if not template:
type.entry = entry
# here we would set as_variable to an object representing this type
return entry
def declare_typedef(self, name, base_type, pos, cname = None,
visibility = 'private', api = 0):
if not cname:
if self.in_cinclude or (visibility != 'private' or api):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
try:
if self.is_cpp_class_scope:
namespace = self.outer_scope.lookup(self.name).type
else:
namespace = None
type = PyrexTypes.create_typedef_type(name, base_type, cname,
(visibility == 'extern'),
namespace)
except ValueError as e:
error(pos, e.args[0])
type = PyrexTypes.error_type
entry = self.declare_type(name, type, pos, cname,
visibility = visibility, api = api)
type.qualified_name = entry.qualified_name
return entry
def declare_struct_or_union(self, name, kind, scope,
typedef_flag, pos, cname = None,
visibility = 'private', api = 0,
packed = False):
# Add an entry for a struct or union definition.
if not cname:
if self.in_cinclude or (visibility == 'public' or api):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
entry = self.lookup_here(name)
if not entry:
type = PyrexTypes.CStructOrUnionType(
name, kind, scope, typedef_flag, cname, packed)
entry = self.declare_type(name, type, pos, cname,
visibility = visibility, api = api,
defining = scope is not None)
self.sue_entries.append(entry)
type.entry = entry
else:
if not (entry.is_type and entry.type.is_struct_or_union
and entry.type.kind == kind):
warning(pos, "'%s' redeclared " % name, 0)
elif scope and entry.type.scope:
warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
else:
self.check_previous_typedef_flag(entry, typedef_flag, pos)
self.check_previous_visibility(entry, visibility, pos)
if scope:
entry.type.scope = scope
self.type_entries.append(entry)
if self.is_cpp_class_scope:
entry.type.namespace = self.outer_scope.lookup(self.name).type
return entry
def declare_cpp_class(self, name, scope,
pos, cname = None, base_classes = (),
visibility = 'extern', templates = None):
if cname is None:
if self.in_cinclude or (visibility != 'private'):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
base_classes = list(base_classes)
entry = self.lookup_here(name)
if not entry:
type = PyrexTypes.CppClassType(
name, scope, cname, base_classes, templates = templates)
entry = self.declare_type(name, type, pos, cname,
visibility = visibility, defining = scope is not None)
self.sue_entries.append(entry)
else:
if not (entry.is_type and entry.type.is_cpp_class):
error(pos, "'%s' redeclared " % name)
entry.already_declared_here()
return None
elif scope and entry.type.scope:
warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
else:
if scope:
entry.type.scope = scope
self.type_entries.append(entry)
if base_classes:
if entry.type.base_classes and entry.type.base_classes != base_classes:
error(pos, "Base type does not match previous declaration")
entry.already_declared_here()
else:
entry.type.base_classes = base_classes
if templates or entry.type.templates:
if templates != entry.type.templates:
error(pos, "Template parameters do not match previous declaration")
entry.already_declared_here()
def declare_inherited_attributes(entry, base_classes):
for base_class in base_classes:
if base_class is PyrexTypes.error_type:
continue
if base_class.scope is None:
error(pos, "Cannot inherit from incomplete type")
else:
declare_inherited_attributes(entry, base_class.base_classes)
entry.type.scope.declare_inherited_cpp_attributes(base_class)
if scope:
declare_inherited_attributes(entry, base_classes)
scope.declare_var(name="this", cname="this", type=PyrexTypes.CPtrType(entry.type), pos=entry.pos)
if self.is_cpp_class_scope:
entry.type.namespace = self.outer_scope.lookup(self.name).type
return entry
def check_previous_typedef_flag(self, entry, typedef_flag, pos):
if typedef_flag != entry.type.typedef_flag:
error(pos, "'%s' previously declared using '%s'" % (
entry.name, ("cdef", "ctypedef")[entry.type.typedef_flag]))
def check_previous_visibility(self, entry, visibility, pos):
if entry.visibility != visibility:
error(pos, "'%s' previously declared as '%s'" % (
entry.name, entry.visibility))
def declare_enum(self, name, pos, cname, typedef_flag,
visibility = 'private', api = 0, create_wrapper = 0):
if name:
if not cname:
if (self.in_cinclude or visibility == 'public'
or visibility == 'extern' or api):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
if self.is_cpp_class_scope:
namespace = self.outer_scope.lookup(self.name).type
else:
namespace = None
type = PyrexTypes.CEnumType(name, cname, typedef_flag, namespace)
else:
type = PyrexTypes.c_anon_enum_type
entry = self.declare_type(name, type, pos, cname = cname,
visibility = visibility, api = api)
entry.create_wrapper = create_wrapper
entry.enum_values = []
self.sue_entries.append(entry)
return entry
def declare_tuple_type(self, pos, components):
return self.outer_scope.declare_tuple_type(pos, components)
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0):
# Add an entry for a variable.
if not cname:
if visibility != 'private' or api:
cname = name
else:
cname = self.mangle(Naming.var_prefix, name)
if type.is_cpp_class and visibility != 'extern':
type.check_nullary_constructor(pos)
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = 1
if in_pxd and visibility != 'extern':
entry.defined_in_pxd = 1
entry.used = 1
if api:
entry.api = 1
entry.used = 1
return entry
def declare_builtin(self, name, pos):
name = self.mangle_class_private_name(name)
return self.outer_scope.declare_builtin(name, pos)
def _declare_pyfunction(self, name, pos, visibility='extern', entry=None):
if entry and not entry.type.is_cfunction:
error(pos, "'%s' already declared" % name)
error(entry.pos, "Previous declaration is here")
entry = self.declare_var(name, py_object_type, pos, visibility=visibility)
entry.signature = pyfunction_signature
self.pyfunc_entries.append(entry)
return entry
def declare_pyfunction(self, name, pos, allow_redefine=False, visibility='extern'):
# Add an entry for a Python function.
entry = self.lookup_here(name)
if not allow_redefine:
return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
if entry:
if entry.type.is_unspecified:
entry.type = py_object_type
elif entry.type is not py_object_type:
return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
else: # declare entry stub
self.declare_var(name, py_object_type, pos, visibility=visibility)
entry = self.declare_var(None, py_object_type, pos,
cname=name, visibility='private')
entry.name = EncodedString(name)
entry.qualified_name = self.qualify_name(name)
entry.signature = pyfunction_signature
entry.is_anonymous = True
return entry
def declare_lambda_function(self, lambda_name, pos):
# Add an entry for an anonymous Python function.
func_cname = self.mangle(Naming.lambda_func_prefix + u'funcdef_', lambda_name)
pymethdef_cname = self.mangle(Naming.lambda_func_prefix + u'methdef_', lambda_name)
qualified_name = self.qualify_name(lambda_name)
entry = self.declare(None, func_cname, py_object_type, pos, 'private')
entry.name = EncodedString(lambda_name)
entry.qualified_name = qualified_name
entry.pymethdef_cname = pymethdef_cname
entry.func_cname = func_cname
entry.signature = pyfunction_signature
entry.is_anonymous = True
return entry
def add_lambda_def(self, def_node):
self.lambda_defs.append(def_node)
def register_pyfunction(self, entry):
self.pyfunc_entries.append(entry)
def declare_cfunction(self, name, type, pos,
cname=None, visibility='private', api=0, in_pxd=0,
defining=0, modifiers=(), utility_code=None, overridable=False):
# Add an entry for a C function.
if not cname:
if visibility != 'private' or api:
cname = name
else:
cname = self.mangle(Naming.func_prefix, name)
entry = self.lookup_here(name)
if entry:
if not in_pxd and visibility != entry.visibility and visibility == 'extern':
# Previously declared, but now extern => treat this
# as implementing the function, using the new cname
defining = True
visibility = entry.visibility
entry.cname = cname
entry.func_cname = cname
if visibility != 'private' and visibility != entry.visibility:
warning(pos, "Function '%s' previously declared as '%s', now as '%s'" % (
name, entry.visibility, visibility), 1)
if overridable != entry.is_overridable:
warning(pos, "Function '%s' previously declared as '%s'" % (
name, 'cpdef' if overridable else 'cdef'), 1)
if entry.type.same_as(type):
# Fix with_gil vs nogil.
entry.type = entry.type.with_with_gil(type.with_gil)
else:
if visibility == 'extern' and entry.visibility == 'extern':
can_override = False
if self.is_cpp():
can_override = True
elif cname:
# if all alternatives have different cnames,
# it's safe to allow signature overrides
for alt_entry in entry.all_alternatives():
if not alt_entry.cname or cname == alt_entry.cname:
break # cname not unique!
else:
can_override = True
if can_override:
temp = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
temp.overloaded_alternatives = entry.all_alternatives()
entry = temp
else:
warning(pos, "Function signature does not match previous declaration", 1)
entry.type = type
elif not in_pxd and entry.defined_in_pxd and type.compatible_signature_with(entry.type):
# TODO: check that this was done by a signature optimisation and not a user error.
#warning(pos, "Function signature does not match previous declaration", 1)
entry.type = type
else:
error(pos, "Function signature does not match previous declaration")
else:
entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
entry.func_cname = cname
entry.is_overridable = overridable
if in_pxd and visibility != 'extern':
entry.defined_in_pxd = 1
if api:
entry.api = 1
if not defining and not in_pxd and visibility != 'extern':
error(pos, "Non-extern C function '%s' declared but not defined" % name)
if defining:
entry.is_implemented = True
if modifiers:
entry.func_modifiers = modifiers
if utility_code:
assert not entry.utility_code, "duplicate utility code definition in entry %s (%s)" % (name, cname)
entry.utility_code = utility_code
if overridable:
# names of cpdef functions can be used as variables and can be assigned to
var_entry = Entry(name, cname, py_object_type) # FIXME: cname?
var_entry.qualified_name = self.qualify_name(name)
var_entry.is_variable = 1
var_entry.is_pyglobal = 1
var_entry.scope = entry.scope
entry.as_variable = var_entry
type.entry = entry
return entry
def declare_cgetter(self, name, return_type, pos=None, cname=None,
visibility="private", modifiers=(), defining=False, **cfunc_type_config):
assert all(
k in ('exception_value', 'exception_check', 'nogil', 'with_gil', 'is_const_method', 'is_static_method')
for k in cfunc_type_config
)
cfunc_type = PyrexTypes.CFuncType(
return_type,
[PyrexTypes.CFuncTypeArg("self", self.parent_type, None)],
**cfunc_type_config)
entry = self.declare_cfunction(
name, cfunc_type, pos, cname=None, visibility=visibility, modifiers=modifiers, defining=defining)
entry.is_cgetter = True
if cname is not None:
entry.func_cname = cname
return entry
def add_cfunction(self, name, type, pos, cname, visibility, modifiers, inherited=False):
# Add a C function entry without giving it a func_cname.
entry = self.declare(name, cname, type, pos, visibility)
entry.is_cfunction = 1
if modifiers:
entry.func_modifiers = modifiers
if inherited or type.is_fused:
self.cfunc_entries.append(entry)
else:
# For backwards compatibility reasons, we must keep all non-fused methods
# before all fused methods, but separately for each type.
i = len(self.cfunc_entries)
for cfunc_entry in reversed(self.cfunc_entries):
if cfunc_entry.is_inherited or not cfunc_entry.type.is_fused:
break
i -= 1
self.cfunc_entries.insert(i, entry)
return entry
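    # Ordering invariant kept by the insertion above, e.g. starting from
    # [meth_a, meth_b, fused_c, fused_d], a new non-fused method lands at
    # index 2, so plain methods always precede fused specialisations.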
def find(self, name, pos):
# Look up name, report error if not found.
entry = self.lookup(name)
if entry:
return entry
else:
error(pos, "'%s' is not declared" % name)
def find_imported_module(self, path, pos):
# Look up qualified name, must be a module, report error if not found.
# Path is a list of names.
scope = self
for name in path:
entry = scope.find(name, pos)
if not entry:
return None
if entry.as_module:
scope = entry.as_module
else:
error(pos, "'%s' is not a cimported module" % '.'.join(path))
return None
return scope
def lookup(self, name):
# Look up name in this scope or an enclosing one.
# Return None if not found.
mangled_name = self.mangle_class_private_name(name)
entry = (self.lookup_here(name) # lookup here also does mangling
or (self.outer_scope and self.outer_scope.lookup(mangled_name))
or None)
if entry:
return entry
# look up the original name in the outer scope
# Not strictly Python behaviour but see https://github.com/cython/cython/issues/3544
entry = (self.outer_scope and self.outer_scope.lookup(name)) or None
if entry and entry.is_pyglobal:
self._emit_class_private_warning(entry.pos, name)
return entry
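    # Sketch of the resulting order for class-private names: "__attr" in a
    # class body is first tried under its mangled spelling (roughly
    # "_ClassName__attr") here and in outer scopes; only then is the unmangled
    # spelling tried outside, with a warning if that hits a Python global.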
def lookup_here(self, name):
# Look up in this scope only, return None if not found.
entry = self.entries.get(self.mangle_class_private_name(name), None)
if entry:
return entry
# Also check the unmangled name in the current scope
# (even if mangling should give us something else).
# This is to support things like global __foo which makes a declaration for __foo
return self.entries.get(name, None)
def lookup_here_unmangled(self, name):
return self.entries.get(name, None)
def lookup_target(self, name):
# Look up name in this scope only. Declare as Python
# variable if not found.
entry = self.lookup_here(name)
if not entry:
entry = self.lookup_here_unmangled(name)
if entry and entry.is_pyglobal:
self._emit_class_private_warning(entry.pos, name)
if not entry:
entry = self.declare_var(name, py_object_type, None)
return entry
def lookup_type(self, name):
entry = self.lookup(name)
if entry and entry.is_type:
if entry.type.is_fused and self.fused_to_specific:
return entry.type.specialize(self.fused_to_specific)
return entry.type
def lookup_operator(self, operator, operands):
if operands[0].type.is_cpp_class:
obj_type = operands[0].type
method = obj_type.scope.lookup("operator%s" % operator)
if method is not None:
arg_types = [arg.type for arg in operands[1:]]
res = PyrexTypes.best_match(arg_types, method.all_alternatives())
if res is not None:
return res
function = self.lookup("operator%s" % operator)
function_alternatives = []
if function is not None:
function_alternatives = function.all_alternatives()
        # look up non-member methods listed within a class
method_alternatives = []
if len(operands) == 2: # binary operators only
for n in range(2):
if operands[n].type.is_cpp_class:
obj_type = operands[n].type
method = obj_type.scope.lookup("operator%s" % operator)
if method is not None:
method_alternatives += method.all_alternatives()
if (not method_alternatives) and (not function_alternatives):
return None
# select the unique alternatives
all_alternatives = list(set(method_alternatives + function_alternatives))
return PyrexTypes.best_match([arg.type for arg in operands],
all_alternatives)
def lookup_operator_for_types(self, pos, operator, types):
from .Nodes import Node
class FakeOperand(Node):
pass
operands = [FakeOperand(pos, type=type) for type in types]
return self.lookup_operator(operator, operands)
def _emit_class_private_warning(self, pos, name):
warning(pos, "Global name %s matched from within class scope "
"in contradiction to to Python 'class private name' rules. "
"This may change in a future release." % name, 1)
def use_utility_code(self, new_code):
self.global_scope().use_utility_code(new_code)
def use_entry_utility_code(self, entry):
self.global_scope().use_entry_utility_code(entry)
def defines_any(self, names):
# Test whether any of the given names are defined in this scope.
for name in names:
if name in self.entries:
return 1
return 0
def defines_any_special(self, names):
# Test whether any of the given names are defined as special methods in this scope.
for name in names:
if name in self.entries and self.entries[name].is_special:
return 1
return 0
def infer_types(self):
from .TypeInference import get_type_inferer
get_type_inferer().infer_types(self)
def is_cpp(self):
outer = self.outer_scope
if outer is None:
return False
else:
return outer.is_cpp()
def add_include_file(self, filename, verbatim_include=None, late=False):
self.outer_scope.add_include_file(filename, verbatim_include, late)
class PreImportScope(Scope):
namespace_cname = Naming.preimport_cname
def __init__(self):
Scope.__init__(self, Options.pre_import, None, None)
def declare_builtin(self, name, pos):
entry = self.declare(name, name, py_object_type, pos, 'private')
entry.is_variable = True
entry.is_pyglobal = True
return entry
class BuiltinScope(Scope):
# The builtin namespace.
is_builtin_scope = True
def __init__(self):
if Options.pre_import is None:
Scope.__init__(self, "__builtin__", None, None)
else:
Scope.__init__(self, "__builtin__", PreImportScope(), None)
self.type_names = {}
for name, definition in sorted(self.builtin_entries.items()):
cname, type = definition
self.declare_var(name, type, None, cname)
def lookup(self, name, language_level=None, str_is_str=None):
# 'language_level' and 'str_is_str' are passed by ModuleScope
if name == 'str':
if str_is_str is None:
str_is_str = language_level in (None, 2)
if not str_is_str:
name = 'unicode'
return Scope.lookup(self, name)
def declare_builtin(self, name, pos):
if not hasattr(builtins, name):
if self.outer_scope is not None:
return self.outer_scope.declare_builtin(name, pos)
else:
if Options.error_on_unknown_names:
error(pos, "undeclared name not builtin: %s" % name)
else:
warning(pos, "undeclared name not builtin: %s" % name, 2)
def declare_builtin_cfunction(self, name, type, cname, python_equiv=None, utility_code=None):
# If python_equiv == "*", the Python equivalent has the same name
# as the entry, otherwise it has the name specified by python_equiv.
name = EncodedString(name)
entry = self.declare_cfunction(name, type, None, cname, visibility='extern', utility_code=utility_code)
if python_equiv:
if python_equiv == "*":
python_equiv = name
else:
python_equiv = EncodedString(python_equiv)
var_entry = Entry(python_equiv, python_equiv, py_object_type)
var_entry.qualified_name = self.qualify_name(name)
var_entry.is_variable = 1
var_entry.is_builtin = 1
var_entry.utility_code = utility_code
var_entry.scope = entry.scope
entry.as_variable = var_entry
return entry
def declare_builtin_type(self, name, cname, utility_code = None, objstruct_cname = None):
name = EncodedString(name)
type = PyrexTypes.BuiltinObjectType(name, cname, objstruct_cname)
scope = CClassScope(name, outer_scope=None, visibility='extern')
scope.directives = {}
if name == 'bool':
type.is_final_type = True
type.set_scope(scope)
self.type_names[name] = 1
entry = self.declare_type(name, type, None, visibility='extern')
entry.utility_code = utility_code
var_entry = Entry(
name=entry.name,
type=self.lookup('type').type, # make sure "type" is the first type declared...
pos=entry.pos,
cname=entry.type.typeptr_cname,
)
var_entry.qualified_name = self.qualify_name(name)
var_entry.is_variable = 1
var_entry.is_cglobal = 1
var_entry.is_readonly = 1
var_entry.is_builtin = 1
var_entry.utility_code = utility_code
var_entry.scope = self
if Options.cache_builtins:
var_entry.is_const = True
entry.as_variable = var_entry
return type
def builtin_scope(self):
return self
builtin_entries = {
"type": ["((PyObject*)&PyType_Type)", py_object_type],
"bool": ["((PyObject*)&PyBool_Type)", py_object_type],
"int": ["((PyObject*)&PyInt_Type)", py_object_type],
"long": ["((PyObject*)&PyLong_Type)", py_object_type],
"float": ["((PyObject*)&PyFloat_Type)", py_object_type],
"complex":["((PyObject*)&PyComplex_Type)", py_object_type],
"bytes": ["((PyObject*)&PyBytes_Type)", py_object_type],
"bytearray": ["((PyObject*)&PyByteArray_Type)", py_object_type],
"str": ["((PyObject*)&PyString_Type)", py_object_type],
"unicode":["((PyObject*)&PyUnicode_Type)", py_object_type],
"tuple": ["((PyObject*)&PyTuple_Type)", py_object_type],
"list": ["((PyObject*)&PyList_Type)", py_object_type],
"dict": ["((PyObject*)&PyDict_Type)", py_object_type],
"set": ["((PyObject*)&PySet_Type)", py_object_type],
"frozenset": ["((PyObject*)&PyFrozenSet_Type)", py_object_type],
"slice": ["((PyObject*)&PySlice_Type)", py_object_type],
# "file": ["((PyObject*)&PyFile_Type)", py_object_type], # not in Py3
"None": ["Py_None", py_object_type],
"False": ["Py_False", py_object_type],
"True": ["Py_True", py_object_type],
}
const_counter = 1 # As a temporary solution for compiling code in pxds
class ModuleScope(Scope):
# module_name string Python name of the module
# module_cname string C name of Python module object
# #module_dict_cname string C name of module dict object
# method_table_cname string C name of method table
# doc string Module doc string
# doc_cname string C name of module doc string
# utility_code_list [UtilityCode] Queuing utility codes for forwarding to Code.py
# c_includes {key: IncludeCode} C headers or verbatim code to be generated
# See process_include() for more documentation
# identifier_to_entry {string : Entry} Map identifier string const to entry
# context Context
# parent_module Scope Parent in the import namespace
# module_entries {string : Entry} For cimport statements
# type_names {string : 1} Set of type names (used during parsing)
# included_files [string] Cython sources included with 'include'
# pxd_file_loaded boolean Corresponding .pxd file has been processed
# cimported_modules [ModuleScope] Modules imported with cimport
# types_imported {PyrexType} Set of types for which import code generated
# has_import_star boolean Module contains import *
# cpp boolean Compiling a C++ file
# is_cython_builtin boolean Is this the Cython builtin scope (or a child scope)
# is_package boolean Is this a package module? (__init__)
is_module_scope = 1
has_import_star = 0
is_cython_builtin = 0
old_style_globals = 0
def __init__(self, name, parent_module, context):
from . import Builtin
self.parent_module = parent_module
outer_scope = Builtin.builtin_scope
Scope.__init__(self, name, outer_scope, parent_module)
if name == "__init__":
# Treat Spam/__init__.pyx specially, so that when Python loads
# Spam/__init__.so, initSpam() is defined.
self.module_name = parent_module.module_name
self.is_package = True
else:
self.module_name = name
self.is_package = False
self.module_name = EncodedString(self.module_name)
self.context = context
self.module_cname = Naming.module_cname
self.module_dict_cname = Naming.moddict_cname
self.method_table_cname = Naming.methtable_cname
self.doc = ""
self.doc_cname = Naming.moddoc_cname
self.utility_code_list = []
self.module_entries = {}
self.c_includes = {}
self.type_names = dict(outer_scope.type_names)
self.pxd_file_loaded = 0
self.cimported_modules = []
self.types_imported = set()
self.included_files = []
self.has_extern_class = 0
self.cached_builtins = []
self.undeclared_cached_builtins = []
self.namespace_cname = self.module_cname
self._cached_tuple_types = {}
for var_name in ['__builtins__', '__name__', '__file__', '__doc__', '__path__',
'__spec__', '__loader__', '__package__', '__cached__']:
self.declare_var(EncodedString(var_name), py_object_type, None)
self.process_include(Code.IncludeCode("Python.h", initial=True))
def qualifying_scope(self):
return self.parent_module
def global_scope(self):
return self
def lookup(self, name, language_level=None, str_is_str=None):
entry = self.lookup_here(name)
if entry is not None:
return entry
if language_level is None:
language_level = self.context.language_level if self.context is not None else 3
if str_is_str is None:
str_is_str = language_level == 2 or (
self.context is not None and Future.unicode_literals not in self.context.future_directives)
return self.outer_scope.lookup(name, language_level=language_level, str_is_str=str_is_str)
def declare_tuple_type(self, pos, components):
components = tuple(components)
try:
ttype = self._cached_tuple_types[components]
except KeyError:
ttype = self._cached_tuple_types[components] = PyrexTypes.c_tuple_type(components)
cname = ttype.cname
entry = self.lookup_here(cname)
if not entry:
scope = StructOrUnionScope(cname)
for ix, component in enumerate(components):
scope.declare_var(name="f%s" % ix, type=component, pos=pos)
struct_entry = self.declare_struct_or_union(
cname + '_struct', 'struct', scope, typedef_flag=True, pos=pos, cname=cname)
self.type_entries.remove(struct_entry)
ttype.struct_entry = struct_entry
entry = self.declare_type(cname, ttype, pos, cname)
ttype.entry = entry
return entry
def declare_builtin(self, name, pos):
if not hasattr(builtins, name) \
and name not in Code.non_portable_builtins_map \
and name not in Code.uncachable_builtins:
if self.has_import_star:
entry = self.declare_var(name, py_object_type, pos)
return entry
else:
if Options.error_on_unknown_names:
error(pos, "undeclared name not builtin: %s" % name)
else:
warning(pos, "undeclared name not builtin: %s" % name, 2)
# unknown - assume it's builtin and look it up at runtime
entry = self.declare(name, None, py_object_type, pos, 'private')
entry.is_builtin = 1
return entry
if Options.cache_builtins:
for entry in self.cached_builtins:
if entry.name == name:
return entry
if name == 'globals' and not self.old_style_globals:
return self.outer_scope.lookup('__Pyx_Globals')
else:
entry = self.declare(None, None, py_object_type, pos, 'private')
if Options.cache_builtins and name not in Code.uncachable_builtins:
entry.is_builtin = 1
entry.is_const = 1 # cached
entry.name = name
entry.cname = Naming.builtin_prefix + name
self.cached_builtins.append(entry)
self.undeclared_cached_builtins.append(entry)
else:
entry.is_builtin = 1
entry.name = name
entry.qualified_name = self.builtin_scope().qualify_name(name)
return entry
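    # With Options.cache_builtins enabled, a builtin such as "len" thus gets a
    # cached module-level entry whose cname is Naming.builtin_prefix + "len"
    # (typically "__pyx_builtin_len"), looked up once during module init.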
def find_module(self, module_name, pos, relative_level=-1):
# Find a module in the import namespace, interpreting
# relative imports relative to this module's parent.
# Finds and parses the module's .pxd file if the module
# has not been referenced before.
relative_to = None
absolute_fallback = False
if relative_level is not None and relative_level > 0:
# explicit relative cimport
# error of going beyond top-level is handled in cimport node
relative_to = self
while relative_level > 0 and relative_to:
relative_to = relative_to.parent_module
relative_level -= 1
elif relative_level != 0:
# -1 or None: try relative cimport first, then absolute
relative_to = self.parent_module
absolute_fallback = True
module_scope = self.global_scope()
return module_scope.context.find_module(
module_name, relative_to=relative_to, pos=pos, absolute_fallback=absolute_fallback)
def find_submodule(self, name):
# Find and return scope for a submodule of this module,
# creating a new empty one if necessary. Doesn't parse .pxd.
if '.' in name:
name, submodule = name.split('.', 1)
else:
submodule = None
scope = self.lookup_submodule(name)
if not scope:
scope = ModuleScope(name, parent_module=self, context=self.context)
self.module_entries[name] = scope
if submodule:
scope = scope.find_submodule(submodule)
return scope
def lookup_submodule(self, name):
# Return scope for submodule of this module, or None.
if '.' in name:
name, submodule = name.split('.', 1)
else:
submodule = None
module = self.module_entries.get(name, None)
if submodule and module is not None:
module = module.lookup_submodule(submodule)
return module
def add_include_file(self, filename, verbatim_include=None, late=False):
"""
Add `filename` as include file. Add `verbatim_include` as
verbatim text in the C file.
Both `filename` and `verbatim_include` can be `None` or empty.
"""
inc = Code.IncludeCode(filename, verbatim_include, late=late)
self.process_include(inc)
def process_include(self, inc):
"""
Add `inc`, which is an instance of `IncludeCode`, to this
`ModuleScope`. This either adds a new element to the
`c_includes` dict or it updates an existing entry.
In detail: the values of the dict `self.c_includes` are
instances of `IncludeCode` containing the code to be put in the
generated C file. The keys of the dict are needed to ensure
uniqueness in two ways: if an include file is specified in
multiple "cdef extern" blocks, only one `#include` statement is
generated. Second, the same include might occur multiple times
if we find it through multiple "cimport" paths. So we use the
generated code (of the form `#include "header.h"`) as dict key.
If verbatim code does not belong to any include file (i.e. it
was put in a `cdef extern from *` block), then we use a unique
dict key: namely, the `sortkey()`.
One `IncludeCode` object can contain multiple pieces of C code:
one optional "main piece" for the include file and several other
pieces for the verbatim code. The `IncludeCode.dict_update`
method merges the pieces of two different `IncludeCode` objects
if needed.
"""
key = inc.mainpiece()
if key is None:
key = inc.sortkey()
inc.dict_update(self.c_includes, key)
inc = self.c_includes[key]
def add_imported_module(self, scope):
if scope not in self.cimported_modules:
for inc in scope.c_includes.values():
self.process_include(inc)
self.cimported_modules.append(scope)
for m in scope.cimported_modules:
self.add_imported_module(m)
def add_imported_entry(self, name, entry, pos):
if entry.is_pyglobal:
# Allow cimports to follow imports.
entry.is_variable = True
        if name not in self.entries:
self.entries[name] = entry
else:
warning(pos, "'%s' redeclared " % name, 0)
def declare_module(self, name, scope, pos):
# Declare a cimported module. This is represented as a
# Python module-level variable entry with a module
# scope attached to it. Reports an error and returns
# None if previously declared as something else.
entry = self.lookup_here(name)
if entry:
if entry.is_pyglobal and entry.as_module is scope:
return entry # Already declared as the same module
if not (entry.is_pyglobal and not entry.as_module):
# SAGE -- I put this here so Pyrex
                # cimports work across directories.
# Currently it tries to multiply define
# every module appearing in an import list.
# It shouldn't be an error for a module
# name to appear again, and indeed the generated
# code compiles fine.
return entry
else:
entry = self.declare_var(name, py_object_type, pos)
entry.is_variable = 0
entry.as_module = scope
self.add_imported_module(scope)
return entry
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0):
# Add an entry for a global variable. If it is a Python
# object type, and not declared with cdef, it will live
# in the module dictionary, otherwise it will be a C
# global variable.
if visibility not in ('private', 'public', 'extern'):
error(pos, "Module-level variable cannot be declared %s" % visibility)
if not is_cdef:
if type is unspecified_type:
type = py_object_type
if not (type.is_pyobject and not type.is_extension_type):
raise InternalError(
"Non-cdef global variable is not a generic Python object")
if not cname:
defining = not in_pxd
if visibility == 'extern' or (visibility == 'public' and defining):
cname = name
else:
cname = self.mangle(Naming.var_prefix, name)
entry = self.lookup_here(name)
if entry and entry.defined_in_pxd:
#if visibility != 'private' and visibility != entry.visibility:
# warning(pos, "Variable '%s' previously declared as '%s'" % (name, entry.visibility), 1)
if not entry.type.same_as(type):
if visibility == 'extern' and entry.visibility == 'extern':
warning(pos, "Variable '%s' type does not match previous declaration" % name, 1)
entry.type = type
#else:
# error(pos, "Variable '%s' type does not match previous declaration" % name)
if entry.visibility != "private":
mangled_cname = self.mangle(Naming.var_prefix, name)
if entry.cname == mangled_cname:
cname = name
entry.cname = name
if not entry.is_implemented:
entry.is_implemented = True
return entry
entry = Scope.declare_var(self, name, type, pos,
cname=cname, visibility=visibility,
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
if is_cdef:
entry.is_cglobal = 1
if entry.type.declaration_value:
entry.init = entry.type.declaration_value
self.var_entries.append(entry)
else:
entry.is_pyglobal = 1
if Options.cimport_from_pyx:
entry.used = 1
return entry
def declare_cfunction(self, name, type, pos,
cname=None, visibility='private', api=0, in_pxd=0,
defining=0, modifiers=(), utility_code=None, overridable=False):
if not defining and 'inline' in modifiers:
# TODO(github/1736): Make this an error.
warning(pos, "Declarations should not be declared inline.", 1)
# Add an entry for a C function.
if not cname:
if visibility == 'extern' or (visibility == 'public' and defining):
cname = name
else:
cname = self.mangle(Naming.func_prefix, name)
if visibility == 'extern' and type.optional_arg_count:
error(pos, "Extern functions cannot have default arguments values.")
entry = self.lookup_here(name)
if entry and entry.defined_in_pxd:
if entry.visibility != "private":
mangled_cname = self.mangle(Naming.var_prefix, name)
if entry.cname == mangled_cname:
cname = name
entry.cname = cname
entry.func_cname = cname
entry = Scope.declare_cfunction(
self, name, type, pos,
cname=cname, visibility=visibility, api=api, in_pxd=in_pxd,
defining=defining, modifiers=modifiers, utility_code=utility_code,
overridable=overridable)
return entry
def declare_global(self, name, pos):
entry = self.lookup_here(name)
if not entry:
self.declare_var(name, py_object_type, pos)
def use_utility_code(self, new_code):
if new_code is not None:
self.utility_code_list.append(new_code)
def use_entry_utility_code(self, entry):
if entry is None:
return
if entry.utility_code:
self.utility_code_list.append(entry.utility_code)
if entry.utility_code_definition:
self.utility_code_list.append(entry.utility_code_definition)
def declare_c_class(self, name, pos, defining=0, implementing=0,
module_name=None, base_type=None, objstruct_cname=None,
typeobj_cname=None, typeptr_cname=None, visibility='private',
typedef_flag=0, api=0, check_size=None,
buffer_defaults=None, shadow=0):
# If this is a non-extern typedef class, expose the typedef, but use
# the non-typedef struct internally to avoid needing forward
# declarations for anonymous structs.
if typedef_flag and visibility != 'extern':
if not (visibility == 'public' or api):
warning(pos, "ctypedef only valid for 'extern' , 'public', and 'api'", 2)
objtypedef_cname = objstruct_cname
typedef_flag = 0
else:
objtypedef_cname = None
#
# Look for previous declaration as a type
#
entry = self.lookup_here(name)
if entry and not shadow:
type = entry.type
if not (entry.is_type and type.is_extension_type):
entry = None # Will cause redeclaration and produce an error
else:
scope = type.scope
if typedef_flag and (not scope or scope.defined):
self.check_previous_typedef_flag(entry, typedef_flag, pos)
if (scope and scope.defined) or (base_type and type.base_type):
if base_type and base_type is not type.base_type:
error(pos, "Base type does not match previous declaration")
if base_type and not type.base_type:
type.base_type = base_type
#
# Make a new entry if needed
#
if not entry or shadow:
type = PyrexTypes.PyExtensionType(
name, typedef_flag, base_type, visibility == 'extern', check_size=check_size)
type.pos = pos
type.buffer_defaults = buffer_defaults
if objtypedef_cname is not None:
type.objtypedef_cname = objtypedef_cname
if visibility == 'extern':
type.module_name = module_name
else:
type.module_name = self.qualified_name
if typeptr_cname:
type.typeptr_cname = typeptr_cname
else:
type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
entry = self.declare_type(name, type, pos, visibility = visibility,
defining = 0, shadow = shadow)
entry.is_cclass = True
if objstruct_cname:
type.objstruct_cname = objstruct_cname
elif not entry.in_cinclude:
type.objstruct_cname = self.mangle(Naming.objstruct_prefix, name)
else:
error(entry.pos,
"Object name required for 'public' or 'extern' C class")
self.attach_var_entry_to_c_class(entry)
self.c_class_entries.append(entry)
#
# Check for re-definition and create scope if needed
#
if not type.scope:
if defining or implementing:
scope = CClassScope(name = name, outer_scope = self,
visibility = visibility)
scope.directives = self.directives.copy()
if base_type and base_type.scope:
scope.declare_inherited_c_attributes(base_type.scope)
type.set_scope(scope)
self.type_entries.append(entry)
else:
if defining and type.scope.defined:
error(pos, "C class '%s' already defined" % name)
elif implementing and type.scope.implemented:
error(pos, "C class '%s' already implemented" % name)
#
# Fill in options, checking for compatibility with any previous declaration
#
if defining:
entry.defined_in_pxd = 1
if implementing: # So that filenames in runtime exceptions refer to
entry.pos = pos # the .pyx file and not the .pxd file
if visibility != 'private' and entry.visibility != visibility:
error(pos, "Class '%s' previously declared as '%s'"
% (name, entry.visibility))
if api:
entry.api = 1
if objstruct_cname:
if type.objstruct_cname and type.objstruct_cname != objstruct_cname:
error(pos, "Object struct name differs from previous declaration")
type.objstruct_cname = objstruct_cname
if typeobj_cname:
if type.typeobj_cname and type.typeobj_cname != typeobj_cname:
error(pos, "Type object name differs from previous declaration")
type.typeobj_cname = typeobj_cname
if self.directives.get('final'):
entry.type.is_final_type = True
        # cdef classes are always exported, but we still mark the entry as used
        # here to distinguish it from unused Cython utility code extension classes
entry.used = True
#
# Return new or existing entry
#
return entry
def allocate_vtable_names(self, entry):
# If extension type has a vtable, allocate vtable struct and
# slot names for it.
type = entry.type
if type.base_type and type.base_type.vtabslot_cname:
#print "...allocating vtabslot_cname because base type has one" ###
type.vtabslot_cname = "%s.%s" % (
Naming.obj_base_cname, type.base_type.vtabslot_cname)
elif type.scope and type.scope.cfunc_entries:
# one special case here: when inheriting from builtin
# types, the methods may also be built-in, in which
# case they won't need a vtable
entry_count = len(type.scope.cfunc_entries)
base_type = type.base_type
while base_type:
# FIXME: this will break if we ever get non-inherited C methods
if not base_type.scope or entry_count > len(base_type.scope.cfunc_entries):
break
if base_type.is_builtin_type:
# builtin base type defines all methods => no vtable needed
return
base_type = base_type.base_type
#print "...allocating vtabslot_cname because there are C methods" ###
type.vtabslot_cname = Naming.vtabslot_cname
if type.vtabslot_cname:
#print "...allocating other vtable related cnames" ###
type.vtabstruct_cname = self.mangle(Naming.vtabstruct_prefix, entry.name)
type.vtabptr_cname = self.mangle(Naming.vtabptr_prefix, entry.name)
def check_c_classes_pxd(self):
# Performs post-analysis checking and finishing up of extension types
# being implemented in this module. This is called only for the .pxd.
#
# Checks all extension types declared in this scope to
# make sure that:
#
# * The extension type is fully declared
#
# Also allocates a name for the vtable if needed.
#
for entry in self.c_class_entries:
# Check defined
if not entry.type.scope:
error(entry.pos, "C class '%s' is declared but not defined" % entry.name)
def check_c_class(self, entry):
type = entry.type
name = entry.name
visibility = entry.visibility
# Check defined
if not type.scope:
error(entry.pos, "C class '%s' is declared but not defined" % name)
# Generate typeobj_cname
if visibility != 'extern' and not type.typeobj_cname:
type.typeobj_cname = self.mangle(Naming.typeobj_prefix, name)
## Generate typeptr_cname
#type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
# Check C methods defined
if type.scope:
for method_entry in type.scope.cfunc_entries:
if not method_entry.is_inherited and not method_entry.func_cname:
error(method_entry.pos, "C method '%s' is declared but not defined" %
method_entry.name)
# Allocate vtable name if necessary
if type.vtabslot_cname:
#print "ModuleScope.check_c_classes: allocating vtable cname for", self ###
type.vtable_cname = self.mangle(Naming.vtable_prefix, entry.name)
def check_c_classes(self):
# Performs post-analysis checking and finishing up of extension types
# being implemented in this module. This is called only for the main
# .pyx file scope, not for cimported .pxd scopes.
#
# Checks all extension types declared in this scope to
# make sure that:
#
# * The extension type is implemented
# * All required object and type names have been specified or generated
# * All non-inherited C methods are implemented
#
# Also allocates a name for the vtable if needed.
#
debug_check_c_classes = 0
if debug_check_c_classes:
print("Scope.check_c_classes: checking scope " + self.qualified_name)
for entry in self.c_class_entries:
if debug_check_c_classes:
print("...entry %s %s" % (entry.name, entry))
print("......type = ", entry.type)
print("......visibility = ", entry.visibility)
self.check_c_class(entry)
def check_c_functions(self):
        # Performs post-analysis checking to make sure all
        # defined C functions are actually implemented.
for name, entry in self.entries.items():
if entry.is_cfunction:
if (entry.defined_in_pxd
and entry.scope is self
and entry.visibility != 'extern'
and not entry.in_cinclude
and not entry.is_implemented):
error(entry.pos, "Non-extern C function '%s' declared but not defined" % name)
def attach_var_entry_to_c_class(self, entry):
# The name of an extension class has to serve as both a type
# name and a variable name holding the type object. It is
# represented in the symbol table by a type entry with a
# variable entry attached to it. For the variable entry,
# we use a read-only C global variable whose name is an
# expression that refers to the type object.
from . import Builtin
var_entry = Entry(name = entry.name,
type = Builtin.type_type,
pos = entry.pos,
cname = entry.type.typeptr_cname)
var_entry.qualified_name = entry.qualified_name
var_entry.is_variable = 1
var_entry.is_cglobal = 1
var_entry.is_readonly = 1
var_entry.scope = entry.scope
entry.as_variable = var_entry
def is_cpp(self):
return self.cpp
def infer_types(self):
from .TypeInference import PyObjectTypeInferer
PyObjectTypeInferer().infer_types(self)
class LocalScope(Scope):
# Does the function have a 'with gil:' block?
has_with_gil_block = False
# Transient attribute, used for symbol table variable declarations
_in_with_gil_block = False
def __init__(self, name, outer_scope, parent_scope = None):
if parent_scope is None:
parent_scope = outer_scope
Scope.__init__(self, name, outer_scope, parent_scope)
def mangle(self, prefix, name):
return punycodify_name(prefix + name)
def declare_arg(self, name, type, pos):
# Add an entry for an argument of a function.
name = self.mangle_class_private_name(name)
cname = self.mangle(Naming.var_prefix, name)
entry = self.declare(name, cname, type, pos, 'private')
entry.is_variable = 1
if type.is_pyobject:
entry.init = "0"
entry.is_arg = 1
#entry.borrowed = 1 # Not using borrowed arg refs for now
self.arg_entries.append(entry)
return entry
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0):
name = self.mangle_class_private_name(name)
# Add an entry for a local variable.
if visibility in ('public', 'readonly'):
error(pos, "Local variable cannot be declared %s" % visibility)
entry = Scope.declare_var(self, name, type, pos,
cname=cname, visibility=visibility,
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
if entry.type.declaration_value:
entry.init = entry.type.declaration_value
entry.is_local = 1
entry.in_with_gil_block = self._in_with_gil_block
self.var_entries.append(entry)
return entry
def declare_global(self, name, pos):
# Pull entry from global scope into local scope.
if self.lookup_here(name):
warning(pos, "'%s' redeclared ", 0)
else:
entry = self.global_scope().lookup_target(name)
self.entries[name] = entry
def declare_nonlocal(self, name, pos):
# Pull entry from outer scope into local scope
orig_entry = self.lookup_here(name)
if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
error(pos, "'%s' redeclared as nonlocal" % name)
orig_entry.already_declared_here()
else:
entry = self.lookup(name)
if entry is None or not entry.from_closure:
error(pos, "no binding for nonlocal '%s' found" % name)
def lookup(self, name):
# Look up name in this scope or an enclosing one.
# Return None if not found.
entry = Scope.lookup(self, name)
if entry is not None:
entry_scope = entry.scope
while entry_scope.is_genexpr_scope:
entry_scope = entry_scope.outer_scope
if entry_scope is not self and entry_scope.is_closure_scope:
if hasattr(entry.scope, "scope_class"):
raise InternalError("lookup() after scope class created.")
# The actual c fragment for the different scopes differs
# on the outside and inside, so we make a new entry
entry.in_closure = True
inner_entry = InnerEntry(entry, self)
inner_entry.is_variable = True
self.entries[name] = inner_entry
return inner_entry
return entry
def mangle_closure_cnames(self, outer_scope_cname):
for scope in self.iter_local_scopes():
for entry in scope.entries.values():
if entry.from_closure:
cname = entry.outer_entry.cname
if self.is_passthrough:
entry.cname = cname
else:
if cname.startswith(Naming.cur_scope_cname):
cname = cname[len(Naming.cur_scope_cname)+2:]
entry.cname = "%s->%s" % (outer_scope_cname, cname)
elif entry.in_closure:
entry.original_cname = entry.cname
entry.cname = "%s->%s" % (Naming.cur_scope_cname, entry.cname)
class GeneratorExpressionScope(Scope):
"""Scope for generator expressions and comprehensions. As opposed
to generators, these can be easily inlined in some cases, so all
we really need is a scope that holds the loop variable(s).
"""
is_genexpr_scope = True
def __init__(self, outer_scope):
parent_scope = outer_scope
# TODO: also ignore class scopes?
while parent_scope.is_genexpr_scope:
parent_scope = parent_scope.parent_scope
name = parent_scope.global_scope().next_id(Naming.genexpr_id_ref)
Scope.__init__(self, name, outer_scope, parent_scope)
self.directives = outer_scope.directives
self.genexp_prefix = "%s%d%s" % (Naming.pyrex_prefix, len(name), name)
# Class/ExtType scopes are filled at class creation time, i.e. from the
# module init function or surrounding function.
while outer_scope.is_genexpr_scope or outer_scope.is_c_class_scope or outer_scope.is_py_class_scope:
outer_scope = outer_scope.outer_scope
self.var_entries = outer_scope.var_entries # keep declarations outside
outer_scope.subscopes.add(self)
def mangle(self, prefix, name):
return '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(prefix, name))
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = True):
if type is unspecified_type:
# if the outer scope defines a type for this variable, inherit it
outer_entry = self.outer_scope.lookup(name)
if outer_entry and outer_entry.is_variable:
type = outer_entry.type # may still be 'unspecified_type' !
# the parent scope needs to generate code for the variable, but
# this scope must hold its name exclusively
cname = '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(Naming.var_prefix, name or self.next_id()))
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = True
if self.parent_scope.is_module_scope:
entry.is_cglobal = True
else:
entry.is_local = True
entry.in_subscope = True
self.var_entries.append(entry)
self.entries[name] = entry
return entry
def declare_pyfunction(self, name, pos, allow_redefine=False):
return self.outer_scope.declare_pyfunction(
name, pos, allow_redefine)
def declare_lambda_function(self, func_cname, pos):
return self.outer_scope.declare_lambda_function(func_cname, pos)
def add_lambda_def(self, def_node):
return self.outer_scope.add_lambda_def(def_node)
class ClosureScope(LocalScope):
is_closure_scope = True
def __init__(self, name, scope_name, outer_scope, parent_scope=None):
LocalScope.__init__(self, name, outer_scope, parent_scope)
self.closure_cname = "%s%s" % (Naming.closure_scope_prefix, scope_name)
# def mangle_closure_cnames(self, scope_var):
# for entry in self.entries.values() + self.temp_entries:
# entry.in_closure = 1
# LocalScope.mangle_closure_cnames(self, scope_var)
# def mangle(self, prefix, name):
# return "%s->%s" % (self.cur_scope_cname, name)
# return "%s->%s" % (self.closure_cname, name)
def declare_pyfunction(self, name, pos, allow_redefine=False):
return LocalScope.declare_pyfunction(self, name, pos, allow_redefine, visibility='private')
class StructOrUnionScope(Scope):
# Namespace of a C struct or union.
def __init__(self, name="?"):
Scope.__init__(self, name, None, None)
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0,
allow_pyobject=False, allow_memoryview=False):
# Add an entry for an attribute.
if not cname:
cname = name
if visibility == 'private':
cname = c_safe_identifier(cname)
if type.is_cfunction:
type = PyrexTypes.CPtrType(type)
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = 1
self.var_entries.append(entry)
if type.is_pyobject and not allow_pyobject:
error(pos, "C struct/union member cannot be a Python object")
elif type.is_memoryviewslice and not allow_memoryview:
# Memory views wrap their buffer owner as a Python object.
error(pos, "C struct/union member cannot be a memory view")
if visibility != 'private':
error(pos, "C struct/union member cannot be declared %s" % visibility)
return entry
def declare_cfunction(self, name, type, pos,
cname=None, visibility='private', api=0, in_pxd=0,
defining=0, modifiers=(), overridable=False): # currently no utility code ...
if overridable:
error(pos, "C struct/union member cannot be declared 'cpdef'")
return self.declare_var(name, type, pos,
cname=cname, visibility=visibility)
class ClassScope(Scope):
# Abstract base class for namespace of
# Python class or extension type.
#
# class_name string Python name of the class
# scope_prefix string Additional prefix for names
# declared in the class
# doc string or None Doc string
def mangle_class_private_name(self, name):
# a few utility code names specifically need to be ignored
if name and name.lower().startswith("__pyx_"):
return name
if name and name.startswith('__') and not name.endswith('__'):
name = EncodedString('_%s%s' % (self.class_name.lstrip('_'), name))
return name
def __init__(self, name, outer_scope):
Scope.__init__(self, name, outer_scope, outer_scope)
self.class_name = name
self.doc = None
def lookup(self, name):
entry = Scope.lookup(self, name)
if entry:
return entry
if name == "classmethod":
# We don't want to use the builtin classmethod here 'cause it won't do the
# right thing in this scope (as the class members aren't functions yet).
# Don't want to add a cfunction to this scope 'cause that would mess with
# the type definition, so we just return the right entry.
entry = Entry(
"classmethod",
"__Pyx_Method_ClassMethod",
PyrexTypes.CFuncType(
py_object_type,
[PyrexTypes.CFuncTypeArg("", py_object_type, None)], 0, 0))
entry.utility_code_definition = Code.UtilityCode.load_cached("ClassMethod", "CythonFunction.c")
self.use_entry_utility_code(entry)
entry.is_cfunction = 1
return entry
class PyClassScope(ClassScope):
# Namespace of a Python class.
#
# class_obj_cname string C variable holding class object
is_py_class_scope = 1
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0):
name = self.mangle_class_private_name(name)
if type is unspecified_type:
type = py_object_type
# Add an entry for a class attribute.
entry = Scope.declare_var(self, name, type, pos,
cname=cname, visibility=visibility,
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
entry.is_pyglobal = 1
entry.is_pyclass_attr = 1
return entry
def declare_nonlocal(self, name, pos):
# Pull entry from outer scope into local scope
orig_entry = self.lookup_here(name)
if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
error(pos, "'%s' redeclared as nonlocal" % name)
orig_entry.already_declared_here()
else:
entry = self.lookup(name)
if entry is None:
error(pos, "no binding for nonlocal '%s' found" % name)
else:
# FIXME: this works, but it's unclear if it's the
# right thing to do
self.entries[name] = entry
def declare_global(self, name, pos):
# Pull entry from global scope into local scope.
if self.lookup_here(name):
warning(pos, "'%s' redeclared ", 0)
else:
entry = self.global_scope().lookup_target(name)
self.entries[name] = entry
def add_default_value(self, type):
return self.outer_scope.add_default_value(type)
class CClassScope(ClassScope):
# Namespace of an extension type.
#
# parent_type PyExtensionType
# #typeobj_cname string or None
# #objstruct_cname string
# method_table_cname string
# getset_table_cname string
# has_pyobject_attrs boolean Any PyObject attributes?
# has_memoryview_attrs boolean Any memory view attributes?
# has_cpp_class_attrs boolean Any (non-pointer) C++ attributes?
# has_cyclic_pyobject_attrs boolean Any PyObject attributes that may need GC?
# property_entries [Entry]
# defined boolean Defined in .pxd file
# implemented boolean Defined in .pyx file
# inherited_var_entries [Entry] Adapted var entries from base class
is_c_class_scope = 1
is_closure_class_scope = False
has_pyobject_attrs = False
has_memoryview_attrs = False
has_cpp_class_attrs = False
has_cyclic_pyobject_attrs = False
defined = False
implemented = False
def __init__(self, name, outer_scope, visibility):
ClassScope.__init__(self, name, outer_scope)
if visibility != 'extern':
self.method_table_cname = outer_scope.mangle(Naming.methtab_prefix, name)
self.getset_table_cname = outer_scope.mangle(Naming.gstab_prefix, name)
self.property_entries = []
self.inherited_var_entries = []
def needs_gc(self):
# If the type or any of its base types have Python-valued
# C attributes, then it needs to participate in GC.
if self.has_cyclic_pyobject_attrs and not self.directives.get('no_gc', False):
return True
base_type = self.parent_type.base_type
if base_type and base_type.scope is not None:
return base_type.scope.needs_gc()
elif self.parent_type.is_builtin_type:
return not self.parent_type.is_gc_simple
return False
def needs_trashcan(self):
# If the trashcan directive is explicitly set to False,
# unconditionally disable the trashcan.
directive = self.directives.get('trashcan')
if directive is False:
return False
# If the directive is set to True and the class has Python-valued
# C attributes, then it should use the trashcan in tp_dealloc.
if directive and self.has_cyclic_pyobject_attrs:
return True
# Use the trashcan if the base class uses it
base_type = self.parent_type.base_type
if base_type and base_type.scope is not None:
return base_type.scope.needs_trashcan()
return self.parent_type.builtin_trashcan
def needs_tp_clear(self):
"""
Do we need to generate an implementation for the tp_clear slot? Can
be disabled to keep references for the __dealloc__ cleanup function.
"""
return self.needs_gc() and not self.directives.get('no_gc_clear', False)
def get_refcounted_entries(self, include_weakref=False,
include_gc_simple=True):
py_attrs = []
py_buffers = []
memoryview_slices = []
for entry in self.var_entries:
if entry.type.is_pyobject:
if include_weakref or (self.is_closure_class_scope or entry.name != "__weakref__"):
if include_gc_simple or not entry.type.is_gc_simple:
py_attrs.append(entry)
elif entry.type == PyrexTypes.c_py_buffer_type:
py_buffers.append(entry)
elif entry.type.is_memoryviewslice:
memoryview_slices.append(entry)
have_entries = py_attrs or py_buffers or memoryview_slices
return have_entries, (py_attrs, py_buffers, memoryview_slices)
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0):
name = self.mangle_class_private_name(name)
if is_cdef:
# Add an entry for an attribute.
if self.defined:
error(pos,
"C attributes cannot be added in implementation part of"
" extension type defined in a pxd")
if not self.is_closure_class_scope and get_special_method_signature(name):
error(pos,
"The name '%s' is reserved for a special method."
% name)
if not cname:
cname = name
if visibility == 'private':
cname = c_safe_identifier(cname)
cname = punycodify_name(cname, Naming.unicode_structmember_prefix)
if type.is_cpp_class and visibility != 'extern':
type.check_nullary_constructor(pos)
self.use_utility_code(Code.UtilityCode("#include <new>"))
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = 1
self.var_entries.append(entry)
if type.is_memoryviewslice:
self.has_memoryview_attrs = True
elif type.is_cpp_class:
self.has_cpp_class_attrs = True
elif type.is_pyobject and (self.is_closure_class_scope or name != '__weakref__'):
self.has_pyobject_attrs = True
if (not type.is_builtin_type
or not type.scope or type.scope.needs_gc()):
self.has_cyclic_pyobject_attrs = True
if visibility not in ('private', 'public', 'readonly'):
error(pos,
"Attribute of extension type cannot be declared %s" % visibility)
if visibility in ('public', 'readonly'):
# If the field is an external typedef, we cannot be sure about the type,
# so do the conversion ourselves rather than rely on the CPython mechanism (through
# a property; made in AnalyseDeclarationsTransform).
entry.needs_property = True
if not self.is_closure_class_scope and name == "__weakref__":
error(pos, "Special attribute __weakref__ cannot be exposed to Python")
if not (type.is_pyobject or type.can_coerce_to_pyobject(self)):
# we're not testing for coercion *from* Python here - that would fail later
error(pos, "C attribute of type '%s' cannot be accessed from Python" % type)
else:
entry.needs_property = False
return entry
else:
if type is unspecified_type:
type = py_object_type
# Add an entry for a class attribute.
entry = Scope.declare_var(self, name, type, pos,
cname=cname, visibility=visibility,
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
entry.is_member = 1
# xxx: is_pyglobal changes behaviour in so many places that I keep it in for now.
# is_member should be enough later on
entry.is_pyglobal = 1
self.namespace_cname = "(PyObject *)%s" % self.parent_type.typeptr_cname
return entry
def declare_pyfunction(self, name, pos, allow_redefine=False):
# Add an entry for a method.
if name in richcmp_special_methods:
if self.lookup_here('__richcmp__'):
error(pos, "Cannot define both % and __richcmp__" % name)
elif name == '__richcmp__':
for n in richcmp_special_methods:
if self.lookup_here(n):
error(pos, "Cannot define both % and __richcmp__" % n)
if name == "__new__":
error(pos, "__new__ method of extension type will change semantics "
"in a future version of Pyrex and Cython. Use __cinit__ instead.")
entry = self.declare_var(name, py_object_type, pos,
visibility='extern')
special_sig = get_special_method_signature(name)
if special_sig:
# Special methods get put in the method table with a particular
# signature declared in advance.
entry.signature = special_sig
entry.is_special = 1
else:
entry.signature = pymethod_signature
entry.is_special = 0
self.pyfunc_entries.append(entry)
return entry
def lookup_here(self, name):
if not self.is_closure_class_scope and name == "__new__":
name = EncodedString("__cinit__")
entry = ClassScope.lookup_here(self, name)
if entry and entry.is_builtin_cmethod:
if not self.parent_type.is_builtin_type:
# For subtypes of builtin types, we can only return
# optimised C methods if the type is final.
# Otherwise, subtypes may choose to override the
# method, but the optimisation would prevent the
# subtype method from being called.
if not self.parent_type.is_final_type:
return None
return entry
def declare_cfunction(self, name, type, pos,
cname=None, visibility='private', api=0, in_pxd=0,
defining=0, modifiers=(), utility_code=None, overridable=False):
name = self.mangle_class_private_name(name)
if get_special_method_signature(name) and not self.parent_type.is_builtin_type:
error(pos, "Special methods must be declared with 'def', not 'cdef'")
args = type.args
if not type.is_static_method:
if not args:
error(pos, "C method has no self argument")
elif not self.parent_type.assignable_from(args[0].type):
error(pos, "Self argument (%s) of C method '%s' does not match parent type (%s)" %
(args[0].type, name, self.parent_type))
entry = self.lookup_here(name)
if cname is None:
cname = punycodify_name(c_safe_identifier(name), Naming.unicode_vtabentry_prefix)
if entry:
if not entry.is_cfunction:
warning(pos, "'%s' redeclared " % name, 0)
else:
if defining and entry.func_cname:
error(pos, "'%s' already defined" % name)
#print "CClassScope.declare_cfunction: checking signature" ###
if entry.is_final_cmethod and entry.is_inherited:
error(pos, "Overriding final methods is not allowed")
elif type.same_c_signature_as(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil:
# Fix with_gil vs nogil.
entry.type = entry.type.with_with_gil(type.with_gil)
elif type.compatible_signature_with(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil:
if (self.defined and not in_pxd
and not type.same_c_signature_as_resolved_type(
entry.type, as_cmethod=1, as_pxd_definition=1)):
# TODO(robertwb): Make this an error.
warning(pos,
"Compatible but non-identical C method '%s' not redeclared "
"in definition part of extension type '%s'. "
"This may cause incorrect vtables to be generated." % (
name, self.class_name), 2)
warning(entry.pos, "Previous declaration is here", 2)
entry = self.add_cfunction(name, type, pos, cname, visibility='ignore', modifiers=modifiers)
else:
error(pos, "Signature not compatible with previous declaration")
error(entry.pos, "Previous declaration is here")
else:
if self.defined:
error(pos,
"C method '%s' not previously declared in definition part of"
" extension type '%s'" % (name, self.class_name))
entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
if defining:
entry.func_cname = self.mangle(Naming.func_prefix, name)
entry.utility_code = utility_code
type.entry = entry
if u'inline' in modifiers:
entry.is_inline_cmethod = True
if self.parent_type.is_final_type or entry.is_inline_cmethod or self.directives.get('final'):
entry.is_final_cmethod = True
entry.final_func_cname = entry.func_cname
return entry
def add_cfunction(self, name, type, pos, cname, visibility, modifiers, inherited=False):
# Add a cfunction entry without giving it a func_cname.
prev_entry = self.lookup_here(name)
entry = ClassScope.add_cfunction(
self, name, type, pos, cname, visibility, modifiers, inherited=inherited)
entry.is_cmethod = 1
entry.prev_entry = prev_entry
return entry
def declare_builtin_cfunction(self, name, type, cname, utility_code = None):
# overridden methods of builtin types still have their Python
# equivalent that must be accessible to support bound methods
name = EncodedString(name)
entry = self.declare_cfunction(
name, type, pos=None, cname=cname, visibility='extern', utility_code=utility_code)
var_entry = Entry(name, name, py_object_type)
var_entry.qualified_name = name
var_entry.is_variable = 1
var_entry.is_builtin = 1
var_entry.utility_code = utility_code
var_entry.scope = entry.scope
entry.as_variable = var_entry
return entry
def declare_property(self, name, doc, pos, ctype=None, property_scope=None):
entry = self.lookup_here(name)
if entry is None:
entry = self.declare(name, name, py_object_type if ctype is None else ctype, pos, 'private')
entry.is_property = True
if ctype is not None:
entry.is_cproperty = True
entry.doc = doc
if property_scope is None:
entry.scope = PropertyScope(name, class_scope=self)
else:
entry.scope = property_scope
self.property_entries.append(entry)
return entry
def declare_cproperty(self, name, type, cfunc_name, doc=None, pos=None, visibility='extern',
nogil=False, with_gil=False, exception_value=None, exception_check=False,
utility_code=None):
"""Internal convenience method to declare a C property function in one go.
"""
property_entry = self.declare_property(name, doc=doc, ctype=type, pos=pos)
cfunc_entry = property_entry.scope.declare_cfunction(
name=name,
type=PyrexTypes.CFuncType(
type,
[PyrexTypes.CFuncTypeArg("self", self.parent_type, pos=None)],
nogil=nogil,
with_gil=with_gil,
exception_value=exception_value,
exception_check=exception_check,
),
cname=cfunc_name,
utility_code=utility_code,
visibility=visibility,
pos=pos,
)
return property_entry, cfunc_entry
def declare_inherited_c_attributes(self, base_scope):
# Declare entries for all the C attributes of an
# inherited type, with cnames modified appropriately
# to work with this type.
def adapt(cname):
return "%s.%s" % (Naming.obj_base_cname, base_entry.cname)
entries = base_scope.inherited_var_entries + base_scope.var_entries
for base_entry in entries:
entry = self.declare(
base_entry.name, adapt(base_entry.cname),
base_entry.type, None, 'private')
entry.is_variable = 1
self.inherited_var_entries.append(entry)
# If the class is defined in a pxd, specific entries have not been added.
# Ensure now that the parent (base) scope has specific entries
# Iterate over a copy as get_all_specialized_function_types() will mutate
for base_entry in base_scope.cfunc_entries[:]:
if base_entry.type.is_fused:
base_entry.type.get_all_specialized_function_types()
for base_entry in base_scope.cfunc_entries:
cname = base_entry.cname
var_entry = base_entry.as_variable
is_builtin = var_entry and var_entry.is_builtin
if not is_builtin:
cname = adapt(cname)
entry = self.add_cfunction(
base_entry.name, base_entry.type, base_entry.pos, cname,
base_entry.visibility, base_entry.func_modifiers, inherited=True)
entry.is_inherited = 1
if base_entry.is_final_cmethod:
entry.is_final_cmethod = True
entry.is_inline_cmethod = base_entry.is_inline_cmethod
if (self.parent_scope == base_scope.parent_scope or
entry.is_inline_cmethod):
entry.final_func_cname = base_entry.final_func_cname
if is_builtin:
entry.is_builtin_cmethod = True
entry.as_variable = var_entry
if base_entry.utility_code:
entry.utility_code = base_entry.utility_code
class CppClassScope(Scope):
# Namespace of a C++ class.
is_cpp_class_scope = 1
default_constructor = None
type = None
def __init__(self, name, outer_scope, templates=None):
Scope.__init__(self, name, outer_scope, None)
self.directives = outer_scope.directives
self.inherited_var_entries = []
if templates is not None:
for T in templates:
template_entry = self.declare(
T, T, PyrexTypes.TemplatePlaceholderType(T), None, 'extern')
template_entry.is_type = 1
def declare_var(self, name, type, pos,
cname = None, visibility = 'extern',
api = 0, in_pxd = 0, is_cdef = 0, defining = 0):
# Add an entry for an attribute.
if not cname:
cname = name
entry = self.lookup_here(name)
if defining and entry is not None:
if entry.type.same_as(type):
# Fix with_gil vs nogil.
entry.type = entry.type.with_with_gil(type.with_gil)
elif type.is_cfunction and type.compatible_signature_with(entry.type):
entry.type = type
else:
error(pos, "Function signature does not match previous declaration")
else:
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = 1
if type.is_cfunction and self.type:
if not self.type.get_fused_types():
entry.func_cname = "%s::%s" % (self.type.empty_declaration_code(), cname)
if name != "this" and (defining or name != "<init>"):
self.var_entries.append(entry)
return entry
def declare_cfunction(self, name, type, pos,
cname=None, visibility='extern', api=0, in_pxd=0,
defining=0, modifiers=(), utility_code=None, overridable=False):
class_name = self.name.split('::')[-1]
if name in (class_name, '__init__') and cname is None:
cname = "%s__init__%s" % (Naming.func_prefix, class_name)
name = EncodedString('<init>')
type.return_type = PyrexTypes.CVoidType()
# This is called by the actual constructor, but needs to support
# arguments that cannot be passed by value.
type.original_args = type.args
def maybe_ref(arg):
if arg.type.is_cpp_class and not arg.type.is_reference:
return PyrexTypes.CFuncTypeArg(
arg.name, PyrexTypes.c_ref_type(arg.type), arg.pos)
else:
return arg
type.args = [maybe_ref(arg) for arg in type.args]
elif name == '__dealloc__' and cname is None:
cname = "%s__dealloc__%s" % (Naming.func_prefix, class_name)
name = EncodedString('<del>')
type.return_type = PyrexTypes.CVoidType()
if name in ('<init>', '<del>') and type.nogil:
for base in self.type.base_classes:
base_entry = base.scope.lookup(name)
if base_entry and not base_entry.type.nogil:
error(pos, "Constructor cannot be called without GIL unless all base constructors can also be called without GIL")
error(base_entry.pos, "Base constructor defined here.")
prev_entry = self.lookup_here(name)
entry = self.declare_var(name, type, pos,
defining=defining,
cname=cname, visibility=visibility)
if prev_entry and not defining:
entry.overloaded_alternatives = prev_entry.all_alternatives()
entry.utility_code = utility_code
type.entry = entry
return entry
def declare_inherited_cpp_attributes(self, base_class):
base_scope = base_class.scope
template_type = base_class
while getattr(template_type, 'template_type', None):
template_type = template_type.template_type
if getattr(template_type, 'templates', None):
base_templates = [T.name for T in template_type.templates]
else:
base_templates = ()
# Declare entries for all the C++ attributes of an
# inherited type, with cnames modified appropriately
# to work with this type.
for base_entry in base_scope.inherited_var_entries + base_scope.var_entries:
#constructor/destructor is not inherited
if base_entry.name in ("<init>", "<del>"):
continue
#print base_entry.name, self.entries
if base_entry.name in self.entries:
base_entry.name # FIXME: is there anything to do in this case?
entry = self.declare(base_entry.name, base_entry.cname,
base_entry.type, None, 'extern')
entry.is_variable = 1
entry.is_inherited = 1
self.inherited_var_entries.append(entry)
for base_entry in base_scope.cfunc_entries:
entry = self.declare_cfunction(base_entry.name, base_entry.type,
base_entry.pos, base_entry.cname,
base_entry.visibility, api=0,
modifiers=base_entry.func_modifiers,
utility_code=base_entry.utility_code)
entry.is_inherited = 1
for base_entry in base_scope.type_entries:
if base_entry.name not in base_templates:
entry = self.declare_type(base_entry.name, base_entry.type,
base_entry.pos, base_entry.cname,
base_entry.visibility)
entry.is_inherited = 1
def specialize(self, values, type_entry):
scope = CppClassScope(self.name, self.outer_scope)
scope.type = type_entry
for entry in self.entries.values():
if entry.is_type:
scope.declare_type(entry.name,
entry.type.specialize(values),
entry.pos,
entry.cname,
template=1)
elif entry.type.is_cfunction:
for e in entry.all_alternatives():
scope.declare_cfunction(e.name,
e.type.specialize(values),
e.pos,
e.cname,
utility_code=e.utility_code)
else:
scope.declare_var(entry.name,
entry.type.specialize(values),
entry.pos,
entry.cname,
entry.visibility)
return scope
class PropertyScope(Scope):
# Scope holding the __get__, __set__ and __del__ methods for
# a property of an extension type.
#
# parent_type PyExtensionType The type to which the property belongs
is_property_scope = 1
def __init__(self, name, class_scope):
# outer scope is None for some internal properties
outer_scope = class_scope.global_scope() if class_scope.outer_scope else None
Scope.__init__(self, name, outer_scope, parent_scope=class_scope)
self.parent_type = class_scope.parent_type
self.directives = class_scope.directives
def declare_cfunction(self, name, type, pos, *args, **kwargs):
"""Declare a C property function.
"""
if type.return_type.is_void:
error(pos, "C property method cannot return 'void'")
if type.args and type.args[0].type is py_object_type:
# Set 'self' argument type to extension type.
type.args[0].type = self.parent_scope.parent_type
elif len(type.args) != 1:
error(pos, "C property method must have a single (self) argument")
elif not (type.args[0].type.is_pyobject or type.args[0].type is self.parent_scope.parent_type):
error(pos, "C property method must have a single (object) argument")
entry = Scope.declare_cfunction(self, name, type, pos, *args, **kwargs)
entry.is_cproperty = True
return entry
def declare_pyfunction(self, name, pos, allow_redefine=False):
# Add an entry for a method.
signature = get_property_accessor_signature(name)
if signature:
entry = self.declare(name, name, py_object_type, pos, 'private')
entry.is_special = 1
entry.signature = signature
return entry
else:
error(pos, "Only __get__, __set__ and __del__ methods allowed "
"in a property declaration")
return None
class CConstOrVolatileScope(Scope):
def __init__(self, base_type_scope, is_const=0, is_volatile=0):
Scope.__init__(
self,
'cv_' + base_type_scope.name,
base_type_scope.outer_scope,
base_type_scope.parent_scope)
self.base_type_scope = base_type_scope
self.is_const = is_const
self.is_volatile = is_volatile
def lookup_here(self, name):
entry = self.base_type_scope.lookup_here(name)
if entry is not None:
entry = copy.copy(entry)
entry.type = PyrexTypes.c_const_or_volatile_type(
entry.type, self.is_const, self.is_volatile)
return entry
class TemplateScope(Scope):
def __init__(self, name, outer_scope):
Scope.__init__(self, name, outer_scope, None)
self.directives = outer_scope.directives
| [] | [] | [] | [] | [] | python | null | null |
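The mangle_class_private_name logic in the scope classes above mirrors CPython's private-name mangling (leading double underscore, no trailing double underscore). A minimal self-contained Python sketch of that rule, assuming illustrative names (mangle_private and the sample class name are not part of the original code):

def mangle_private(class_name, name):
    # Dunder names and __pyx_ helpers pass through unchanged;
    # other double-underscore names become _ClassName__name.
    if name.lower().startswith("__pyx_"):
        return name
    if name.startswith("__") and not name.endswith("__"):
        return "_%s%s" % (class_name.lstrip("_"), name)
    return name

assert mangle_private("MyExtType", "__cache") == "_MyExtType__cache"
assert mangle_private("MyExtType", "__init__") == "__init__"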
dtmcli/utils.go
|
package dtmcli
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"os"
"path"
"runtime"
"strconv"
"strings"
"time"
"github.com/go-resty/resty/v2"
)
// P2E panic to error
func P2E(perr *error) {
if x := recover(); x != nil {
if e, ok := x.(error); ok {
*perr = e
} else {
panic(x)
}
}
}
// E2P error to panic
func E2P(err error) {
if err != nil {
panic(err)
}
}
// CatchP catch panic to error
func CatchP(f func()) (rerr error) {
defer P2E(&rerr)
f()
return nil
}
// PanicIf panics with err if cond is true
func PanicIf(cond bool, err error) {
if cond {
panic(err)
}
}
// MustAtoi converts s to int, panicking on conversion error
func MustAtoi(s string) int {
r, err := strconv.Atoi(s)
if err != nil {
E2P(errors.New("convert to int error: " + s))
}
return r
}
// OrString returns the first non-empty string
func OrString(ss ...string) string {
for _, s := range ss {
if s != "" {
return s
}
}
return ""
}
// If ternary operator
func If(condition bool, trueObj interface{}, falseObj interface{}) interface{} {
if condition {
return trueObj
}
return falseObj
}
// MustMarshal checked version for marshal
func MustMarshal(v interface{}) []byte {
b, err := json.Marshal(v)
E2P(err)
return b
}
// MustMarshalString string version of MustMarshal
func MustMarshalString(v interface{}) string {
return string(MustMarshal(v))
}
// MustUnmarshal checked version for unmarshal
func MustUnmarshal(b []byte, obj interface{}) {
err := json.Unmarshal(b, obj)
E2P(err)
}
// MustUnmarshalString string version of MustUnmarshal
func MustUnmarshalString(s string, obj interface{}) {
MustUnmarshal([]byte(s), obj)
}
// MustRemarshal marshal and unmarshal, and check error
func MustRemarshal(from interface{}, to interface{}) {
b, err := json.Marshal(from)
E2P(err)
err = json.Unmarshal(b, to)
E2P(err)
}
// Logf prints a log message prefixed with a timestamp and the caller's file:line
func Logf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
n := time.Now()
ts := fmt.Sprintf("%s.%03d", n.Format("2006-01-02 15:04:05"), n.Nanosecond()/1000000)
var file string
var line int
for i := 1; ; i++ {
_, file, line, _ = runtime.Caller(i)
if strings.Contains(file, "dtm") {
break
}
}
fmt.Printf("%s %s:%d %s\n", ts, path.Base(file), line, msg)
}
// LogRedf prints error-type messages in red
func LogRedf(fmt string, args ...interface{}) {
Logf("\x1b[31m\n"+fmt+"\x1b[0m\n", args...)
}
// FatalExitFunc is the fatal exit function; it is replaced during tests
var FatalExitFunc = func() { os.Exit(1) }
// LogFatalf prints error-type messages in red, then exits
func LogFatalf(fmt string, args ...interface{}) {
Logf("\x1b[31m\n"+fmt+"\x1b[0m\n", args...)
FatalExitFunc()
}
// LogIfFatalf prints error-type messages in red and exits if the condition is true
func LogIfFatalf(condition bool, fmt string, args ...interface{}) {
if condition {
LogFatalf(fmt, args...)
}
}
// FatalIfError prints the error in red and exits if err is not nil
func FatalIfError(err error) {
LogIfFatalf(err != nil, "Fatal error: %v", err)
}
// RestyClient the resty object
var RestyClient = resty.New()
func init() {
// RestyClient.SetTimeout(3 * time.Second)
// RestyClient.SetRetryCount(2)
// RestyClient.SetRetryWaitTime(1 * time.Second)
RestyClient.OnBeforeRequest(func(c *resty.Client, r *resty.Request) error {
r.URL = MayReplaceLocalhost(r.URL)
Logf("requesting: %s %s %v %v", r.Method, r.URL, r.Body, r.QueryParam)
return nil
})
RestyClient.OnAfterResponse(func(c *resty.Client, resp *resty.Response) error {
r := resp.Request
Logf("requested: %s %s %s", r.Method, r.URL, resp.String())
return nil
})
}
// GetFuncName returns the name of the calling function
func GetFuncName() string {
pc, _, _, _ := runtime.Caller(1)
return runtime.FuncForPC(pc).Name()
}
// MayReplaceLocalhost replaces localhost with host.docker.internal when running in docker compose, so the host network can be reached
func MayReplaceLocalhost(host string) string {
if os.Getenv("IS_DOCKER") != "" {
return strings.Replace(host, "localhost", "host.docker.internal", 1)
}
return host
}
var sqlDbs = map[string]*sql.DB{}
// SdbGet get pooled sql.DB
func SdbGet(conf map[string]string) (*sql.DB, error) {
dsn := GetDsn(conf)
if sqlDbs[dsn] == nil {
db, err := SdbAlone(conf)
if err != nil {
return nil, err
}
sqlDbs[dsn] = db
}
return sqlDbs[dsn], nil
}
// SdbAlone get a standalone db connection
func SdbAlone(conf map[string]string) (*sql.DB, error) {
dsn := GetDsn(conf)
Logf("opening alone %s: %s", conf["driver"], strings.Replace(dsn, conf["password"], "****", 1))
return sql.Open(conf["driver"], dsn)
}
// DBExec use raw db to exec
func DBExec(db DB, sql string, values ...interface{}) (affected int64, rerr error) {
r, rerr := db.Exec(sql, values...)
if rerr == nil {
affected, rerr = r.RowsAffected()
Logf("affected: %d for %s %v", affected, sql, values)
} else {
LogRedf("exec error: %v for %s %v", rerr, sql, values)
}
return
}
// DBQueryRow use raw tx to query row
func DBQueryRow(db DB, query string, args ...interface{}) *sql.Row {
Logf("querying: "+query, args...)
return db.QueryRow(query, args...)
}
// GetDsn get dsn from map config
func GetDsn(conf map[string]string) string {
conf["host"] = MayReplaceLocalhost(conf["host"])
driver := conf["driver"]
dsn := MS{
"mysql": fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=true&loc=Local",
conf["user"], conf["password"], conf["host"], conf["port"], conf["database"]),
"postgres": fmt.Sprintf("host=%s user=%s password=%s dbname='%s' port=%s sslmode=disable TimeZone=Asia/Shanghai",
conf["host"], conf["user"], conf["password"], conf["database"], conf["port"]),
}[driver]
PanicIf(dsn == "", fmt.Errorf("unknow driver: %s", driver))
return dsn
}
// CheckResponse checks the Response and returns an error for failures
func CheckResponse(resp *resty.Response, err error) error {
if err == nil && resp != nil {
if resp.IsError() {
return errors.New(resp.String())
} else if strings.Contains(resp.String(), "FAILURE") {
return ErrFailure
}
}
return err
}
// CheckResult checks the Result and returns an error for failures
func CheckResult(res interface{}, err error) error {
resp, ok := res.(*resty.Response)
if ok {
return CheckResponse(resp, err)
}
if res != nil {
str := MustMarshalString(res)
if strings.Contains(str, "FAILURE") {
return ErrFailure
} else if strings.Contains(str, "PENDING") {
return ErrPending
}
}
return err
}
| ["\"IS_DOCKER\""] | [] | ["IS_DOCKER"] | [] | ["IS_DOCKER"] | go | 1 | 0 |
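MayReplaceLocalhost above is the only place this file reads the IS_DOCKER variable listed in the row metadata. A rough Python rendering of that check, assuming the same variable name and replacement target (the function name is illustrative):

import os

def may_replace_localhost(host):
    # When running under docker compose, point "localhost" at the host network.
    if os.getenv("IS_DOCKER"):
        return host.replace("localhost", "host.docker.internal", 1)
    return host

# e.g. may_replace_localhost("http://localhost:8080") returns
# "http://host.docker.internal:8080" only when IS_DOCKER is set.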
src/test/java/org/springframework/samples/petclinic/ui/AddDogTournamentPositiveUITest.java
|
package org.springframework.samples.petclinic.ui;
import java.util.regex.Pattern;
import java.util.concurrent.TimeUnit;
import org.junit.*;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.extension.ExtendWith;
import static org.junit.Assert.*;
import static org.hamcrest.CoreMatchers.*;
import org.openqa.selenium.*;
import org.openqa.selenium.firefox.FirefoxDriver;
import org.openqa.selenium.support.ui.Select;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.web.server.LocalServerPort;
import org.springframework.test.context.junit.jupiter.SpringExtension;
// Test 8
@ExtendWith(SpringExtension.class)
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
public class AddDogTournamentPositiveUITest {
private WebDriver driver;
private String baseUrl;
private boolean acceptNextAlert = true;
private StringBuffer verificationErrors = new StringBuffer();
@LocalServerPort
private int port;
@BeforeEach
public void setUp() throws Exception {
String value = System.getenv("webdriver.gecko.driver");
System.setProperty("webdriver.gecko.driver", value );
driver = new FirefoxDriver();
baseUrl = "https://www.google.com/";
driver.manage().timeouts().implicitlyWait(30, TimeUnit.SECONDS);
}
@org.junit.jupiter.api.Test
public void testAddDogTournamentPositiveUI() throws Exception {
driver.get("http://localhost:" + this.port);
driver.findElement(By.xpath("//div[@id='main-navbar']/ul[2]/li/a")).click();
driver.findElement(By.id("username")).clear();
driver.findElement(By.id("username")).sendKeys("admin1");
driver.findElement(By.id("password")).click();
driver.findElement(By.id("password")).clear();
driver.findElement(By.id("password")).sendKeys("4dm1n");
driver.findElement(By.xpath("//button[@type='submit']")).click();
driver.findElement(By.xpath("//div[@id='main-navbar']/ul/li[4]/a/span[2]")).click();
driver.findElement(By.linkText("Beauty ConTEST4")).click();
driver.findElement(By.linkText("Add New Dog")).click();
driver.findElement(By.linkText("Beagle: Samantha - Owner: (Jean Coleman)")).click();
String putName = "Samantha";
String putOwner = "Jean Coleman";
driver.findElement(By.xpath("//dl")).click();
driver.findElement(By.xpath("//dd[3]")).click();
String owner = driver.findElement(By.xpath("//dd[3]")).getText();
driver.findElement(By.xpath("//dd")).click();
String name = driver.findElement(By.xpath("//dd")).getText();
assertEquals(putName, name);
assertEquals(putOwner, owner);
}
@AfterEach
public void tearDown() throws Exception {
driver.quit();
String verificationErrorString = verificationErrors.toString();
if (!"".equals(verificationErrorString)) {
fail(verificationErrorString);
}
}
private boolean isElementPresent(By by) {
try {
driver.findElement(by);
return true;
} catch (NoSuchElementException e) {
return false;
}
}
private boolean isAlertPresent() {
try {
driver.switchTo().alert();
return true;
} catch (NoAlertPresentException e) {
return false;
}
}
private String closeAlertAndGetItsText() {
try {
Alert alert = driver.switchTo().alert();
String alertText = alert.getText();
if (acceptNextAlert) {
alert.accept();
} else {
alert.dismiss();
}
return alertText;
} finally {
acceptNextAlert = true;
}
}
}
| ["\"webdriver.gecko.driver\""] | [] | ["webdriver.gecko.driver"] | [] | ["webdriver.gecko.driver"] | java | 1 | 0 |
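The setUp method above resolves the geckodriver path from the webdriver.gecko.driver environment variable before launching Firefox. A hedged Python sketch of the same lookup-and-fail-fast pattern (everything except the variable name is illustrative):

import os

def geckodriver_path():
    # Read the driver path from the environment and fail early with a
    # clear message instead of letting the browser launch fail later.
    path = os.environ.get("webdriver.gecko.driver")
    if not path:
        raise RuntimeError("webdriver.gecko.driver is not set")
    return path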
pytorch_lightning/plugins/training_type/parallel.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import Any, List, Optional
import torch
from torch.nn.parallel import DistributedDataParallel
import pytorch_lightning as pl
from pytorch_lightning.overrides.base import unwrap_lightning_module
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.training_type.training_type_plugin import TrainingTypePlugin
from pytorch_lightning.utilities import _XLA_AVAILABLE
from pytorch_lightning.utilities.distributed import all_gather_ddp_if_available, ReduceOp
class ParallelPlugin(TrainingTypePlugin, ABC):
""" Plugin for training with multiple processes in parallel. """
def __init__(
self,
parallel_devices: Optional[List[torch.device]] = None,
cluster_environment: Optional[ClusterEnvironment] = None,
):
super().__init__()
self.parallel_devices = parallel_devices
self.cluster_environment = cluster_environment
@property
@abstractmethod
def root_device(self) -> torch.device:
raise NotImplementedError
@property
def on_gpu(self) -> bool:
return self.root_device.type == "cuda" and torch.cuda.is_available()
@property
def on_tpu(self) -> bool:
return self.root_device.type == "xla" and _XLA_AVAILABLE
@property
def lightning_module(self):
return unwrap_lightning_module(self._model)
@property
def global_rank(self) -> int:
return self.cluster_environment.global_rank() if self.cluster_environment is not None else 0
@property
def local_rank(self) -> int:
return self.cluster_environment.local_rank() if self.cluster_environment is not None else 0
@property
def node_rank(self) -> int:
return self.cluster_environment.node_rank() if self.cluster_environment is not None else 0
@property
def world_size(self) -> int:
return self.cluster_environment.world_size() if self.cluster_environment is not None else 1
@property
def is_global_zero(self) -> bool:
return self.global_rank == 0
@property
def distributed_sampler_kwargs(self):
distributed_sampler_kwargs = dict(num_replicas=len(self.parallel_devices), rank=self.global_rank)
return distributed_sampler_kwargs
def reconciliate_processes(self, trace: str):
"""
Function to reconcile processes on failure
"""
def all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> torch.Tensor:
"""Perform a all_gather on all processes """
return all_gather_ddp_if_available(tensor, group=group, sync_grads=sync_grads)
def reduce_boolean_decision(self, decision: bool) -> bool:
decision = torch.tensor(int(decision), device=self.lightning_module.device)
decision = self.reduce(decision, reduce_op=ReduceOp.SUM)
decision = bool(decision == self.world_size)
return decision
@property
def torch_distributed_backend(self):
torch_backend = os.getenv("PL_TORCH_DISTRIBUTED_BACKEND")
if torch_backend is None:
torch_backend = "nccl" if self.on_gpu else "gloo"
return torch_backend
@staticmethod
def configure_sync_batchnorm(model: 'pl.LightningModule') -> 'pl.LightningModule':
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.
Override to synchronize batchnorm between specific process groups instead
of the whole world or use a different sync_bn like `apex`'s version.
Args:
model: pointer to current :class:`LightningModule`.
Return:
LightningModule with batchnorm layers synchronized between process groups
"""
return torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
@contextmanager
def block_backward_sync(self):
"""
Blocks DDP gradient synchronization during the backward pass.
This is useful for skipping sync when accumulating gradients, reducing communication overhead.
Returns: context manager with sync behaviour off
"""
if isinstance(self.model, DistributedDataParallel):
with self.model.no_sync():
yield None
else:
yield None
def teardown(self) -> None:
# Un-reference the wrapper if any was used.
# todo (tchaton): Add support for all plugins.
if isinstance(self.model, DistributedDataParallel):
self.model = self.lightning_module
if self.on_gpu:
# GPU teardown
self.lightning_module.cpu()
# clean up memory
torch.cuda.empty_cache()
| [] | [] | ["PL_TORCH_DISTRIBUTED_BACKEND"] | [] | ["PL_TORCH_DISTRIBUTED_BACKEND"] | python | 1 | 0 |
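torch_distributed_backend above falls back to a device-dependent default when PL_TORCH_DISTRIBUTED_BACKEND is unset. A minimal sketch of that selection logic in isolation (the function name and the on_gpu argument are illustrative):

import os

def pick_distributed_backend(on_gpu):
    # An explicit override via the environment wins; otherwise use
    # NCCL for GPU jobs and Gloo for CPU jobs.
    backend = os.getenv("PL_TORCH_DISTRIBUTED_BACKEND")
    if backend is None:
        backend = "nccl" if on_gpu else "gloo"
    return backend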
python-package/xgboost/core.py
|
# coding: utf-8
# pylint: disable=too-many-arguments, too-many-branches, invalid-name
# pylint: disable=too-many-branches, too-many-lines, too-many-locals
# pylint: disable=too-many-public-methods
"""Core XGBoost Library."""
import collections
# pylint: disable=no-name-in-module,import-error
from collections.abc import Mapping # Python 3
# pylint: enable=no-name-in-module,import-error
import ctypes
import os
import re
import sys
import warnings
import json
import numpy as np
import scipy.sparse
from .compat import (
STRING_TYPES, DataFrame, MultiIndex, Int64Index, py_str,
PANDAS_INSTALLED, DataTable,
CUDF_INSTALLED, CUDF_DataFrame, CUDF_Series, CUDF_MultiIndex,
os_fspath, os_PathLike)
from .libpath import find_lib_path
# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h
c_bst_ulong = ctypes.c_uint64
class XGBoostError(Exception):
"""Error thrown by xgboost trainer."""
class EarlyStopException(Exception):
"""Exception to signal early stopping.
Parameters
----------
best_iteration : int
The best iteration stopped.
"""
def __init__(self, best_iteration):
super(EarlyStopException, self).__init__()
self.best_iteration = best_iteration
# Callback environment used by callbacks
CallbackEnv = collections.namedtuple(
"XGBoostCallbackEnv",
["model",
"cvfolds",
"iteration",
"begin_iteration",
"end_iteration",
"rank",
"evaluation_result_list"])
def from_pystr_to_cstr(data):
"""Convert a list of Python str to C pointer
Parameters
----------
data : list
list of str
"""
if not isinstance(data, list):
raise NotImplementedError
pointers = (ctypes.c_char_p * len(data))()
data = [bytes(d, 'utf-8') for d in data]
pointers[:] = data
return pointers
def from_cstr_to_pystr(data, length):
"""Revert C pointer to Python str
Parameters
----------
data : ctypes pointer
pointer to data
length : ctypes pointer
pointer to length of data
"""
res = []
for i in range(length.value):
try:
res.append(str(data[i].decode('ascii')))
except UnicodeDecodeError:
res.append(str(data[i].decode('utf-8')))
return res
def _expect(expectations, got):
"""Translate input error into string.
Parameters
----------
expectations: sequence
a list of expected value.
got:
actual input
Returns
-------
msg: str
"""
msg = 'Expecting '
for t in range(len(expectations) - 1):
msg += str(expectations[t])
msg += ' or '
msg += str(expectations[-1])
msg += '. Got ' + str(got)
return msg
def _log_callback(msg):
"""Redirect logs from native library into Python console"""
print("{0:s}".format(py_str(msg)))
def _get_log_callback_func():
"""Wrap log_callback() method in ctypes callback type"""
# pylint: disable=invalid-name
CALLBACK = ctypes.CFUNCTYPE(None, ctypes.c_char_p)
return CALLBACK(_log_callback)
def _load_lib():
"""Load xgboost Library."""
lib_paths = find_lib_path()
if not lib_paths:
return None
try:
pathBackup = os.environ['PATH'].split(os.pathsep)
except KeyError:
pathBackup = []
lib_success = False
os_error_list = []
for lib_path in lib_paths:
try:
# needed when the lib is linked with non-system-available
# dependencies
os.environ['PATH'] = os.pathsep.join(
pathBackup + [os.path.dirname(lib_path)])
lib = ctypes.cdll.LoadLibrary(lib_path)
lib_success = True
except OSError as e:
os_error_list.append(str(e))
continue
finally:
os.environ['PATH'] = os.pathsep.join(pathBackup)
if not lib_success:
libname = os.path.basename(lib_paths[0])
raise XGBoostError(
'XGBoost Library ({}) could not be loaded.\n'.format(libname) +
'Likely causes:\n' +
' * OpenMP runtime is not installed ' +
'(vcomp140.dll or libgomp-1.dll for Windows, ' +
'libgomp.so for UNIX-like OSes)\n' +
' * You are running 32-bit Python on a 64-bit OS\n' +
'Error message(s): {}\n'.format(os_error_list))
lib.XGBGetLastError.restype = ctypes.c_char_p
lib.callback = _get_log_callback_func()
if lib.XGBRegisterLogCallback(lib.callback) != 0:
raise XGBoostError(lib.XGBGetLastError())
return lib
# load the XGBoost library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise exception when error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
raise XGBoostError(py_str(_LIB.XGBGetLastError()))
def ctypes2numpy(cptr, length, dtype):
"""Convert a ctypes pointer array to a numpy array."""
NUMPY_TO_CTYPES_MAPPING = {
np.float32: ctypes.c_float,
np.uint32: ctypes.c_uint,
}
if dtype not in NUMPY_TO_CTYPES_MAPPING:
raise RuntimeError('Supported types: {}'.format(
NUMPY_TO_CTYPES_MAPPING.keys()))
ctype = NUMPY_TO_CTYPES_MAPPING[dtype]
if not isinstance(cptr, ctypes.POINTER(ctype)):
raise RuntimeError('expected {} pointer'.format(ctype))
res = np.zeros(length, dtype=dtype)
if not ctypes.memmove(res.ctypes.data, cptr, length * res.strides[0]):
raise RuntimeError('memmove failed')
return res
def ctypes2buffer(cptr, length):
"""Convert ctypes pointer to buffer type."""
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise RuntimeError('expected char pointer')
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError('memmove failed')
return res
def c_str(string):
"""Convert a python string to cstring."""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Convert a python string to c array."""
if (isinstance(values, np.ndarray)
and values.dtype.itemsize == ctypes.sizeof(ctype)):
return (ctype * len(values)).from_buffer_copy(values)
return (ctype * len(values))(*values)
PANDAS_DTYPE_MAPPER = {'int8': 'int', 'int16': 'int', 'int32': 'int', 'int64':
'int', 'uint8': 'int', 'uint16': 'int', 'uint32': 'int',
'uint64': 'int', 'float16': 'float', 'float32': 'float',
'float64': 'float', 'bool': 'i'}
# Either object has cuda array interface or contains columns with interfaces
def _has_cuda_array_interface(data):
return hasattr(data, '__cuda_array_interface__') or (
CUDF_INSTALLED and isinstance(data, CUDF_DataFrame))
def _maybe_pandas_data(data, feature_names, feature_types,
meta=None, meta_type=None):
"""Extract internal data from pd.DataFrame for DMatrix data"""
if not (PANDAS_INSTALLED and isinstance(data, DataFrame)):
return data, feature_names, feature_types
data_dtypes = data.dtypes
if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in data_dtypes):
bad_fields = [
str(data.columns[i]) for i, dtype in enumerate(data_dtypes)
if dtype.name not in PANDAS_DTYPE_MAPPER
]
msg = """DataFrame.dtypes for data must be int, float or bool.
Did not expect the data types in fields """
raise ValueError(msg + ', '.join(bad_fields))
if feature_names is None and meta is None:
if isinstance(data.columns, MultiIndex):
feature_names = [
' '.join([str(x) for x in i]) for i in data.columns
]
elif isinstance(data.columns, Int64Index):
feature_names = list(map(str, data.columns))
else:
feature_names = data.columns.format()
if feature_types is None and meta is None:
feature_types = [
PANDAS_DTYPE_MAPPER[dtype.name] for dtype in data_dtypes
]
if meta and len(data.columns) > 1:
raise ValueError(
'DataFrame for {meta} cannot have multiple columns'.format(
meta=meta))
dtype = meta_type if meta_type else 'float'
data = data.values.astype(dtype)
return data, feature_names, feature_types
def _maybe_cudf_dataframe(data, feature_names, feature_types):
"""Extract internal data from cudf.DataFrame for DMatrix data."""
if not (CUDF_INSTALLED and isinstance(data,
(CUDF_DataFrame, CUDF_Series))):
return data, feature_names, feature_types
if feature_names is None:
if isinstance(data, CUDF_Series):
feature_names = [data.name]
elif isinstance(data.columns, CUDF_MultiIndex):
feature_names = [
' '.join([str(x) for x in i])
for i in data.columns
]
else:
feature_names = data.columns.format()
if feature_types is None:
if isinstance(data, CUDF_Series):
dtypes = [data.dtype]
else:
dtypes = data.dtypes
feature_types = [PANDAS_DTYPE_MAPPER[d.name] for d in dtypes]
return data, feature_names, feature_types
DT_TYPE_MAPPER = {'bool': 'bool', 'int': 'int', 'real': 'float'}
DT_TYPE_MAPPER2 = {'bool': 'i', 'int': 'int', 'real': 'float'}
def _maybe_dt_data(data, feature_names, feature_types,
meta=None, meta_type=None):
"""Validate feature names and types if data table"""
if not isinstance(data, DataTable):
return data, feature_names, feature_types
if meta and data.shape[1] > 1:
raise ValueError(
'DataTable for label or weight cannot have multiple columns')
if meta:
# below requires new dt version
# extract first column
data = data.to_numpy()[:, 0].astype(meta_type)
return data, None, None
data_types_names = tuple(lt.name for lt in data.ltypes)
bad_fields = [data.names[i]
for i, type_name in enumerate(data_types_names)
if type_name not in DT_TYPE_MAPPER]
if bad_fields:
msg = """DataFrame.types for data must be int, float or bool.
Did not expect the data types in fields """
raise ValueError(msg + ', '.join(bad_fields))
if feature_names is None and meta is None:
feature_names = data.names
# always return stypes for dt ingestion
if feature_types is not None:
raise ValueError(
'DataTable has own feature types, cannot pass them in.')
feature_types = np.vectorize(DT_TYPE_MAPPER2.get)(data_types_names)
return data, feature_names, feature_types
def _convert_dataframes(data, feature_names, feature_types,
meta=None, meta_type=None):
data, feature_names, feature_types = _maybe_pandas_data(data,
feature_names,
feature_types,
meta,
meta_type)
data, feature_names, feature_types = _maybe_dt_data(data,
feature_names,
feature_types,
meta,
meta_type)
data, feature_names, feature_types = _maybe_cudf_dataframe(
data, feature_names, feature_types)
return data, feature_names, feature_types
def _maybe_np_slice(data, dtype=np.float32):
'''Handle numpy slice. This can be removed if we use __array_interface__.
'''
try:
if not data.flags.c_contiguous:
warnings.warn(
"Use subset (sliced data) of np.ndarray is not recommended " +
"because it will generate extra copies and increase " +
"memory consumption")
data = np.array(data, copy=True, dtype=dtype)
else:
data = np.array(data, copy=False, dtype=dtype)
except AttributeError:
data = np.array(data, copy=False, dtype=dtype)
return data
class DMatrix(object):
"""Data Matrix used in XGBoost.
DMatrix is an internal data structure used by XGBoost,
optimized for both memory efficiency and training speed.
You can construct DMatrix from numpy.arrays
"""
_feature_names = None # for previous version's pickle
_feature_types = None
def __init__(self, data, label=None, weight=None, base_margin=None,
missing=None,
silent=False,
feature_names=None,
feature_types=None,
nthread=None):
"""Parameters
----------
data : os.PathLike/string/numpy.array/scipy.sparse/pd.DataFrame/
dt.Frame/cudf.DataFrame/cupy.array
Data source of DMatrix.
When data is a string or os.PathLike type, it represents the path to a
libsvm format txt file, a csv file (by specifying the uri parameter
'path_to_csv?format=csv'), or a binary file that xgboost can read
label : list, numpy 1-D array or cudf.DataFrame, optional
Label of the training data.
missing : float, optional
Value in the input data which needs to be present as a missing
value. If None, defaults to np.nan.
weight : list, numpy 1-D array or cudf.DataFrame , optional
Weight for each instance.
.. note:: For ranking task, weights are per-group.
In ranking task, one weight is assigned to each group (not each
data point). This is because we only care about the relative
ordering of data points within each group, so it doesn't make
sense to assign weights to individual data points.
silent : boolean, optional
Whether to print messages during construction
feature_names : list, optional
Set names for features.
feature_types : list, optional
Set types for features.
nthread : integer, optional
Number of threads to use for loading data from numpy array. If -1,
uses maximum threads available on the system.
"""
# force into void_p, mac need to pass things in as void_p
if data is None:
self.handle = None
if feature_names is not None:
self._feature_names = feature_names
if feature_types is not None:
self._feature_types = feature_types
return
if isinstance(data, list):
raise TypeError('Input data can not be a list.')
data, feature_names, feature_types = _convert_dataframes(
data, feature_names, feature_types
)
if isinstance(data, (STRING_TYPES, os_PathLike)):
handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromFile(c_str(os_fspath(data)),
ctypes.c_int(silent),
ctypes.byref(handle)))
self.handle = handle
elif isinstance(data, scipy.sparse.csr_matrix):
self._init_from_csr(data)
elif isinstance(data, scipy.sparse.csc_matrix):
self._init_from_csc(data)
elif isinstance(data, np.ndarray):
self._init_from_npy2d(data, missing, nthread)
elif isinstance(data, DataTable):
self._init_from_dt(data, nthread)
elif hasattr(data, "__cuda_array_interface__"):
self._init_from_array_interface(data, missing, nthread)
elif CUDF_INSTALLED and isinstance(data, CUDF_DataFrame):
self._init_from_array_interface_columns(data, missing, nthread)
else:
try:
csr = scipy.sparse.csr_matrix(data)
self._init_from_csr(csr)
except Exception:
raise TypeError('can not initialize DMatrix from'
' {}'.format(type(data).__name__))
if label is not None:
self.set_label(label)
if weight is not None:
self.set_weight(weight)
if base_margin is not None:
self.set_base_margin(base_margin)
self.feature_names = feature_names
self.feature_types = feature_types
def _init_from_csr(self, csr):
"""Initialize data from a CSR matrix."""
if len(csr.indices) != len(csr.data):
raise ValueError('length mismatch: {} vs {}'.format(
len(csr.indices), len(csr.data)))
handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromCSREx(
c_array(ctypes.c_size_t, csr.indptr),
c_array(ctypes.c_uint, csr.indices),
c_array(ctypes.c_float, csr.data),
ctypes.c_size_t(len(csr.indptr)),
ctypes.c_size_t(len(csr.data)),
ctypes.c_size_t(csr.shape[1]),
ctypes.byref(handle)))
self.handle = handle
def _init_from_csc(self, csc):
"""Initialize data from a CSC matrix."""
if len(csc.indices) != len(csc.data):
raise ValueError('length mismatch: {} vs {}'.format(
len(csc.indices), len(csc.data)))
handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromCSCEx(
c_array(ctypes.c_size_t, csc.indptr),
c_array(ctypes.c_uint, csc.indices),
c_array(ctypes.c_float, csc.data),
ctypes.c_size_t(len(csc.indptr)),
ctypes.c_size_t(len(csc.data)),
ctypes.c_size_t(csc.shape[0]),
ctypes.byref(handle)))
self.handle = handle
def _init_from_npy2d(self, mat, missing, nthread):
"""Initialize data from a 2-D numpy matrix.
If ``mat`` does not have ``order='C'`` (aka row-major) or is
not contiguous, a temporary copy will be made.
If ``mat`` does not have ``dtype=numpy.float32``, a temporary copy will
be made.
So there could be as many as two temporary data copies; be mindful of
input layout and type if memory use is a concern.
"""
if len(mat.shape) != 2:
raise ValueError('Expecting 2 dimensional numpy.ndarray, got: ',
mat.shape)
# flatten the array by rows and ensure it is float32. we try to avoid
# data copies if possible (reshape returns a view when possible and we
# explicitly tell np.array to try and avoid copying)
data = np.array(mat.reshape(mat.size), copy=False, dtype=np.float32)
handle = ctypes.c_void_p()
missing = missing if missing is not None else np.nan
nthread = nthread if nthread is not None else 1
_check_call(_LIB.XGDMatrixCreateFromMat_omp(
data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
c_bst_ulong(mat.shape[0]),
c_bst_ulong(mat.shape[1]),
ctypes.c_float(missing),
ctypes.byref(handle),
c_bst_ulong(nthread)))
self.handle = handle
def _init_from_dt(self, data, nthread):
"""Initialize data from a datatable Frame."""
ptrs = (ctypes.c_void_p * data.ncols)()
if hasattr(data, "internal") and hasattr(data.internal, "column"):
# datatable>0.8.0
for icol in range(data.ncols):
col = data.internal.column(icol)
ptr = col.data_pointer
ptrs[icol] = ctypes.c_void_p(ptr)
else:
# datatable<=0.8.0
from datatable.internal import \
frame_column_data_r # pylint: disable=no-name-in-module,import-error
for icol in range(data.ncols):
ptrs[icol] = frame_column_data_r(data, icol)
# always return stypes for dt ingestion
feature_type_strings = (ctypes.c_char_p * data.ncols)()
for icol in range(data.ncols):
feature_type_strings[icol] = ctypes.c_char_p(
data.stypes[icol].name.encode('utf-8'))
handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromDT(
ptrs, feature_type_strings,
c_bst_ulong(data.shape[0]),
c_bst_ulong(data.shape[1]),
ctypes.byref(handle),
nthread))
self.handle = handle
def _init_from_array_interface_columns(self, df, missing, nthread):
"""Initialize DMatrix from columnar memory format."""
interfaces = []
for col in df:
interface = df[col].__cuda_array_interface__
if 'mask' in interface:
interface['mask'] = interface['mask'].__cuda_array_interface__
interfaces.append(interface)
handle = ctypes.c_void_p()
missing = missing if missing is not None else np.nan
nthread = nthread if nthread is not None else 1
interfaces_str = bytes(json.dumps(interfaces, indent=2), 'utf-8')
_check_call(
_LIB.XGDMatrixCreateFromArrayInterfaceColumns(
interfaces_str,
ctypes.c_float(missing), ctypes.c_int(nthread),
ctypes.byref(handle)))
self.handle = handle
def _init_from_array_interface(self, data, missing, nthread):
"""Initialize DMatrix from cupy ndarray."""
interface = data.__cuda_array_interface__
if 'mask' in interface:
interface['mask'] = interface['mask'].__cuda_array_interface__
interface_str = bytes(json.dumps(interface, indent=2), 'utf-8')
handle = ctypes.c_void_p()
missing = missing if missing is not None else np.nan
nthread = nthread if nthread is not None else 1
_check_call(
_LIB.XGDMatrixCreateFromArrayInterface(
interface_str,
ctypes.c_float(missing), ctypes.c_int(nthread),
ctypes.byref(handle)))
self.handle = handle
def __del__(self):
if hasattr(self, "handle") and self.handle:
_check_call(_LIB.XGDMatrixFree(self.handle))
self.handle = None
def get_float_info(self, field):
"""Get float property from the DMatrix.
Parameters
----------
field: str
The field name of the information
Returns
-------
info : array
a numpy array of float information of the data
"""
length = c_bst_ulong()
ret = ctypes.POINTER(ctypes.c_float)()
_check_call(_LIB.XGDMatrixGetFloatInfo(self.handle,
c_str(field),
ctypes.byref(length),
ctypes.byref(ret)))
return ctypes2numpy(ret, length.value, np.float32)
def get_uint_info(self, field):
"""Get unsigned integer property from the DMatrix.
Parameters
----------
field: str
The field name of the information
Returns
-------
info : array
a numpy array of unsigned integer information of the data
"""
length = c_bst_ulong()
ret = ctypes.POINTER(ctypes.c_uint)()
_check_call(_LIB.XGDMatrixGetUIntInfo(self.handle,
c_str(field),
ctypes.byref(length),
ctypes.byref(ret)))
return ctypes2numpy(ret, length.value, np.uint32)
def set_float_info(self, field, data):
"""Set float type property into the DMatrix.
Parameters
----------
field: str
The field name of the information
data: numpy array
The array of data to be set
"""
data, _, _ = _convert_dataframes(data, None, None, field, 'float')
if isinstance(data, np.ndarray):
self.set_float_info_npy2d(field, data)
return
c_data = c_array(ctypes.c_float, data)
_check_call(_LIB.XGDMatrixSetFloatInfo(self.handle,
c_str(field),
c_data,
c_bst_ulong(len(data))))
def set_float_info_npy2d(self, field, data):
"""Set float type property into the DMatrix
for numpy 2d array input
Parameters
----------
field: str
The field name of the information
data: numpy array
The array of data to be set
"""
data = _maybe_np_slice(data, np.float32)
c_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
_check_call(_LIB.XGDMatrixSetFloatInfo(self.handle,
c_str(field),
c_data,
c_bst_ulong(len(data))))
def set_uint_info(self, field, data):
"""Set uint type property into the DMatrix.
Parameters
----------
field: str
The field name of the information
data: numpy array
The array of data to be set
"""
data = _maybe_np_slice(data, np.uint32)
data, _, _ = _convert_dataframes(data, None, None, field, 'uint32')
data = np.array(data, copy=False, dtype=ctypes.c_uint)
_check_call(_LIB.XGDMatrixSetUIntInfo(self.handle,
c_str(field),
c_array(ctypes.c_uint, data),
c_bst_ulong(len(data))))
def set_interface_info(self, field, data):
"""Set info type property into DMatrix."""
# If we are passed a dataframe, extract the series
if CUDF_INSTALLED and isinstance(data, CUDF_DataFrame):
if len(data.columns) != 1:
raise ValueError(
'Expecting meta-info to contain a single column')
data = data[data.columns[0]]
interface = bytes(json.dumps([data.__cuda_array_interface__],
indent=2), 'utf-8')
_check_call(_LIB.XGDMatrixSetInfoFromInterface(self.handle,
c_str(field),
interface))
def save_binary(self, fname, silent=True):
"""Save DMatrix to an XGBoost buffer. Saved binary can be later loaded
by providing the path to :py:func:`xgboost.DMatrix` as input.
Parameters
----------
fname : string or os.PathLike
Name of the output buffer file.
silent : bool (optional; default: True)
If set, the output is suppressed.
"""
_check_call(_LIB.XGDMatrixSaveBinary(self.handle,
c_str(os_fspath(fname)),
ctypes.c_int(silent)))
def set_label(self, label):
"""Set label of dmatrix
Parameters
----------
label: array like
The label information to be set into DMatrix
"""
if _has_cuda_array_interface(label):
self.set_interface_info('label', label)
else:
self.set_float_info('label', label)
def set_weight(self, weight):
"""Set weight of each instance.
Parameters
----------
weight : array like
Weight for each data point
.. note:: For ranking task, weights are per-group.
In ranking task, one weight is assigned to each group (not each
data point). This is because we only care about the relative
ordering of data points within each group, so it doesn't make
sense to assign weights to individual data points.
"""
if _has_cuda_array_interface(weight):
self.set_interface_info('weight', weight)
else:
self.set_float_info('weight', weight)
def set_base_margin(self, margin):
"""Set base margin of booster to start from.
This can be used to specify the prediction value of an existing model as
the base_margin. Note that the raw margin is required, not the transformed
prediction; e.g. for logistic regression, supply the value before the
logistic transformation. See also example/demo.py.
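For example (illustrative), if a previous logistic-regression model predicts
probability p, the margin to supply here is log(p / (1 - p)).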
Parameters
----------
margin: array like
Prediction margin of each datapoint
"""
if _has_cuda_array_interface(margin):
self.set_interface_info('base_margin', margin)
else:
self.set_float_info('base_margin', margin)
def set_group(self, group):
"""Set group size of DMatrix (used for ranking).
Parameters
----------
group : array like
Group size of each group
"""
if _has_cuda_array_interface(group):
self.set_interface_info('group', group)
else:
self.set_uint_info('group', group)
def get_label(self):
"""Get the label of the DMatrix.
Returns
-------
label : array
"""
return self.get_float_info('label')
def get_weight(self):
"""Get the weight of the DMatrix.
Returns
-------
weight : array
"""
return self.get_float_info('weight')
def get_base_margin(self):
"""Get the base margin of the DMatrix.
Returns
-------
base_margin : float
"""
return self.get_float_info('base_margin')
def num_row(self):
"""Get the number of rows in the DMatrix.
Returns
-------
number of rows : int
"""
ret = c_bst_ulong()
_check_call(_LIB.XGDMatrixNumRow(self.handle,
ctypes.byref(ret)))
return ret.value
def num_col(self):
"""Get the number of columns (features) in the DMatrix.
Returns
-------
number of columns : int
"""
ret = c_bst_ulong()
_check_call(_LIB.XGDMatrixNumCol(self.handle,
ctypes.byref(ret)))
return ret.value
def slice(self, rindex, allow_groups=False):
"""Slice the DMatrix and return a new DMatrix that only contains `rindex`.
Parameters
----------
rindex : list
List of indices to be selected.
allow_groups : boolean
Allow slicing of a matrix with a groups attribute
Returns
-------
res : DMatrix
A new DMatrix containing only selected indices.
"""
res = DMatrix(None, feature_names=self.feature_names,
feature_types=self.feature_types)
res.handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixSliceDMatrixEx(self.handle,
c_array(ctypes.c_int, rindex),
c_bst_ulong(len(rindex)),
ctypes.byref(res.handle),
ctypes.c_int(1 if allow_groups else 0)))
return res
@property
def feature_names(self):
"""Get feature names (column labels).
Returns
-------
feature_names : list or None
"""
if self._feature_names is None:
self._feature_names = ['f{0}'.format(i)
for i in range(self.num_col())]
return self._feature_names
@property
def feature_types(self):
"""Get feature types (column types).
Returns
-------
feature_types : list or None
"""
return self._feature_types
@feature_names.setter
def feature_names(self, feature_names):
"""Set feature names (column labels).
Parameters
----------
feature_names : list or None
Labels for features. None will reset existing feature names
"""
if feature_names is not None:
# validate feature name
try:
if not isinstance(feature_names, str):
feature_names = list(feature_names)
else:
feature_names = [feature_names]
except TypeError:
feature_names = [feature_names]
if len(feature_names) != len(set(feature_names)):
raise ValueError('feature_names must be unique')
if len(feature_names) != self.num_col():
msg = 'feature_names must have the same length as data'
raise ValueError(msg)
# prohibit symbols that may affect parsing, e.g. [ ] <
if not all(isinstance(f, STRING_TYPES) and
not any(x in f for x in set(('[', ']', '<')))
for f in feature_names):
raise ValueError('feature_names must be string, and may not contain [, ] or <')
else:
# reset feature_types also
self.feature_types = None
self._feature_names = feature_names
@feature_types.setter
def feature_types(self, feature_types):
"""Set feature types (column types).
This is for displaying the results and unrelated
to the learning process.
Parameters
----------
feature_types : list or None
Types for features. None will reset existing feature types.
"""
if feature_types is not None:
if self._feature_names is None:
msg = 'Unable to set feature types before setting names'
raise ValueError(msg)
if isinstance(feature_types, STRING_TYPES):
# single string will be applied to all columns
feature_types = [feature_types] * self.num_col()
try:
if not isinstance(feature_types, str):
feature_types = list(feature_types)
else:
feature_types = [feature_types]
except TypeError:
feature_types = [feature_types]
if len(feature_types) != self.num_col():
msg = 'feature_types must have the same length as data'
raise ValueError(msg)
valid = ('int', 'float', 'i', 'q')
if not all(isinstance(f, STRING_TYPES) and f in valid
for f in feature_types):
raise ValueError('All feature_types must be one of {int, float, i, q}')
self._feature_types = feature_types
class Booster(object):
# pylint: disable=too-many-public-methods
"""A Booster of XGBoost.
Booster is the model of XGBoost, which contains low-level routines for
training, prediction and evaluation.
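Example (an illustrative sketch; ``dtrain`` is assumed to be an existing
DMatrix and the parameter values are placeholders):
>>> bst = Booster({'max_depth': 2, 'objective': 'binary:logistic'}, [dtrain])
>>> for i in range(10):
...     bst.update(dtrain, i)
>>> preds = bst.predict(dtrain)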
"""
feature_names = None
def __init__(self, params=None, cache=(), model_file=None):
# pylint: disable=invalid-name
"""
Parameters
----------
params : dict
Parameters for boosters.
cache : list
List of cache items.
model_file : string or os.PathLike
Path to the model file.
"""
for d in cache:
if not isinstance(d, DMatrix):
raise TypeError('invalid cache item: {}'.format(type(d).__name__), cache)
self._validate_features(d)
dmats = c_array(ctypes.c_void_p, [d.handle for d in cache])
self.handle = ctypes.c_void_p()
_check_call(_LIB.XGBoosterCreate(dmats, c_bst_ulong(len(cache)),
ctypes.byref(self.handle)))
if isinstance(params, dict) and \
'validate_parameters' not in params.keys():
params['validate_parameters'] = 1
self.set_param(params or {})
if (params is not None) and ('booster' in params):
self.booster = params['booster']
else:
self.booster = 'gbtree'
if isinstance(model_file, Booster):
assert self.handle is not None
# We use the pickle interface for getting memory snapshot from
# another model, and load the snapshot with this booster.
state = model_file.__getstate__()
handle = state['handle']
del state['handle']
ptr = (ctypes.c_char * len(handle)).from_buffer(handle)
length = c_bst_ulong(len(handle))
_check_call(
_LIB.XGBoosterUnserializeFromBuffer(self.handle, ptr, length))
self.__dict__.update(state)
elif isinstance(model_file, (STRING_TYPES, os_PathLike)):
self.load_model(model_file)
elif model_file is None:
pass
else:
raise TypeError('Unknown type:', model_file)
def __del__(self):
if hasattr(self, 'handle') and self.handle is not None:
_check_call(_LIB.XGBoosterFree(self.handle))
self.handle = None
def __getstate__(self):
# can't pickle ctypes pointers, put model content in bytearray
this = self.__dict__.copy()
handle = this['handle']
if handle is not None:
length = c_bst_ulong()
cptr = ctypes.POINTER(ctypes.c_char)()
_check_call(_LIB.XGBoosterSerializeToBuffer(self.handle,
ctypes.byref(length),
ctypes.byref(cptr)))
buf = ctypes2buffer(cptr, length.value)
this["handle"] = buf
return this
def __setstate__(self, state):
# reconstruct handle from raw data
handle = state['handle']
if handle is not None:
buf = handle
dmats = c_array(ctypes.c_void_p, [])
handle = ctypes.c_void_p()
_check_call(_LIB.XGBoosterCreate(
dmats, c_bst_ulong(0), ctypes.byref(handle)))
length = c_bst_ulong(len(buf))
ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
_check_call(
_LIB.XGBoosterUnserializeFromBuffer(handle, ptr, length))
state['handle'] = handle
self.__dict__.update(state)
def save_config(self):
'''Output internal parameter configuration of Booster as a JSON
string.'''
json_string = ctypes.c_char_p()
length = c_bst_ulong()
_check_call(_LIB.XGBoosterSaveJsonConfig(
self.handle,
ctypes.byref(length),
ctypes.byref(json_string)))
json_string = json_string.value.decode()
return json_string
def load_config(self, config):
'''Load configuration returned by `save_config`.'''
assert isinstance(config, str)
_check_call(_LIB.XGBoosterLoadJsonConfig(
self.handle,
c_str(config)))
def __copy__(self):
return self.__deepcopy__(None)
def __deepcopy__(self, _):
'''Return a copy of the booster. Caches for DMatrix are not copied, so
continuing training on the copied booster will result in lower performance
and a slightly different result.
'''
return Booster(model_file=self)
def copy(self):
"""Copy the booster object.
Returns
-------
booster: `Booster`
a copied booster model
"""
return self.__copy__()
def load_rabit_checkpoint(self):
"""Initialize the model by load from rabit checkpoint.
Returns
-------
version: integer
The version number of the model.
"""
version = ctypes.c_int()
_check_call(_LIB.XGBoosterLoadRabitCheckpoint(
self.handle, ctypes.byref(version)))
return version.value
def save_rabit_checkpoint(self):
"""Save the current booster to rabit checkpoint."""
_check_call(_LIB.XGBoosterSaveRabitCheckpoint(self.handle))
def attr(self, key):
"""Get attribute string from the Booster.
Parameters
----------
key : str
The key to get attribute from.
Returns
-------
value : str
The attribute value of the key; returns None if the attribute does not exist.
"""
ret = ctypes.c_char_p()
success = ctypes.c_int()
_check_call(_LIB.XGBoosterGetAttr(
self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)))
if success.value != 0:
return py_str(ret.value)
return None
def attributes(self):
"""Get attributes stored in the Booster as a dictionary.
Returns
-------
result : dictionary of attribute_name: attribute_value pairs of strings.
Returns an empty dict if there are no attributes.
"""
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
_check_call(_LIB.XGBoosterGetAttrNames(self.handle,
ctypes.byref(length),
ctypes.byref(sarr)))
attr_names = from_cstr_to_pystr(sarr, length)
return {n: self.attr(n) for n in attr_names}
def set_attr(self, **kwargs):
"""Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, STRING_TYPES):
raise ValueError("Set Attr only accepts string values")
value = c_str(str(value))
_check_call(_LIB.XGBoosterSetAttr(
self.handle, c_str(key), value))
def set_param(self, params, value=None):
"""Set parameters into the Booster.
Parameters
----------
params: dict/list/str
list of key,value pairs, dict of key to value or simply str key
value: optional
value of the specified parameter, when params is str key
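Example (illustrative): ``bst.set_param({'eta': 0.1})`` and
``bst.set_param('eta', 0.1)`` are equivalent ways to set a single parameter.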
"""
if isinstance(params, Mapping):
params = params.items()
elif isinstance(params, STRING_TYPES) and value is not None:
params = [(params, value)]
for key, val in params:
_check_call(_LIB.XGBoosterSetParam(self.handle, c_str(key), c_str(str(val))))
def update(self, dtrain, iteration, fobj=None):
"""Update for one iteration, with objective function calculated
internally. This function should not be called directly by users.
Parameters
----------
dtrain : DMatrix
Training data.
iteration : int
Current iteration number.
fobj : function
Customized objective function.
"""
if not isinstance(dtrain, DMatrix):
raise TypeError('invalid training matrix: {}'.format(
type(dtrain).__name__))
self._validate_features(dtrain)
if fobj is None:
_check_call(_LIB.XGBoosterUpdateOneIter(self.handle,
ctypes.c_int(iteration),
dtrain.handle))
else:
pred = self.predict(dtrain, training=True)
grad, hess = fobj(pred, dtrain)
self.boost(dtrain, grad, hess)
def boost(self, dtrain, grad, hess):
"""Boost the booster for one iteration, with customized gradient
statistics. Like :func:`xgboost.core.Booster.update`, this
function should not be called directly by users.
Parameters
----------
dtrain : DMatrix
The training DMatrix.
grad : list
The first order of gradient.
hess : list
The second order of gradient.
"""
if len(grad) != len(hess):
raise ValueError('grad / hess length mismatch: {} / {}'.format(len(grad), len(hess)))
if not isinstance(dtrain, DMatrix):
raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
self._validate_features(dtrain)
_check_call(_LIB.XGBoosterBoostOneIter(self.handle, dtrain.handle,
c_array(ctypes.c_float, grad),
c_array(ctypes.c_float, hess),
c_bst_ulong(len(grad))))
def eval_set(self, evals, iteration=0, feval=None):
# pylint: disable=invalid-name
"""Evaluate a set of data.
Parameters
----------
evals : list of tuples (DMatrix, string)
List of items to be evaluated.
iteration : int
Current iteration.
feval : function
Custom evaluation function.
Returns
-------
result: str
Evaluation result string.
"""
for d in evals:
if not isinstance(d[0], DMatrix):
raise TypeError('expected DMatrix, got {}'.format(
type(d[0]).__name__))
if not isinstance(d[1], STRING_TYPES):
raise TypeError('expected string, got {}'.format(
type(d[1]).__name__))
self._validate_features(d[0])
dmats = c_array(ctypes.c_void_p, [d[0].handle for d in evals])
evnames = c_array(ctypes.c_char_p, [c_str(d[1]) for d in evals])
msg = ctypes.c_char_p()
_check_call(_LIB.XGBoosterEvalOneIter(self.handle,
ctypes.c_int(iteration),
dmats, evnames,
c_bst_ulong(len(evals)),
ctypes.byref(msg)))
res = msg.value.decode()
if feval is not None:
for dmat, evname in evals:
feval_ret = feval(self.predict(dmat, training=False), dmat)
if isinstance(feval_ret, list):
for name, val in feval_ret:
res += '\t%s-%s:%f' % (evname, name, val)
else:
name, val = feval_ret
res += '\t%s-%s:%f' % (evname, name, val)
return res
def eval(self, data, name='eval', iteration=0):
"""Evaluate the model on mat.
Parameters
----------
data : DMatrix
The dmatrix storing the input.
name : str, optional
The name of the dataset.
iteration : int, optional
The current iteration number.
Returns
-------
result: str
Evaluation result string.
"""
self._validate_features(data)
return self.eval_set([(data, name)], iteration)
def predict(self,
data,
output_margin=False,
ntree_limit=0,
pred_leaf=False,
pred_contribs=False,
approx_contribs=False,
pred_interactions=False,
validate_features=True,
training=False):
"""Predict with data.
.. note:: This function is not thread safe.
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple thread, call
``bst.copy()`` to make copies of model object and then call
``predict()``.
Parameters
----------
data : DMatrix
The dmatrix storing the input.
output_margin : bool
Whether to output the raw untransformed margin value.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all
trees).
pred_leaf : bool
When this option is on, the output will be a matrix of (nsample,
ntrees) with each record indicating the predicted leaf index of
each sample in each tree. Note that the leaf index of a tree is
unique per tree, so you may find leaf 1 in both tree 1 and tree 0.
pred_contribs : bool
When this is True the output will be a matrix of size (nsample,
nfeats + 1) with each record indicating the feature contributions
(SHAP values) for that prediction. The sum of all feature
contributions is equal to the raw untransformed margin value of the
prediction. Note the final column is the bias term.
approx_contribs : bool
Approximate the contributions of each feature
pred_interactions : bool
When this is True the output will be a matrix of size (nsample,
nfeats + 1, nfeats + 1) indicating the SHAP interaction values for
each pair of features. The sum of each row (or column) of the
interaction values equals the corresponding SHAP value (from
pred_contribs), and the sum of the entire matrix equals the raw
untransformed margin value of the prediction. Note the last row and
column correspond to the bias term.
validate_features : bool
When this is True, validate that the Booster's and data's
feature_names are identical. Otherwise, it is assumed that the
feature_names are the same.
training : bool
Whether the prediction value is used for training. This can affect the
`dart` booster, which performs dropouts during training iterations.
.. note:: Using ``predict()`` with DART booster
If the booster object is DART type, ``predict()`` will not perform
dropouts, i.e. all the trees will be evaluated. If you want to
obtain result with dropouts, provide `training=True`.
Returns
-------
prediction : numpy array
"""
option_mask = 0x00
if output_margin:
option_mask |= 0x01
if pred_leaf:
option_mask |= 0x02
if pred_contribs:
option_mask |= 0x04
if approx_contribs:
option_mask |= 0x08
if pred_interactions:
option_mask |= 0x10
if not isinstance(data, DMatrix):
raise TypeError('Expecting data to be a DMatrix object, got: ',
type(data))
if validate_features:
self._validate_features(data)
length = c_bst_ulong()
preds = ctypes.POINTER(ctypes.c_float)()
_check_call(_LIB.XGBoosterPredict(self.handle, data.handle,
ctypes.c_int(option_mask),
ctypes.c_uint(ntree_limit),
ctypes.c_int(training),
ctypes.byref(length),
ctypes.byref(preds)))
preds = ctypes2numpy(preds, length.value, np.float32)
if pred_leaf:
preds = preds.astype(np.int32)
nrow = data.num_row()
if preds.size != nrow and preds.size % nrow == 0:
chunk_size = int(preds.size / nrow)
if pred_interactions:
ngroup = int(chunk_size / ((data.num_col() + 1) *
(data.num_col() + 1)))
if ngroup == 1:
preds = preds.reshape(nrow,
data.num_col() + 1,
data.num_col() + 1)
else:
preds = preds.reshape(nrow, ngroup,
data.num_col() + 1,
data.num_col() + 1)
elif pred_contribs:
ngroup = int(chunk_size / (data.num_col() + 1))
if ngroup == 1:
preds = preds.reshape(nrow, data.num_col() + 1)
else:
preds = preds.reshape(nrow, ngroup, data.num_col() + 1)
else:
preds = preds.reshape(nrow, chunk_size)
return preds
def save_model(self, fname):
"""Save the model to a file.
The model is saved in an XGBoost internal format which is universal
among the various XGBoost interfaces. Auxiliary attributes of the
Python Booster object (such as feature_names) will not be saved. To
preserve all attributes, pickle the Booster object.
Parameters
----------
fname : string or os.PathLike
Output file name
"""
if isinstance(fname, (STRING_TYPES, os_PathLike)): # assume file name
_check_call(_LIB.XGBoosterSaveModel(
self.handle, c_str(os_fspath(fname))))
else:
raise TypeError("fname must be a string or os_PathLike")
def save_raw(self):
"""Save the model to a in memory buffer representation
Returns
-------
a in memory buffer representation of the model
"""
length = c_bst_ulong()
cptr = ctypes.POINTER(ctypes.c_char)()
_check_call(_LIB.XGBoosterGetModelRaw(self.handle,
ctypes.byref(length),
ctypes.byref(cptr)))
return ctypes2buffer(cptr, length.value)
def load_model(self, fname):
"""Load the model from a file, local or as URI.
The model is loaded from an XGBoost format which is universal among the
various XGBoost interfaces. Auxiliary attributes of the Python Booster
object (such as feature_names) will not be loaded. To preserve all
attributes, pickle the Booster object.
Parameters
----------
fname : string, os.PathLike, or a memory buffer
Input file name or memory buffer(see also save_raw)
"""
if isinstance(fname, (STRING_TYPES, os_PathLike)):
# assume file name, cannot use os.path.exist to check, file can be
# from URL.
_check_call(_LIB.XGBoosterLoadModel(
self.handle, c_str(os_fspath(fname))))
else:
raise TypeError('Unknown file type: ', fname)
def dump_model(self, fout, fmap='', with_stats=False, dump_format="text"):
"""Dump model into a text or JSON file.
Parameters
----------
fout : string or os.PathLike
Output file name.
fmap : string or os.PathLike, optional
Name of the file containing feature map names.
with_stats : bool, optional
Controls whether the split statistics are output.
dump_format : string, optional
Format of model dump file. Can be 'text' or 'json'.
"""
if isinstance(fout, (STRING_TYPES, os_PathLike)):
fout = open(os_fspath(fout), 'w')
need_close = True
else:
need_close = False
ret = self.get_dump(fmap, with_stats, dump_format)
if dump_format == 'json':
fout.write('[\n')
for i, _ in enumerate(ret):
fout.write(ret[i])
if i < len(ret) - 1:
fout.write(",\n")
fout.write('\n]')
else:
for i, _ in enumerate(ret):
fout.write('booster[{}]:\n'.format(i))
fout.write(ret[i])
if need_close:
fout.close()
def get_dump(self, fmap='', with_stats=False, dump_format="text"):
"""Returns the model dump as a list of strings.
Parameters
----------
fmap : string or os.PathLike, optional
Name of the file containing feature map names.
with_stats : bool, optional
Controls whether the split statistics are output.
dump_format : string, optional
Format of model dump. Can be 'text', 'json' or 'dot'.
"""
fmap = os_fspath(fmap)
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
if self.feature_names is not None and fmap == '':
flen = len(self.feature_names)
fname = from_pystr_to_cstr(self.feature_names)
if self.feature_types is None:
# use quantitative as default
# {'q': quantitative, 'i': indicator}
ftype = from_pystr_to_cstr(['q'] * flen)
else:
ftype = from_pystr_to_cstr(self.feature_types)
_check_call(_LIB.XGBoosterDumpModelExWithFeatures(
self.handle,
ctypes.c_int(flen),
fname,
ftype,
ctypes.c_int(with_stats),
c_str(dump_format),
ctypes.byref(length),
ctypes.byref(sarr)))
else:
if fmap != '' and not os.path.exists(fmap):
raise ValueError("No such file: {0}".format(fmap))
_check_call(_LIB.XGBoosterDumpModelEx(self.handle,
c_str(fmap),
ctypes.c_int(with_stats),
c_str(dump_format),
ctypes.byref(length),
ctypes.byref(sarr)))
res = from_cstr_to_pystr(sarr, length)
return res
def get_fscore(self, fmap=''):
"""Get feature importance of each feature.
.. note:: Feature importance is defined only for tree boosters
Feature importance is only defined when the decision tree model is chosen as base
learner (`booster=gbtree`). It is not defined for other base learner types, such
as linear learners (`booster=gblinear`).
.. note:: Zero-importance features will not be included
Keep in mind that this function does not include zero-importance features, i.e.
those features that have not been used in any split condition.
Parameters
----------
fmap: str or os.PathLike (optional)
The name of feature map file
"""
return self.get_score(fmap, importance_type='weight')
def get_score(self, fmap='', importance_type='weight'):
"""Get feature importance of each feature.
Importance type can be defined as:
* 'weight': the number of times a feature is used to split the data across all trees.
* 'gain': the average gain across all splits the feature is used in.
* 'cover': the average coverage across all splits the feature is used in.
* 'total_gain': the total gain across all splits the feature is used in.
* 'total_cover': the total coverage across all splits the feature is used in.
.. note:: Feature importance is defined only for tree boosters
Feature importance is only defined when the decision tree model is chosen as base
learner (`booster=gbtree`). It is not defined for other base learner types, such
as linear learners (`booster=gblinear`).
Parameters
----------
fmap: str or os.PathLike (optional)
The name of feature map file.
importance_type: str, default 'weight'
One of the importance types defined above.
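Example (illustrative): ``bst.get_score(importance_type='gain')`` returns a
dict mapping each used feature to its average gain across splits.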
"""
fmap = os_fspath(fmap)
if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}:
raise ValueError('Feature importance is not defined for Booster type {}'
.format(self.booster))
allowed_importance_types = ['weight', 'gain', 'cover', 'total_gain', 'total_cover']
if importance_type not in allowed_importance_types:
msg = ("importance_type mismatch, got '{}', expected one of " +
repr(allowed_importance_types))
raise ValueError(msg.format(importance_type))
# for 'weight', simply count how many times each feature is used to split
if importance_type == 'weight':
# do a simpler tree dump to save time
trees = self.get_dump(fmap, with_stats=False)
fmap = {}
for tree in trees:
for line in tree.split('\n'):
# look for the opening square bracket
arr = line.split('[')
# if no opening bracket (leaf node), ignore this line
if len(arr) == 1:
continue
# extract feature name from string between []
fid = arr[1].split(']')[0].split('<')[0]
if fid not in fmap:
# if the feature hasn't been seen yet
fmap[fid] = 1
else:
fmap[fid] += 1
return fmap
average_over_splits = True
if importance_type == 'total_gain':
importance_type = 'gain'
average_over_splits = False
elif importance_type == 'total_cover':
importance_type = 'cover'
average_over_splits = False
trees = self.get_dump(fmap, with_stats=True)
importance_type += '='
fmap = {}
gmap = {}
for tree in trees:
for line in tree.split('\n'):
# look for the opening square bracket
arr = line.split('[')
# if no opening bracket (leaf node), ignore this line
if len(arr) == 1:
continue
# look for the closing bracket, extract only info within that bracket
fid = arr[1].split(']')
# extract gain or cover from string after closing bracket
g = float(fid[1].split(importance_type)[1].split(',')[0])
# extract feature name from string before closing bracket
fid = fid[0].split('<')[0]
if fid not in fmap:
# if the feature hasn't been seen yet
fmap[fid] = 1
gmap[fid] = g
else:
fmap[fid] += 1
gmap[fid] += g
# calculate average value (gain/cover) for each feature
if average_over_splits:
for fid in gmap:
gmap[fid] = gmap[fid] / fmap[fid]
return gmap
def trees_to_dataframe(self, fmap=''):
"""Parse a boosted tree model text dump into a pandas DataFrame structure.
This feature is only defined when the decision tree model is chosen as base
learner (`booster in {gbtree, dart}`). It is not defined for other base learner
types, such as linear learners (`booster=gblinear`).
Parameters
----------
fmap: str or os.PathLike (optional)
The name of feature map file.
"""
# pylint: disable=too-many-locals
fmap = os_fspath(fmap)
if not PANDAS_INSTALLED:
raise Exception(('pandas must be available to use this method. '
'Install pandas before calling again.'))
if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}:
raise ValueError('This method is not defined for Booster type {}'
.format(self.booster))
tree_ids = []
node_ids = []
fids = []
splits = []
y_directs = []
n_directs = []
missings = []
gains = []
covers = []
trees = self.get_dump(fmap, with_stats=True)
for i, tree in enumerate(trees):
for line in tree.split('\n'):
arr = line.split('[')
# Leaf node
if len(arr) == 1:
# Last element of line.split is an empty string
if arr == ['']:
continue
# parse string
parse = arr[0].split(':')
stats = re.split('=|,', parse[1])
# append to lists
tree_ids.append(i)
node_ids.append(int(re.findall(r'\b\d+\b', parse[0])[0]))
fids.append('Leaf')
splits.append(float('NAN'))
y_directs.append(float('NAN'))
n_directs.append(float('NAN'))
missings.append(float('NAN'))
gains.append(float(stats[1]))
covers.append(float(stats[3]))
# Not a Leaf Node
else:
# parse string
fid = arr[1].split(']')
parse = fid[0].split('<')
stats = re.split('=|,', fid[1])
# append to lists
tree_ids.append(i)
node_ids.append(int(re.findall(r'\b\d+\b', arr[0])[0]))
fids.append(parse[0])
splits.append(float(parse[1]))
str_i = str(i)
y_directs.append(str_i + '-' + stats[1])
n_directs.append(str_i + '-' + stats[3])
missings.append(str_i + '-' + stats[5])
gains.append(float(stats[7]))
covers.append(float(stats[9]))
ids = [str(t_id) + '-' + str(n_id) for t_id, n_id in zip(tree_ids, node_ids)]
df = DataFrame({'Tree': tree_ids, 'Node': node_ids, 'ID': ids,
'Feature': fids, 'Split': splits, 'Yes': y_directs,
'No': n_directs, 'Missing': missings, 'Gain': gains,
'Cover': covers})
if callable(getattr(df, 'sort_values', None)):
# pylint: disable=no-member
return df.sort_values(['Tree', 'Node']).reset_index(drop=True)
# pylint: disable=no-member
return df.sort(['Tree', 'Node']).reset_index(drop=True)
def _validate_features(self, data):
"""
Validate Booster and data's feature_names are identical.
Set feature_names and feature_types from DMatrix
"""
if self.feature_names is None:
self.feature_names = data.feature_names
self.feature_types = data.feature_types
else:
# Booster can't accept data with different feature names
if self.feature_names != data.feature_names:
dat_missing = set(self.feature_names) - set(data.feature_names)
my_missing = set(data.feature_names) - set(self.feature_names)
msg = 'feature_names mismatch: {0} {1}'
if dat_missing:
msg += ('\nexpected ' + ', '.join(
str(s) for s in dat_missing) + ' in input data')
if my_missing:
msg += ('\ntraining data did not have the following fields: ' +
', '.join(str(s) for s in my_missing))
raise ValueError(msg.format(self.feature_names,
data.feature_names))
def get_split_value_histogram(self, feature, fmap='', bins=None,
as_pandas=True):
"""Get split value histogram of a feature
Parameters
----------
feature: str
The name of the feature.
fmap: str or os.PathLike (optional)
The name of feature map file.
bins: int, default None
The maximum number of bins.
Number of bins equals number of unique split values n_unique,
if bins == None or bins > n_unique.
as_pandas: bool, default True
Return pd.DataFrame when pandas is installed.
If False or pandas is not installed, return numpy ndarray.
Returns
-------
a histogram of used splitting values for the specified feature
either as numpy array or pandas DataFrame.
"""
xgdump = self.get_dump(fmap=fmap)
values = []
regexp = re.compile(r"\[{0}<([\d.Ee+-]+)\]".format(feature))
for i, _ in enumerate(xgdump):
m = re.findall(regexp, xgdump[i])
values.extend([float(x) for x in m])
n_unique = len(np.unique(values))
bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
nph = np.histogram(values, bins=bins)
nph = np.column_stack((nph[1][1:], nph[0]))
nph = nph[nph[:, 1] > 0]
if as_pandas and PANDAS_INSTALLED:
return DataFrame(nph, columns=['SplitValue', 'Count'])
if as_pandas and not PANDAS_INSTALLED:
sys.stderr.write(
"Returning histogram as ndarray (as_pandas == True, but pandas is not installed).")
return nph
| [] | [] | ["PATH"] | [] | ["PATH"] | python | 1 | 0 | |
examples/user/update/updateAnAccount.go
|
package example
import (
"fmt"
"os"
"go.m3o.com/user"
)
// Update the account username or email
func UpdateAnAccount() {
userService := user.NewUserService(os.Getenv("M3O_API_TOKEN"))
rsp, err := userService.Update(&user.UpdateRequest{
Email: "[email protected]",
Id: "usrid-1",
})
fmt.Println(rsp, err)
}
| ["\"M3O_API_TOKEN\""] | [] | ["M3O_API_TOKEN"] | [] | ["M3O_API_TOKEN"] | go | 1 | 0 | |
vendor/github.com/armon/consul-api/acl_test.go
|
package consulapi
import (
"os"
"testing"
)
// ROOT is a management token for the tests
var CONSUL_ROOT string
func init() {
CONSUL_ROOT = os.Getenv("CONSUL_ROOT")
}
func TestACL_CreateDestroy(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
ae := ACLEntry{
Name: "API test",
Type: ACLClientType,
Rules: `key "" { policy = "deny" }`,
}
id, wm, err := acl.Create(&ae, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
ae2, _, err := acl.Info(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules {
t.Fatalf("Bad: %#v", ae2)
}
wm, err = acl.Destroy(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
}
func TestACL_CloneDestroy(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
id, wm, err := acl.Clone(CONSUL_ROOT, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
if id == "" {
t.Fatalf("invalid: %v", id)
}
wm, err = acl.Destroy(id, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if wm.RequestTime == 0 {
t.Fatalf("bad: %v", wm)
}
}
func TestACL_Info(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
ae, qm, err := acl.Info(CONSUL_ROOT, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
if ae == nil || ae.ID != CONSUL_ROOT || ae.Type != ACLManagementType {
t.Fatalf("bad: %#v", ae)
}
}
func TestACL_List(t *testing.T) {
if CONSUL_ROOT == "" {
t.SkipNow()
}
c := makeClient(t)
c.config.Token = CONSUL_ROOT
acl := c.ACL()
acls, qm, err := acl.List(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(acls) < 2 {
t.Fatalf("bad: %v", acls)
}
if qm.LastIndex == 0 {
t.Fatalf("bad: %v", qm)
}
if !qm.KnownLeader {
t.Fatalf("bad: %v", qm)
}
}
| ["\"CONSUL_ROOT\""] | [] | ["CONSUL_ROOT"] | [] | ["CONSUL_ROOT"] | go | 1 | 0 | |
vendor/golang.org/x/sys/unix/mkpost.go
|
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// mkpost processes the output of cgo -godefs to
// modify the generated types. It is used to clean up
// the sys API in an architecture specific manner.
//
// mkpost is run after cgo -godefs; see README.md.
package main
import (
"bytes"
"fmt"
"go/format"
"io/ioutil"
"log"
"os"
"regexp"
)
func main() {
// Get the OS and architecture (using GOARCH_TARGET if it exists)
goos := os.Getenv("GOOS")
goarch := os.Getenv("GOARCH_TARGET")
if goarch == "" {
goarch = os.Getenv("GOARCH")
}
// Check that we are using the new build system if we should be.
if goos == "linux" && goarch != "sparc64" {
if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
os.Stderr.WriteString("In the new build system, mkpost should not be called directly.\n")
os.Stderr.WriteString("See README.md\n")
os.Exit(1)
}
}
b, err := ioutil.ReadAll(os.Stdin)
if err != nil {
log.Fatal(err)
}
// Intentionally export __val fields in Fsid and Sigset_t
valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`)
b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$3}"))
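// For example, `type Fsid struct { X__val [2]int32 }` becomes
// `type Fsid struct { Val [2]int32 }` (field type shown for illustration).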
// If we have empty Ptrace structs, we should delete them. Only s390x emits
// nonempty Ptrace structs.
ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`)
b = ptraceRexexp.ReplaceAll(b, nil)
// Replace the control_regs union with a blank identifier for now.
controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`)
b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64"))
// Remove fields that are added by glibc
// Note that this is unstable as the identifiers are private.
removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`)
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
// Convert [65]int8 to [65]byte in Utsname members to simplify
// conversion to string; see golang.org/issue/20753
convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`)
b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte"))
// Convert [1024]int8 to [1024]byte in Ptmget members
convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`)
b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte"))
// Remove spare fields (e.g. in Statx_t)
spareFieldsRegex := regexp.MustCompile(`X__spare\S*`)
b = spareFieldsRegex.ReplaceAll(b, []byte("_"))
// Remove cgo padding fields
removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`)
b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_"))
// Remove padding, hidden, or unused fields
removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`)
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
// Remove the first line of warning from cgo
b = b[bytes.IndexByte(b, '\n')+1:]
// Modify the command in the header to include:
// mkpost, our own warning, and a build tag.
replacement := fmt.Sprintf(`$1 | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s,%s`, goarch, goos)
cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`)
b = cgoCommandRegex.ReplaceAll(b, []byte(replacement))
// gofmt
b, err = format.Source(b)
if err != nil {
log.Fatal(err)
}
os.Stdout.Write(b)
}
| ["\"GOOS\"", "\"GOARCH_TARGET\"", "\"GOARCH\"", "\"GOLANG_SYS_BUILD\""] | [] | ["GOARCH_TARGET", "GOARCH", "GOOS", "GOLANG_SYS_BUILD"] | [] | ["GOARCH_TARGET", "GOARCH", "GOOS", "GOLANG_SYS_BUILD"] | go | 4 | 0 | |
tests/python/unittest/test_runtime_rpc.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import os
import logging
import time
import multiprocessing
import numpy as np
from tvm import rpc
from tvm.contrib import util
from tvm.rpc.tracker import Tracker
def test_bigendian_rpc():
"""Test big endian rpc when there is a PowerPC RPC server available"""
host = os.environ.get("TVM_POWERPC_TEST_HOST", None)
port = os.environ.get("TVM_POWERPC_TEST_PORT", 9090)
if host is None:
return
def verify_rpc(remote, target, shape, dtype):
A = tvm.placeholder(shape, dtype=dtype)
B = tvm.compute(A.shape, lambda i: A[i]+tvm.const(1, A.dtype))
s = tvm.create_schedule(B.op)
f = tvm.build(s, [A, B], target, name="myadd")
ctx = remote.cpu(0)
a = tvm.nd.array(np.random.randint(0, 256, size=shape).astype(A.dtype), ctx=ctx)
b = tvm.nd.array(np.zeros(shape).astype(A.dtype), ctx=ctx)
temp = util.tempdir()
path_dso = temp.relpath("dev_lib.o")
f.save(path_dso)
remote.upload(path_dso)
f = remote.load_module("dev_lib.o")
f(a, b)
tvm.testing.assert_allclose(a.asnumpy() + 1, b.asnumpy())
print("Test RPC connection to PowerPC...")
remote = rpc.connect(host, port)
target = "llvm -mtriple=powerpc-linux-gnu"
for dtype in ["float32", "float64", "int32", "int8"]:
verify_rpc(remote, target, (10,), dtype)
def test_rpc_simple():
if not tvm.runtime.enabled("rpc"):
return
@tvm.register_func("rpc.test.addone")
def addone(x):
return x + 1
@tvm.register_func("rpc.test.strcat")
def strcat(name, x):
return "%s:%d" % (name, x)
@tvm.register_func("rpc.test.except")
def remotethrow(name):
raise ValueError("%s" % name)
server = rpc.Server("localhost", key="x1")
client = rpc.connect(server.host, server.port, key="x1")
f1 = client.get_function("rpc.test.addone")
assert f1(10) == 11
f3 = client.get_function("rpc.test.except")
try:
f3("abc")
assert False
except tvm.error.TVMError as e:
assert "abc" in str(e)
f2 = client.get_function("rpc.test.strcat")
assert f2("abc", 11) == "abc:11"
def test_rpc_array():
if not tvm.runtime.enabled("rpc"):
return
x = np.random.randint(0, 10, size=(3, 4))
@tvm.register_func("rpc.test.remote_array_func")
def remote_array_func(y):
np.testing.assert_equal(y.asnumpy(), x)
server = rpc.Server("localhost")
remote = rpc.connect(server.host, server.port)
r_cpu = tvm.nd.array(x, remote.cpu(0))
assert str(r_cpu.context).startswith("remote")
np.testing.assert_equal(r_cpu.asnumpy(), x)
fremote = remote.get_function("rpc.test.remote_array_func")
fremote(r_cpu)
def test_rpc_file_exchange():
if not tvm.runtime.enabled("rpc"):
return
server = rpc.Server("localhost")
remote = rpc.connect(server.host, server.port)
blob = bytearray(np.random.randint(0, 10, size=(10)))
remote.upload(blob, "dat.bin")
rev = remote.download("dat.bin")
assert(rev == blob)
def test_rpc_remote_module():
if not tvm.runtime.enabled("rpc"):
return
server = rpc.Server("localhost")
client = rpc.connect(server.host, server.port)
# graph
n = tvm.convert(1024)
A = tvm.placeholder((n,), name='A')
B = tvm.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
s = tvm.create_schedule(B.op)
def check_remote(remote):
if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled")
return
temp = util.tempdir()
ctx = remote.cpu(0)
f = tvm.build(s, [A, B], "llvm", name="myadd")
path_dso = temp.relpath("dev_lib.so")
f.export_library(path_dso)
remote.upload(path_dso)
f1 = remote.load_module("dev_lib.so")
a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx)
time_f = f1.time_evaluator(f1.entry_name, remote.cpu(0), number=10)
cost = time_f(a, b).mean
print('%g secs/op' % cost)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
def check_remote_link_cl(remote):
"""Test function to run remote code such as cl
This is not enabled because there is a forking issue in the TVM runtime
when the server launches after the OpenCL runtime initializes. We leave it
as an example of how to do RPC when we want to do linking on the remote.
"""
if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled")
return
if not tvm.runtime.enabled("opencl"):
print("Skip because opencl is not enabled")
return
temp = util.tempdir()
ctx = remote.cl(0)
s = tvm.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=32)
s[B].bind(xo, tvm.thread_axis("blockIdx.x"))
s[B].bind(xi, tvm.thread_axis("threadIdx.x"))
f = tvm.build(s, [A, B], "opencl", target_host="llvm", name="myadd")
# Option 1: save modules separately and rely on remote compiler
path_o = temp.relpath("myadd.o")
path_cl = temp.relpath("myadd.cl")
path_json = temp.relpath("myadd.tvm_meta.json")
f.save(path_o)
f.imported_modules[0].save(path_cl)
remote.upload(path_o)
remote.upload(path_cl)
# upload meta data
remote.upload(path_json)
fhost = remote.load_module("myadd.o")
fdev = remote.load_module("myadd.cl")
fhost.import_module(fdev)
a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx)
fhost(a, b)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
# Option 2: export library as a tar ball then handled by remote compiler
path_tar = temp.relpath("myadd.tar")
f.export_library(path_tar)
remote.upload(path_tar)
fhost = remote.load_module("myadd.tar")
a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx)
fhost(a, b)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
check_remote(client)
check_remote(rpc.LocalSession())
def test_rpc_return_func():
@tvm.register_func("rpc.test.remote_func")
def addone(x):
return lambda y: x+y
server = rpc.Server("localhost", key="x1")
client = rpc.connect(server.host, server.port, key="x1")
f1 = client.get_function("rpc.test.remote_func")
fadd = f1(10)
assert fadd(12) == 22
def test_rpc_return_ndarray():
# Use closure to check the ref counter correctness
nd = tvm.nd.array(np.zeros(10).astype("float32"))
@tvm.register_func("rpc.test.remote_return_nd")
def my_module(name):
if name == "get_arr":
return lambda : nd
elif name == "ref_count":
return lambda : tvm._api_internal._ndarray_use_count(nd)
elif name == "get_elem":
return lambda idx: nd.asnumpy()[idx]
elif name == "get_arr_elem":
return lambda arr, idx: arr.asnumpy()[idx]
# start server
server = rpc.Server("localhost", key="x1")
client = rpc.connect(server.host, server.port, key="x1")
m = client.get_function("rpc.test.remote_return_nd")
get_arr = m("get_arr")
ref_count = m("ref_count")
get_elem = m("get_elem")
get_arr_elem = m("get_arr_elem")
# array test
def run_arr_test():
arr = get_arr()
assert ref_count() == 2
arr2 = get_arr()
assert ref_count() == 3
assert arr.context == client.cpu(0)
arr.copyfrom(np.ones(10).astype(arr.dtype))
assert arr2.asnumpy()[0] == 1.0
assert get_elem(0) == 1.0
assert get_arr_elem(arr2, 0) == 1.0
assert ref_count() == 1
run_arr_test()
# check recycle correctness
assert ref_count() == 1
def test_local_func():
@tvm.register_func("rpc.test.remote_func2")
def addone(x):
return lambda y: x+y
client = rpc.LocalSession()
f1 = client.get_function("rpc.test.remote_func2")
fadd = f1(10)
assert fadd(12) == 22
blob = bytearray(np.random.randint(0, 10, size=(10)))
client.upload(blob, "dat.bin")
rev = client.download("dat.bin")
assert rev == blob
def test_rpc_tracker_register():
# test registration
tracker = Tracker('localhost', port=9000, port_end=10000)
device_key = 'test_device'
server = rpc.Server('localhost', port=9000, port_end=10000,
key=device_key,
tracker_addr=(tracker.host, tracker.port))
time.sleep(1)
client = rpc.connect_tracker(tracker.host, tracker.port)
summary = client.summary()
assert summary['queue_info'][device_key]['free'] == 1
remote = client.request(device_key)
summary = client.summary()
assert summary['queue_info'][device_key]['free'] == 0
del remote
time.sleep(1)
summary = client.summary()
assert summary['queue_info'][device_key]['free'] == 1
server.terminate()
time.sleep(1)
summary = client.summary()
assert summary['queue_info'][device_key]['free'] == 0
tracker.terminate()
def test_rpc_tracker_request():
# test concurrent request
tracker = Tracker('localhost', port=9000, port_end=10000)
device_key = 'test_device'
server = rpc.Server('localhost', port=9000, port_end=10000,
key=device_key,
tracker_addr=(tracker.host, tracker.port))
client = rpc.connect_tracker(tracker.host, tracker.port)
def target(host, port, device_key, timeout):
client = rpc.connect_tracker(host, port)
remote = client.request(device_key, session_timeout=timeout)
while True:
pass
remote.cpu()
proc1 = multiprocessing.Process(target=target,
args=(tracker.host, tracker.port, device_key, 4))
proc2 = multiprocessing.Process(target=target,
args=(tracker.host, tracker.port, device_key, 200))
proc1.start()
time.sleep(0.5)
proc2.start()
time.sleep(0.5)
summary = client.summary()
assert summary['queue_info'][device_key]['free'] == 0
assert summary['queue_info'][device_key]['pending'] == 1
proc1.terminate()
proc1.join()
time.sleep(0.5)
summary = client.summary()
assert summary['queue_info'][device_key]['free'] == 0
assert summary['queue_info'][device_key]['pending'] == 0
proc2.terminate()
proc2.join()
server.terminate()
tracker.terminate()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
test_rpc_return_ndarray()
test_rpc_return_func()
test_bigendian_rpc()
test_rpc_remote_module()
test_rpc_file_exchange()
test_rpc_array()
test_rpc_simple()
test_local_func()
test_rpc_tracker_register()
test_rpc_tracker_request()
| [] | [] | ["TVM_POWERPC_TEST_HOST", "TVM_POWERPC_TEST_PORT"] | [] | ["TVM_POWERPC_TEST_HOST", "TVM_POWERPC_TEST_PORT"] | python | 2 | 0 | |
hello.py
|
import re
import requests
from cloudant import Cloudant
from flask import Flask, render_template, request, jsonify
from flask_compress import Compress
import atexit
import os
import json
import pan
app = Flask(__name__, static_url_path='')
Compress(app)
db_name = 'mydb'
client = None
db = None
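# Resolve Cloudant credentials from, in order: the VCAP_SERVICES environment
# variable (IBM Cloud), explicit CLOUDANT_* environment variables, or a local
# vcap-local.json file; if none are present, the app runs without a database.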
if 'VCAP_SERVICES' in os.environ:
vcap = json.loads(os.getenv('VCAP_SERVICES'))
print('Found VCAP_SERVICES')
if 'cloudantNoSQLDB' in vcap:
creds = vcap['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = creds['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
elif "CLOUDANT_URL" in os.environ:
client = Cloudant(os.environ['CLOUDANT_USERNAME'], os.environ['CLOUDANT_PASSWORD'], url=os.environ['CLOUDANT_URL'],
connect=True)
db = client.create_database(db_name, throw_on_exists=False)
elif os.path.isfile('vcap-local.json'):
with open('vcap-local.json') as f:
vcap = json.load(f)
print('Found local VCAP_SERVICES')
creds = vcap['services']['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = creds['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
# On IBM Cloud Cloud Foundry, get the port number from the environment variable PORT
# When running this app on the local machine, default the port to 8000
port = int(os.getenv('PORT', 8000))
@app.route('/')
def root():
return app.send_static_file('index1.html')
# return render_template('index1.html')
@app.route('/api/month_list/')
def month_list():
print(request)
download_ = get_duty_data()
re_compile = re.compile(r"\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d")
loads = json.loads(download_)
last_change = "0"
file_info = pan.Pan().pan_getlist("advs.txt")
# print(1, last_update)
res_month = set()
for i in loads:
# i_schedule_name_ = {"date": i["UserName"], "name": i["ScheduleDay"], "ScheduleName": i["ScheduleName"]}
# res.append(i_schedule_name_)
# print(i)
schedule_day_: str = i["ScheduleDay"]
res_month.add(schedule_day_[:6])
time_ = i["AddTime"]
if re_compile.match(time_):
if last_change < time_:
format_time = time_[:16]
format_time = format_time.replace("T", " ").replace("-", "/")
last_change = format_time
# print(format_time)
res = sorted(list(res_month))
# return HttpResponse(json.dumps({'month_list': res, 'last_change': last_change, 'file_info': file_info}),
# content_type="application/json,charset=utf-8")
json_dumps = json.dumps({'month_list': res, 'last_change': last_change, 'file_info': file_info})
print(json_dumps)
return json_dumps
def get_duty_data(month=""):
Pan = pan.Pan()
link_ = 'hur5uk7ct706pbkrinw3kgex3sf68c02'
password_ = '1'
link_to_password = Pan.pan_link_to_password(link_, password_)
# path = '班表1'
# filename = 'advs.txt'
# Pan.pan_upload_for_pwd(link_, password_, path, filename, file_data)
if len(month) > 0:
file_name_ = "班表1/duty_{month}".format(month=month)
else:
file_name_ = "班表1/advs.txt"
getlist_ = Pan.pan_getlist_for_pwd(link_, link_to_password, file_name_)
download_ = Pan.pan_download_for_pwd(getlist_)
return download_
def get_holidays_data(yeah):
# requests_get = requests.get(
# "https://natescarlet.coding.net/p/github/d/holiday-cn/git/raw/master/{}.json".format(yeah))
requests_get = requests.get(
"https://natescarlet.coding.net/p/github/d/holiday-cn/git/raw/master/{}.json".format(yeah))
return requests_get.text
@app.route('/api/holidays_list/')
def holidays_list():
get_yeah = request.args.get("yeah")
download_ = get_holidays_data(get_yeah)
# loads = json.loads(download_, encoding='GB2312')
# return HttpResponse(download_)
# print(json.dumps(download,_ensure_ascii=False, encoding='utf-8'))
# download_ = json.dumps(loads, encoding='utf-8')
# print(download_)
return download_
# return render(request, "index.html")
@app.route('/api/duty_table/')
def duty_table():
get_month = request.args.get("month")
download_ = get_duty_data(month=get_month)
# print(download_)
return download_
# NOTE: the early return above makes the remainder of this function unreachable.
loads = json.loads(download_)
res = {}
res_date = set()
for i in loads:
# i_schedule_name_ = {"date": i["UserName"], "name": i["ScheduleDay"], "ScheduleName": i["ScheduleName"]}
# res.append(i_schedule_name_)
# print(i)
schedule_day_ = i["ScheduleDay"]
if get_month and schedule_day_[:6] == get_month:
res_date.add(schedule_day_)
# UserCode may be missing or wrong
# res_get = res.setdefault(i['UserCode'], {'UserName': i["UserName"]})
# UserID may be missing or wrong
# res_get = res.setdefault(i['UserID'], {'UserName': i["UserName"]})
res_get = res.setdefault(i['UserName'], {'UserName': i["UserName"]})
res_get[schedule_day_] = i["ScheduleName"]
# if i["UserName"] == "黄文焰":
# print(i)
# res_name = sorted(list(res_name))
# print(res_name)
# res_date = [{'key': 'UserName', 'label': get_month}] + sorted(list(res_date))
# print(res.values())
res = list(res.values())
return json.dumps(res)
# return render(request, "duty_table1.html", {'List': json.dumps(res)})
# /**
# * Endpoint to get a JSON array of all the visitors in the database
# * REST API example:
# * <code>
# * GET http://localhost:8000/api/visitors
# * </code>
# *
# * Response:
# * [ "Bob", "Jane" ]
# * @return An array of all the visitor names
# */
@app.route('/api/visitors', methods=['GET'])
def get_visitor():
if client:
return jsonify(list(map(lambda doc: doc['name'], db)))
else:
print('No database')
return jsonify([])
# /* Endpoint to greet and add a new visitor to database.
# * Send a POST request to localhost:8000/api/visitors with body
# * {
# * "name": "Bob"
# * }
# */
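# Example request (illustrative; assumes the app is running locally on port 8000):
#   curl -X POST http://localhost:8000/api/visitors \
#        -H "Content-Type: application/json" \
#        -d '{"name": "Bob"}'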
@app.route('/api/visitors', methods=['POST'])
def put_visitor():
user = request.json['name']
data = {'name': user}
if client:
my_document = db.create_document(data)
data['_id'] = my_document['_id']
return jsonify(data)
else:
print('No database')
return jsonify(data)
@atexit.register
def shutdown():
if client:
client.disconnect()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port, debug=True)
|
[] |
[] |
[
"PORT",
"VCAP_SERVICES",
"CLOUDANT_URL",
"CLOUDANT_PASSWORD",
"CLOUDANT_USERNAME"
] |
[]
|
["PORT", "VCAP_SERVICES", "CLOUDANT_URL", "CLOUDANT_PASSWORD", "CLOUDANT_USERNAME"]
|
python
| 5 | 0 | |
src/main/java/org/sdw/mapping/RMLmapper.java
|
/*******************************************************************************
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*******************************************************************************/
package org.sdw.mapping;
import java.io.BufferedReader;
import java.io.File;
import java.io.InputStreamReader;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.commons.configuration2.Configuration;
import org.sdw.ingestion.DatasetConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author Ritesh Kumar Singh
*
*/
public class RMLmapper implements RDFmapper
{
public static final Logger LOG = LoggerFactory.getLogger(RMLmapper.class);
private static String commonRdfFormat;
/**
* Parametrized constructor with single input
* @param commonRdfFormat : Common RDF format to use for the converted output
*/
public RMLmapper(String commonRdfFormat)
{
this.commonRdfFormat = commonRdfFormat;
}
/**
* Calls the interface's execute method with params set
* @param datasetConfig : Config of the dataset
* @param numThreads : Number of parallel executor threads to launch
*/
public void parallelExecutor(final DatasetConfig datasetConfig, final int numThreads)
{
ExecutorService executor = Executors.newCachedThreadPool();
for(int i=0; i< numThreads; i++)
{
executor.execute(new ParallelExecutor(datasetConfig, i));
}
}
/**
* Implemented from the interface
* @param sourceFile : path to source file
* @param mappingFile : rml mapping file
* @param outputFile : file to create after conversion
*/
public void execute(String sourceFile, String mappingFile, String outputFile)
{
deleteOutputIfExists(outputFile);
String rmlHome = System.getenv("RML_HOME");
if (rmlHome == null || rmlHome.isEmpty())
{
rmlHome = System.getProperty("user.dir") + "/lib/";
}
String command = "java -jar "+rmlHome+"RML-Mapper.jar -m " + mappingFile + " -o "+outputFile+" -f " + commonRdfFormat;
String res[] = executeCommandShell(command);
if(Integer.parseInt(res[0]) != 0)
{
LOG.error("ERROR : Could not convert the file to rdf format");
}
else
{
//LOG.info(res[1]);
}
}
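/* Example of the generated shell command (illustrative file names):
 *   java -jar <baseDir>/lib/RML-Mapper.jar -m mapping.rml.ttl -o output.nt -f NTRIPLES
 */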
/**
* Execute command on local shell
* @param command : Command to be executed
* @return : A string array with exit code and output of execution
*/
private String[] executeCommandShell(String command)
{
LOG.info("Shell command: $" + command);
StringBuffer op = new StringBuffer();
String out[] = new String[2];
Process process;
try
{
process = Runtime.getRuntime().exec(command);
process.waitFor();
int exitStatus = process.exitValue();
out[0] = Integer.toString(exitStatus);
BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()));
String line = null;
while ((line = reader.readLine()) != null)
{
op.append(line + "\n");
}
out[1] = op.toString();
}
catch (Exception e)
{
LOG.error(e.getMessage(), e);
}
return out;
}
/**
* Deletes the output file if it already exists
* @param outputFile : delete this file if already exists
*/
public void deleteOutputIfExists(String outputFile)
{
LOG.info("Deleting old output file to replace with new one");
File file = new File(outputFile);
if(file.exists())
{
String command = "rm "+outputFile;
String res[] = executeCommandShell(command);
if(Integer.parseInt(res[0]) != 0)
{
LOG.error("ERROR: Could not delete file: "+outputFile);
}
}
else
{
LOG.info("File to be deleted not found");
}
}
private class ParallelExecutor implements Runnable
{
private int count;
private DatasetConfig datasetConfig;
ParallelExecutor(DatasetConfig datasetConfig, int count)
{
this.datasetConfig = datasetConfig;
this.count = count;
}
@Override
public void run()
{
execute(datasetConfig.getPartFileAbsolutePaths().get(count), datasetConfig.getConfiguration().getString("mappingFile"), datasetConfig.getConfiguration().getString("outputFile"));
}
}
}
|
[
"\"RML_HOME\""
] |
[] |
[
"RML_HOME"
] |
[]
|
["RML_HOME"]
|
java
| 1 | 0 | |
test/webapi/test_websocket.py
|
import unittest
import os
from cate.core.wsmanag import FSWorkspaceManager
from cate.util.monitor import Monitor
from cate.webapi.websocket import WebSocketService
class WebSocketServiceTest(unittest.TestCase):
def setUp(self):
self.service = WebSocketService(FSWorkspaceManager())
@unittest.skipIf(os.environ.get('CATE_DISABLE_WEB_TESTS', None) == '1', 'CATE_DISABLE_WEB_TESTS = 1')
def test_get_data_stores(self):
data_stores = self.service.get_data_stores()
self.assertIsInstance(data_stores, list)
self.assertGreater(len(data_stores), 1)
self.assertIn('local', [ds['id'] for ds in data_stores])
@unittest.skipIf(os.environ.get('CATE_DISABLE_WEB_TESTS', None) == '1', 'CATE_DISABLE_WEB_TESTS = 1')
def test_get_data_sources(self):
data_stores = self.service.get_data_stores()
for ds in data_stores:
data_sources = self.service.get_data_sources(ds['id'], monitor=Monitor.NONE)
self.assertIsInstance(data_sources, list)
def test_get_operations(self):
ops = self.service.get_operations()
self.assertIsInstance(ops, list)
self.assertGreater(len(ops), 20)
self.assertIn('open_dataset', [op['name'] for op in ops])
open_dataset_op = [op for op in ops if op['name'] == 'open_dataset'][0]
keys = sorted(list(open_dataset_op.keys()))
self.assertEqual(keys, ['has_monitor', 'header', 'inputs', 'name', 'outputs', 'qualified_name'])
keys = sorted(list(open_dataset_op['header'].keys()))
self.assertEqual(keys, ['description', 'res_pattern', 'tags'])
names = [props['name'] for props in open_dataset_op['inputs']]
self.assertEqual(names, ['ds_id', 'time_range', 'region', 'var_names', 'normalize',
'force_local', 'local_ds_id'])
names = [props['name'] for props in open_dataset_op['outputs']]
self.assertEqual(names, ['return'])
def test_get_operations_with_deprecations(self):
from cate.core.op import op, op_input, op_output, OpRegistry
registry = OpRegistry()
@op(registry=registry, deprecated=True)
def my_deprecated_op():
pass
@op_input('a', registry=registry)
@op_input('b', registry=registry, deprecated=True)
@op_output('u', registry=registry, deprecated=True)
@op_output('v', registry=registry)
def my_op_with_deprecated_io(a, b=None):
pass
self.assertIsNotNone(registry.get_op(my_deprecated_op, fail_if_not_exists=True))
self.assertIsNotNone(registry.get_op(my_op_with_deprecated_io, fail_if_not_exists=True))
ops = self.service.get_operations(registry=registry)
op_names = {op['name'] for op in ops}
self.assertIn('test.webapi.test_websocket.my_op_with_deprecated_io', op_names)
self.assertNotIn('test.webapi.test_websocket.my_deprecated_op', op_names)
op = [op for op in ops if op['name'] == 'test.webapi.test_websocket.my_op_with_deprecated_io'][0]
self.assertEqual(len(op['inputs']), 1)
self.assertEqual(op['inputs'][0]['name'], 'a')
self.assertEqual(len(op['outputs']), 1)
self.assertEqual(op['outputs'][0]['name'], 'v')
|
[] |
[] |
[
"CATE_DISABLE_WEB_TESTS"
] |
[]
|
["CATE_DISABLE_WEB_TESTS"]
|
python
| 1 | 0 | |
vendor/github.com/containers/common/pkg/config/util_supported.go
|
// +build linux darwin
package config
import (
"fmt"
"os"
"path/filepath"
"sync"
"syscall"
"github.com/containers/storage/pkg/unshare"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var (
rootlessRuntimeDirOnce sync.Once
rootlessRuntimeDir string
)
// getRuntimeDir returns the runtime directory for the current (possibly rootless) user,
// preferring XDG_RUNTIME_DIR and falling back to /run/user/<uid>, a per-user directory
// under the system temp dir, and finally $HOME/rundir.
func getRuntimeDir() (string, error) {
var rootlessRuntimeDirError error
rootlessRuntimeDirOnce.Do(func() {
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir != "" {
st, err := os.Stat(runtimeDir)
if err != nil {
rootlessRuntimeDirError = err
return
}
if int(st.Sys().(*syscall.Stat_t).Uid) != os.Geteuid() {
rootlessRuntimeDirError = fmt.Errorf("XDG_RUNTIME_DIR directory %q is not owned by the current user", runtimeDir)
return
}
}
uid := fmt.Sprintf("%d", unshare.GetRootlessUID())
if runtimeDir == "" {
tmpDir := filepath.Join("/run", "user", uid)
if err := os.MkdirAll(tmpDir, 0700); err != nil {
logrus.Debugf("unable to make temp dir: %v", err)
}
st, err := os.Stat(tmpDir)
if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 {
runtimeDir = tmpDir
}
}
if runtimeDir == "" {
tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%s", uid))
if err := os.MkdirAll(tmpDir, 0700); err != nil {
logrus.Debugf("unable to make temp dir %v", err)
}
st, err := os.Stat(tmpDir)
if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 {
runtimeDir = tmpDir
}
}
if runtimeDir == "" {
home := os.Getenv("HOME")
if home == "" {
rootlessRuntimeDirError = errors.New("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
return
}
resolvedHome, err := filepath.EvalSymlinks(home)
if err != nil {
rootlessRuntimeDirError = errors.Wrap(err, "cannot resolve home")
return
}
runtimeDir = filepath.Join(resolvedHome, "rundir")
}
rootlessRuntimeDir = runtimeDir
})
if rootlessRuntimeDirError != nil {
return "", rootlessRuntimeDirError
}
return rootlessRuntimeDir, nil
}
|
[
"\"XDG_RUNTIME_DIR\"",
"\"HOME\""
] |
[] |
[
"XDG_RUNTIME_DIR",
"HOME"
] |
[]
|
["XDG_RUNTIME_DIR", "HOME"]
|
go
| 2 | 0 | |
Data/Juliet-Java/Juliet-Java-v103/000/131/259/CWE191_Integer_Underflow__int_Environment_multiply_73a.java
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE191_Integer_Underflow__int_Environment_multiply_73a.java
Label Definition File: CWE191_Integer_Underflow__int.label.xml
Template File: sources-sinks-73a.tmpl.java
*/
/*
* @description
* CWE: 191 Integer Underflow
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* Sinks: multiply
* GoodSink: Ensure there will not be an underflow before multiplying data by 2
* BadSink : If data is negative, multiply by 2, which can cause an underflow
* Flow Variant: 73 Data flow: data passed in a LinkedList from one method to another in different source files in the same package
*
* */
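/* Illustrative note (not part of the generated template): Java int arithmetic is
 * 32-bit two's complement, so multiplying a sufficiently negative value by 2 wraps
 * around instead of signaling an underflow, e.g.
 *
 *   int data = Integer.MIN_VALUE / 2 - 1; // -1073741825
 *   int result = data * 2;                // wraps to 2147483646
 *
 * which is why the good sink is described as checking the range before multiplying.
 */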
import java.util.LinkedList;
import java.util.logging.Level;
public class CWE191_Integer_Underflow__int_Environment_multiply_73a extends AbstractTestCase
{
public void bad() throws Throwable
{
int data;
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
LinkedList<Integer> dataLinkedList = new LinkedList<Integer>();
dataLinkedList.add(0, data);
dataLinkedList.add(1, data);
dataLinkedList.add(2, data);
(new CWE191_Integer_Underflow__int_Environment_multiply_73b()).badSink(dataLinkedList );
}
public void good() throws Throwable
{
goodG2B();
goodB2G();
}
/* goodG2B() - use GoodSource and BadSink */
private void goodG2B() throws Throwable
{
int data;
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
LinkedList<Integer> dataLinkedList = new LinkedList<Integer>();
dataLinkedList.add(0, data);
dataLinkedList.add(1, data);
dataLinkedList.add(2, data);
(new CWE191_Integer_Underflow__int_Environment_multiply_73b()).goodG2BSink(dataLinkedList );
}
/* goodB2G() - use BadSource and GoodSink */
private void goodB2G() throws Throwable
{
int data;
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
LinkedList<Integer> dataLinkedList = new LinkedList<Integer>();
dataLinkedList.add(0, data);
dataLinkedList.add(1, data);
dataLinkedList.add(2, data);
(new CWE191_Integer_Underflow__int_Environment_multiply_73b()).goodB2GSink(dataLinkedList );
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
|
[
"\"ADD\"",
"\"ADD\""
] |
[] |
[
"ADD"
] |
[]
|
["ADD"]
|
java
| 1 | 0 | |
pkg/job/job.go
|
package job
import (
"context"
"crypto/md5"
"crypto/rand"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"time"
shellwords "github.com/mattn/go-shellwords"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/ghodss/yaml"
v1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// Job holds the kubernetes client, the current job definition, the command args, the timeout, and the target container information.
type Job struct {
client kubernetes.Interface
// Batch v1 job struct.
CurrentJob *v1.Job
// Command args that override the container args in the current job struct.
Args []string
// Target container name.
Container string
// If you set 0, timeout is ignored.
Timeout time.Duration
}
// NewJob returns a new Job struct and initializes the kubernetes client.
// It reads the job definition yaml file and unmarshals it into a batch/v1 Job.
func NewJob(configFile, currentFile, command, container string, timeout time.Duration) (*Job, error) {
if len(configFile) == 0 {
return nil, errors.New("Config file is required")
}
if len(currentFile) == 0 {
return nil, errors.New("Template file is required")
}
if len(container) == 0 {
return nil, errors.New("Container is required")
}
client, err := newClient(os.ExpandEnv(configFile))
if err != nil {
return nil, err
}
downloaded, err := downloadFile(currentFile)
if err != nil {
return nil, err
}
bytes, err := ioutil.ReadFile(downloaded)
if err != nil {
return nil, err
}
var currentJob v1.Job
err = yaml.Unmarshal(bytes, &currentJob)
if err != nil {
return nil, err
}
currentJob.SetName(generateRandomName(currentJob.Name))
p := shellwords.NewParser()
args, err := p.Parse(command)
log.Info("Received args:")
for _, arg := range args {
log.Info(arg)
}
if err != nil {
return nil, err
}
return &Job{
client,
&currentJob,
args,
container,
timeout,
}, nil
}
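// Usage sketch (illustrative; file names and values assumed):
//
//	job, err := NewJob("$HOME/.kube/config", "https://example.com/job-template.yaml",
//		"echo hello", "main", 5*time.Minute)
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := job.Validate(); err != nil {
//		log.Fatal(err)
//	}
//	running, err := job.RunJob()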
func downloadFile(rawurl string) (string, error) {
if !strings.HasPrefix(rawurl, "https://") {
return rawurl, nil
}
req, err := http.NewRequest("GET", rawurl, nil)
if err != nil {
return rawurl, err
}
token := os.Getenv("GITHUB_TOKEN")
if len(token) > 0 {
req.Header.Set("Authorization", "token "+token)
req.Header.Set("Accept", "application/vnd.github.v3.raw")
}
client := new(http.Client)
resp, err := client.Do(req)
if err != nil {
return rawurl, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return rawurl, fmt.Errorf("Could not read template file from %s", rawurl)
}
// Derive a deterministic temp file name from the md5 hash of the url.
hasher := md5.New()
hasher.Write([]byte(rawurl))
downloaded := "/tmp/" + hex.EncodeToString(hasher.Sum(nil)) + ".yml"
out, err := os.Create(downloaded)
if err != nil {
return rawurl, err
}
defer out.Close()
// Write the body to file
_, err = io.Copy(out, resp.Body)
return downloaded, err
}
func generateRandomName(name string) string {
return fmt.Sprintf("%s-%s", name, secureRandomStr(16))
}
// secureRandomStr generates a hex-encoded random string from b random bytes.
func secureRandomStr(b int) string {
k := make([]byte, b)
if _, err := rand.Read(k); err != nil {
panic(err)
}
return fmt.Sprintf("%x", k)
}
// Validate checks the job template before running the job.
func (j *Job) Validate() error {
_, err := findContainerIndex(j.CurrentJob, j.Container)
return err
}
// RunJob runs a kubernetes job and returns the job information.
func (j *Job) RunJob() (*v1.Job, error) {
ctx := context.Background()
currentJob := j.CurrentJob.DeepCopy()
index, err := findContainerIndex(currentJob, j.Container)
if err != nil {
return nil, err
}
if len(j.Args) > 0 {
currentJob.Spec.Template.Spec.Containers[index].Args = j.Args
}
resultJob, err := j.client.BatchV1().Jobs(j.CurrentJob.Namespace).Create(ctx, currentJob, metav1.CreateOptions{})
if err != nil {
return nil, err
}
return resultJob, nil
}
// findContainerIndex finds target container from job definition.
func findContainerIndex(job *v1.Job, containerName string) (int, error) {
for index, container := range job.Spec.Template.Spec.Containers {
if container.Name == containerName {
return index, nil
}
}
return 0, fmt.Errorf("Specified container %s does not exist in the template", containerName)
}
// WaitJob waits for the job to finish or the surrounding context to time out.
func (j *Job) WaitJob(ctx context.Context, job *v1.Job, ignoreSidecar bool) error {
log.Info("Waiting for running job...")
errCh := make(chan error, 1)
done := make(chan struct{}, 1)
go func() {
err := j.WaitJobComplete(ctx, job, ignoreSidecar)
if err != nil {
errCh <- err
}
close(done)
}()
select {
case err := <-errCh:
if err != nil {
return err
}
case <-done:
log.Info("Job is succeeded")
case <-ctx.Done():
return errors.New("process timeout")
}
return nil
}
// WaitJobComplete waits for the completion of the job.
// If the job fails, this function returns an error.
// If the job succeeds, this function returns nil.
func (j *Job) WaitJobComplete(ctx context.Context, job *v1.Job, ignoreSidecar bool) error {
retry:
for {
time.Sleep(3 * time.Second)
running, err := j.client.BatchV1().Jobs(job.Namespace).Get(ctx, job.Name, metav1.GetOptions{})
if err != nil {
return err
}
if running.Status.Active == 0 {
return checkJobConditions(running.Status.Conditions)
}
if ignoreSidecar {
pods, err := j.FindPods(ctx, running)
if err != nil {
return err
}
finished, err := checkPodConditions(pods, j.Container)
if finished {
log.Warn("Pod is still running, but specified container is completed, so job will be terminated")
return err
}
}
continue retry
}
}
// FindPods finds the pods that belong to the job.
func (j *Job) FindPods(ctx context.Context, job *v1.Job) ([]corev1.Pod, error) {
labels := parseLabels(job.Spec.Template.Labels)
listOptions := metav1.ListOptions{
LabelSelector: labels,
}
podList, err := j.client.CoreV1().Pods(job.Namespace).List(ctx, listOptions)
if err != nil {
return []corev1.Pod{}, err
}
return podList.Items, err
}
// checkJobConditions checks the conditions of the job.
// If the job has failed, it returns an error.
func checkJobConditions(conditions []v1.JobCondition) error {
for _, condition := range conditions {
if condition.Type == v1.JobFailed {
return fmt.Errorf("Job is failed: %s", condition.Reason)
}
}
return nil
}
// checkPodConditions checks all pods related to a job.
// It returns true if, in every pod that contains the named container, that container has completed.
func checkPodConditions(pods []corev1.Pod, containerName string) (bool, error) {
results := []bool{}
errs := []error{}
for _, pod := range pods {
if podIncludeContainer(pod, containerName) {
finished, err := containerIsCompleted(pod, containerName)
results = append(results, finished)
errs = append(errs, err)
}
}
if len(results) == 0 {
return false, nil
}
for _, r := range results {
if !r {
return false, nil
}
}
var err error
for _, e := range errs {
if e != nil {
err = e
}
}
return true, err
}
func podIncludeContainer(pod corev1.Pod, containerName string) bool {
for _, container := range pod.Spec.Containers {
if container.Name == containerName {
return true
}
}
return false
}
func containerIsCompleted(pod corev1.Pod, containerName string) (bool, error) {
if pod.Status.Phase == corev1.PodSucceeded {
return true, nil
}
if pod.Status.Phase == corev1.PodFailed {
return true, fmt.Errorf("%s Pod is failed", pod.Name)
}
if pod.Status.Phase == corev1.PodPending {
return false, nil
}
for _, status := range pod.Status.ContainerStatuses {
if status.Name == containerName && status.State.Terminated != nil {
if status.State.Terminated.ExitCode == 0 {
return true, nil
}
return true, fmt.Errorf("Container is failed: %s", status.State.Terminated.Reason)
}
}
return false, nil
}
// Cleanup removes the job from the kubernetes cluster.
func (j *Job) Cleanup() error {
ctx := context.Background()
log.Infof("Removing the job: %s", j.CurrentJob.Name)
options := metav1.DeleteOptions{}
err := j.client.BatchV1().Jobs(j.CurrentJob.Namespace).Delete(ctx, j.CurrentJob.Name, options)
if err != nil {
return err
}
return j.removePods(ctx)
}
func (j *Job) removePods(ctx context.Context) error {
// Use the job-name label to find the pods that are related to the job.
labels := "job-name=" + j.CurrentJob.Name
log.Infof("Removing related pods with label: %s", labels)
listOptions := metav1.ListOptions{
LabelSelector: labels,
}
options := metav1.DeleteOptions{
GracePeriodSeconds: nil, // Use default grace period seconds.
}
return j.client.CoreV1().Pods(j.CurrentJob.Namespace).DeleteCollection(ctx, options, listOptions)
}
|
[
"\"GITHUB_TOKEN\""
] |
[] |
[
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_TOKEN"]
|
go
| 1 | 0 | |
core/database.go
|
package core
import (
"fmt"
"github.com/go-yaml/yaml"
"github.com/hunterlong/statup/types"
"github.com/hunterlong/statup/utils"
"os"
"strings"
"time"
"upper.io/db.v3"
"upper.io/db.v3/lib/sqlbuilder"
"upper.io/db.v3/mysql"
"upper.io/db.v3/postgresql"
"upper.io/db.v3/sqlite"
)
var (
dbServer string
sqliteSettings sqlite.ConnectionURL
postgresSettings postgresql.ConnectionURL
mysqlSettings mysql.ConnectionURL
DbSession sqlbuilder.Database
)
type DbConfig types.DbConfig
func DbConnection(dbType string) error {
var err error
if dbType == "sqlite" {
sqliteSettings = sqlite.ConnectionURL{
Database: "statup.db",
}
DbSession, err = sqlite.Open(sqliteSettings)
if err != nil {
return err
}
} else if dbType == "mysql" {
if Configs.Port == "" {
Configs.Port = "3306"
}
mysqlSettings = mysql.ConnectionURL{
Database: Configs.Database,
Host: Configs.Host,
User: Configs.User,
Password: Configs.Password,
}
DbSession, err = mysql.Open(mysqlSettings)
if err != nil {
return err
}
} else {
if Configs.Port == "" {
Configs.Port = "5432"
}
host := fmt.Sprintf("%v:%v", Configs.Host, Configs.Port)
postgresSettings = postgresql.ConnectionURL{
Database: Configs.Database,
Host: host,
User: Configs.User,
Password: Configs.Password,
}
DbSession, err = postgresql.Open(postgresSettings)
if err != nil {
return err
}
}
//dbSession.SetLogging(true)
dbServer = dbType
OnLoad(DbSession)
return err
}
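// Usage sketch (illustrative): after the configuration has been loaded,
//
//	if err := DbConnection(Configs.Connection); err != nil {
//		utils.Log(4, err)
//	}
//
// dbType is expected to be "sqlite", "mysql", or anything else for postgres.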
func DatabaseMaintence() {
defer DatabaseMaintence()
utils.Log(1, "Checking for database records older than 7 days...")
since := time.Now().AddDate(0, 0, -7)
DeleteAllSince("failures", since)
DeleteAllSince("hits", since)
time.Sleep(60 * time.Minute)
}
func DeleteAllSince(table string, date time.Time) {
sql := fmt.Sprintf("DELETE FROM %v WHERE created_at < '%v';", table, date.Format("2006-01-02"))
_, err := DbSession.Exec(db.Raw(sql))
if err != nil {
utils.Log(2, err)
}
}
func (c *DbConfig) Save() error {
var err error
config, err := os.Create("config.yml")
if err != nil {
utils.Log(4, err)
return err
}
data, err := yaml.Marshal(c)
if err != nil {
utils.Log(3, err)
return err
}
config.WriteString(string(data))
config.Close()
Configs, err = LoadConfig()
if err != nil {
utils.Log(3, err)
return err
}
err = DbConnection(Configs.Connection)
if err != nil {
utils.Log(4, err)
return err
}
DropDatabase()
CreateDatabase()
newCore := &Core{
Name: c.Project,
Description: c.Description,
Config: "config.yml",
ApiKey: utils.NewSHA1Hash(9),
ApiSecret: utils.NewSHA1Hash(16),
Domain: c.Domain,
}
col := DbSession.Collection("core")
_, err = col.Insert(newCore)
if err == nil {
CoreApp = newCore
}
return err
}
func RunDatabaseUpgrades() {
utils.Log(1, "Running Database Upgrade from 'upgrade.sql'...")
upgrade, _ := SqlBox.String("upgrade.sql")
requests := strings.Split(upgrade, ";")
for _, request := range requests {
_, err := DbSession.Exec(db.Raw(request + ";"))
if err != nil {
utils.Log(2, err)
}
}
utils.Log(1, "Database Upgraded")
}
func DropDatabase() {
fmt.Println("Dropping Tables...")
down, _ := SqlBox.String("down.sql")
requests := strings.Split(down, ";")
for _, request := range requests {
_, err := DbSession.Exec(request)
if err != nil {
utils.Log(2, err)
}
}
}
func CreateDatabase() {
fmt.Println("Creating Tables...")
sql := "postgres_up.sql"
if dbServer == "mysql" {
sql = "mysql_up.sql"
} else if dbServer == "sqlite" {
sql = "sqlite_up.sql"
}
up, _ := SqlBox.String(sql)
requests := strings.Split(up, ";")
for _, request := range requests {
_, err := DbSession.Exec(request)
if err != nil {
utils.Log(2, err)
}
}
//secret := NewSHA1Hash()
//db.QueryRow("INSERT INTO core (secret, version) VALUES ($1, $2);", secret, VERSION).Scan()
fmt.Println("Database Created")
//SampleData()
}
func (c *DbConfig) Clean() *DbConfig {
if os.Getenv("DB_PORT") != "" {
if c.DbConn == "postgres" {
c.DbHost = c.DbHost + ":" + os.Getenv("DB_PORT")
}
}
return c
}
|
[
"\"DB_PORT\"",
"\"DB_PORT\""
] |
[] |
[
"DB_PORT"
] |
[]
|
["DB_PORT"]
|
go
| 1 | 0 | |
test/e2e/apps/resourcedistribution.go
|
/*
Copyright 2021 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apps
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"strings"
"time"
appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned"
utils "github.com/openkruise/kruise/pkg/webhook/resourcedistribution/validating"
"github.com/openkruise/kruise/test/e2e/framework"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
)
var _ = SIGDescribe("ResourceDistribution", func() {
f := framework.NewDefaultFramework("resourcedistribution")
var ns, secretName string
var c clientset.Interface
var kc kruiseclientset.Interface
var tester *framework.ResourceDistributionTester
ginkgo.BeforeEach(func() {
c = f.ClientSet
kc = f.KruiseClientSet
ns = f.Namespace.Name
secretName = "resourcedistribution-e2e-test-secret"
tester = framework.NewResourceDistributionTester(c, kc)
})
framework.KruiseDescribe("ResourceDistribution distributing functionality [ResourceDistributionInject]", func() {
ginkgo.AfterEach(func() {
if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all ResourceDistribution in cluster")
})
ginkgo.It("namespace event checker", func() {
prefix := "resourcedistribution-e2e-test1"
// clean resource to avoid conflict
tester.DeleteResourceDistributions(prefix)
tester.DeleteNamespaces(prefix)
// to be updated
namespaceUpdateMatch := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: prefix + "-update-matched",
},
}
tester.CreateNamespaces(namespaceUpdateMatch)
// create ResourceDistribution
resourceDistribution := tester.NewBaseResourceDistribution(prefix)
resourceDistribution.Spec.Targets.IncludedNamespaces.List = nil
resourceDistribution.Spec.Targets.NamespaceLabelSelector.MatchLabels = map[string]string{prefix: "seven"}
tester.CreateResourceDistribution(resourceDistribution)
// create matched Namespaces
tester.CreateNamespaces(&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: prefix + "-creat-matched",
Labels: map[string]string{prefix: "seven"},
},
})
// update namespace to match distributor
namespaceUpdateMatch.Labels = map[string]string{prefix: "seven"}
tester.UpdateNamespace(namespaceUpdateMatch)
// check matched namespace
ginkgo.By("waiting for namespace create and update...")
gomega.Eventually(func() int {
matchedNamespaces, _, err := tester.GetNamespaceForDistributor(&resourceDistribution.Spec.Targets)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return matchedNamespaces.Len()
}, time.Minute, time.Second).Should(gomega.Equal(2))
ginkgo.By("checking created secret...")
matchedNamespaces, _, err := tester.GetNamespaceForDistributor(&resourceDistribution.Spec.Targets)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for namespace := range matchedNamespaces {
gomega.Eventually(func() error {
_, err := tester.GetSecret(namespace, secretName, true)
return err
}, time.Minute, time.Second).ShouldNot(gomega.HaveOccurred())
}
//clear all resources in cluster
tester.DeleteResourceDistributions(prefix)
tester.DeleteNamespaces(prefix)
ginkgo.By("Done!")
})
ginkgo.It("resource event checker", func() {
prefix := "resourcedistribution-e2e-test2"
// clean resource to avoid conflict
tester.DeleteResourceDistributions(prefix)
tester.DeleteNamespaces(prefix)
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: prefix + "-1",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: prefix + "-2",
},
},
}
tester.CreateNamespaces(namespaces...)
// create ResourceDistribution
resourceDistribution := tester.NewBaseResourceDistribution(prefix)
tester.CreateResourceDistribution(resourceDistribution)
var err error
var secret *corev1.Secret
gomega.Eventually(func() error {
secret, err = tester.GetSecret(namespaces[0].Name, secretName, true)
return err
}, time.Minute, time.Second).Should(gomega.BeNil())
// If resource was modified directly, resourceDistribution should modify it back
ginkgo.By("update resource directly...")
secret.StringData = map[string]string{
"updated": "yes",
}
err = tester.UpdateSecret(secret)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Eventually(func() int {
secret, err = tester.GetSecret(namespaces[0].Name, secretName, true)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return len(secret.StringData)
}, time.Minute, time.Second).Should(gomega.Equal(0))
// If resource was deleted directly, resourceDistribution should create it again
ginkgo.By("delete resource directly...")
err = tester.DeleteSecret(secret.Namespace, secret.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Eventually(func() error {
secret, err = tester.GetSecret(namespaces[0].Name, secretName, true)
return err
}, time.Minute, time.Second).Should(gomega.BeNil())
//clear all resources in cluster
tester.DeleteResourceDistributions(prefix)
tester.DeleteNamespaces(prefix)
ginkgo.By("Done!")
})
ginkgo.It("resourcedistribution functionality checker", func() {
prefix := "resourcedistribution-e2e-test3"
// clean resource to avoid conflict
tester.DeleteResourceDistributions(prefix)
tester.DeleteNamespaces(prefix)
// build ResourceDistribution object
resourceDistribution := tester.NewBaseResourceDistribution(prefix)
cases := []struct {
name string
getNamespaces func() []*corev1.Namespace
}{
{
name: "normal resource distribution case",
getNamespaces: func() []*corev1.Namespace {
return []*corev1.Namespace{
&corev1.Namespace{ // for create
ObjectMeta: metav1.ObjectMeta{
Name: prefix + "-1",
Labels: map[string]string{
"e2e-rd-group": "one",
"environment": "develop",
},
},
},
&corev1.Namespace{ // for create
ObjectMeta: metav1.ObjectMeta{
Name: prefix + "-2",
Labels: map[string]string{
"e2e-rd-group": "one",
"environment": "develop",
},
},
},
&corev1.Namespace{ // for ExcludedNamespaces
ObjectMeta: metav1.ObjectMeta{
Name: prefix + "-3",
Labels: map[string]string{
"e2e-rd-group": "one",
"environment": "develop",
},
},
},
&corev1.Namespace{ // for create
ObjectMeta: metav1.ObjectMeta{
Name: prefix + "-4",
Labels: map[string]string{
"e2e-rd-group": "one",
"environment": "test",
},
},
},
&corev1.Namespace{ // for delete
ObjectMeta: metav1.ObjectMeta{
Name: prefix + "-5",
Labels: map[string]string{
"e2e-rd-group": "two",
"environment": "test",
},
},
},
}
},
},
}
for _, cs := range cases {
ginkgo.By(cs.name)
allNamespaces := cs.getNamespaces()
ginkgo.By("creating namespaces")
tester.CreateNamespaces(allNamespaces...)
ginkgo.By(fmt.Sprintf("Creating ResourceDistribution %s", resourceDistribution.Name))
tester.CreateResourceDistribution(resourceDistribution)
var err error
var matchedNamespaces sets.String
var unmatchedNamespaces sets.String
// ensure namespaces have been created
gomega.Eventually(func() int {
matchedNamespaces, unmatchedNamespaces, err = tester.GetNamespaceForDistributor(&resourceDistribution.Spec.Targets)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return matchedNamespaces.Len()
}, time.Minute, time.Second).Should(gomega.Equal(4))
// ensure all desired resources have been created
gomega.Eventually(func() int32 {
resourceDistribution, err = tester.GetResourceDistribution(resourceDistribution.Name, true)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return resourceDistribution.Status.Succeeded
}, time.Minute, time.Second).Should(gomega.Equal(int32(len(matchedNamespaces))))
gomega.Expect(resourceDistribution.Status.Desired).Should(gomega.Equal(resourceDistribution.Status.Succeeded))
// checking created and updated resources
ginkgo.By("checking created and updated resources...")
resourceHash := sha256.Sum256(resourceDistribution.Spec.Resource.Raw)
consistentVersion := hex.EncodeToString(resourceHash[:])
for namespace := range matchedNamespaces {
object, err := tester.GetSecret(namespace, secretName, true)
ginkgo.By(fmt.Sprintf("checking distributed secret(%s.%s).", namespace, secretName))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(object.GetAnnotations()).ShouldNot(gomega.BeNil())
version := object.Annotations[utils.ResourceHashCodeAnnotation]
gomega.Expect(version).To(gomega.Equal(consistentVersion))
}
// checking deleted secrets
ginkgo.By("checking deleted secrets...")
for namespace := range unmatchedNamespaces {
// only focus on this e2e test
if !strings.HasPrefix(namespace, prefix) {
continue
}
object, err := tester.GetSecret(namespace, secretName, false)
gomega.Expect(errors.IsNotFound(err)).Should(gomega.BeTrue())
gomega.Expect(object).Should(gomega.BeNil())
}
// check status.conditions
ginkgo.By("checking conditions...")
gomega.Expect(resourceDistribution.Status.Conditions).To(gomega.HaveLen(6))
gomega.Expect(resourceDistribution.Status.Conditions[0].Status).To(gomega.Equal(appsv1alpha1.ResourceDistributionConditionFalse))
gomega.Expect(resourceDistribution.Status.Conditions[1].Status).To(gomega.Equal(appsv1alpha1.ResourceDistributionConditionFalse))
gomega.Expect(resourceDistribution.Status.Conditions[2].Status).To(gomega.Equal(appsv1alpha1.ResourceDistributionConditionFalse))
gomega.Expect(resourceDistribution.Status.Conditions[3].Status).To(gomega.Equal(appsv1alpha1.ResourceDistributionConditionFalse))
gomega.Expect(resourceDistribution.Status.Conditions[4].Status).To(gomega.Equal(appsv1alpha1.ResourceDistributionConditionFalse))
gomega.Expect(resourceDistribution.Status.Conditions[5].Status).To(gomega.Equal(appsv1alpha1.ResourceDistributionConditionFalse))
// checking after some included namespaces is deleted
tester.DeleteNamespace(allNamespaces[0])
gomega.Eventually(func() int {
mice, err := tester.GetResourceDistribution(resourceDistribution.Name, true)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return len(mice.Status.Conditions[5].FailedNamespaces)
}, time.Minute, time.Second).Should(gomega.Equal(1))
gomega.Expect(resourceDistribution.Status.Desired).Should(gomega.Equal(resourceDistribution.Status.Succeeded))
// checking after updating spec.targets
resourceDistribution.Spec.Targets.IncludedNamespaces.List = []appsv1alpha1.ResourceDistributionNamespace{{Name: prefix + "-2"}}
resourceDistribution.Spec.Targets.NamespaceLabelSelector.MatchLabels = map[string]string{"e2e-rd-group": "two"}
tester.UpdateResourceDistribution(resourceDistribution)
gomega.Eventually(func() int32 {
mice, err := tester.GetResourceDistribution(resourceDistribution.Name, true)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return mice.Status.Succeeded
}, time.Minute, time.Second).Should(gomega.Equal(int32(2)))
matchedNamespaces, unmatchedNamespaces, err = tester.GetNamespaceForDistributor(&resourceDistribution.Spec.Targets)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(matchedNamespaces.Len()).Should(gomega.Equal(2))
ginkgo.By("checking created secrets...")
for namespace := range matchedNamespaces {
object, err := tester.GetSecret(namespace, secretName, true)
ginkgo.By(fmt.Sprintf("checking distributed secret(%s.%s).", namespace, secretName))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(object).ShouldNot(gomega.BeNil())
version := object.Annotations[utils.ResourceHashCodeAnnotation]
gomega.Expect(version).To(gomega.Equal(consistentVersion))
}
ginkgo.By("checking deleted secrets...")
for namespace := range unmatchedNamespaces {
if !strings.HasPrefix(namespace, prefix) {
continue
}
object, err := tester.GetSecret(namespace, secretName, false)
gomega.Expect(errors.IsNotFound(err)).Should(gomega.BeTrue())
gomega.Expect(object).Should(gomega.BeNil())
}
ginkgo.By("checking all matched namespaces after distributor was deleted...")
tester.DeleteResourceDistributions(prefix)
for namespace := range matchedNamespaces {
if !strings.HasPrefix(namespace, prefix) {
continue
}
gomega.Eventually(func() bool {
_, err := tester.GetSecret(namespace, secretName, false)
return errors.IsNotFound(err)
}, time.Minute, time.Second).Should(gomega.BeTrue())
}
ginkgo.By("Done!")
tester.DeleteNamespaces(prefix)
}
})
})
})
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
setup.py
|
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
import codecs
import os
import subprocess
import sys
from distutils import cmd as distutils_cmd
from distutils import log as distutils_log
from itertools import chain
import setuptools
def is_build_action():
if len(sys.argv) <= 1:
return False
BUILD_TOKENS = ["egg_info", "dist", "bdist", "sdist", "install", "build", "develop", "style", "clean"]
if any([sys.argv[1].startswith(x) for x in BUILD_TOKENS]):
return True
else:
return False
if is_build_action():
os.environ['NEMO_PACKAGE_BUILDING'] = 'True'
from nemo.package_info import (
__contact_emails__,
__contact_names__,
__description__,
__download_url__,
__homepage__,
__keywords__,
__license__,
__package_name__,
__repository_url__,
__version__,
)
if os.path.exists('nemo/README.md'):
with open("nemo/README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
elif os.path.exists('README.rst'):
# codec is used for consistent encoding
long_description = codecs.open(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), 'r', 'utf-8',
).read()
long_description_content_type = "text/x-rst"
else:
long_description = 'See ' + __homepage__
###############################################################################
# Dependency Loading #
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
def req_file(filename, folder="requirements"):
with open(os.path.join(folder, filename)) as f:
content = f.readlines()
# you may also want to remove whitespace characters
# Example: `\n` at the end of each line
return [x.strip() for x in content]
install_requires = req_file("requirements.txt")
extras_require = {
# User packages
'test': req_file("requirements_test.txt"),
# Collections Packages
'asr': req_file("requirements_asr.txt"),
'cv': req_file("requirements_cv.txt"),
'nlp': req_file("requirements_nlp.txt"),
'tts': req_file("requirements_tts.txt"),
}
extras_require['all'] = list(chain(extras_require.values()))
# TTS depends on ASR
extras_require['tts'] = list(chain([extras_require['tts'], extras_require['asr']]))
tests_requirements = extras_require["test"]
########################## VERSION MISMATCH PATCH #############################
# REMOVE AFTER 21.03 Container is released !
try:
import torch
version = torch.__version__
SUPPORTED_TORCH_VERSION = f"torch=={version}"
if 'a' in version or 'b' in version:
# It is githash release, force to supported Pytorch Lightning branch
SUPPORTED_PYTORCH_LIGHTNING = "pytorch-lightning==1.2.2"
else:
SUPPORTED_PYTORCH_LIGHTNING = "pytorch-lightning>=1.2.3"
except (ImportError, ModuleNotFoundError):
# Since no torch is installed, pip install torch will install latest torch and latest pytorch lightning
SUPPORTED_TORCH_VERSION = "torch"
SUPPORTED_PYTORCH_LIGHTNING = "pytorch-lightning>=1.2.3"
install_requires_buffer = []
for ix, line in enumerate(install_requires):
if 'lightning' in line:
install_requires_buffer.append(SUPPORTED_PYTORCH_LIGHTNING)
elif 'torch' in line:
install_requires_buffer.append(SUPPORTED_TORCH_VERSION)
# Pytorch 1.7.1 must use torchtext==0.8.0, torchaudio==0.7.2 and torchvision==0.8.2
if SUPPORTED_TORCH_VERSION == "torch<=1.7.1":
install_requires_buffer.append("torchvision==0.8.2")
install_requires_buffer.append("torchaudio==0.7.2")
install_requires_buffer.append("torchtext==0.8.0")
else:
install_requires_buffer.append(line)
# override install requires
install_requires = install_requires_buffer
###############################################################################
# Code style checkers #
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
class StyleCommand(distutils_cmd.Command):
__LINE_WIDTH = 119
__ISORT_BASE = (
'isort '
# These two lines makes isort compatible with black.
'--multi-line=3 --trailing-comma --force-grid-wrap=0 '
f'--use-parentheses --line-width={__LINE_WIDTH} -rc -ws'
)
__BLACK_BASE = f'black --skip-string-normalization --line-length={__LINE_WIDTH}'
description = 'Checks overall project code style.'
user_options = [
('scope=', None, 'Folder of file to operate within.'),
('fix', None, 'True if tries to fix issues in-place.'),
]
def __call_checker(self, base_command, scope, check):
command = list(base_command)
command.append(scope)
if check:
command.extend(['--check', '--diff'])
self.announce(
msg='Running command: %s' % str(' '.join(command)), level=distutils_log.INFO,
)
return_code = subprocess.call(command)
return return_code
def _isort(self, scope, check):
return self.__call_checker(base_command=self.__ISORT_BASE.split(), scope=scope, check=check,)
def _black(self, scope, check):
return self.__call_checker(base_command=self.__BLACK_BASE.split(), scope=scope, check=check,)
def _pass(self):
self.announce(msg='\033[32mPASS\x1b[0m', level=distutils_log.INFO)
def _fail(self):
self.announce(msg='\033[31mFAIL\x1b[0m', level=distutils_log.INFO)
# noinspection PyAttributeOutsideInit
def initialize_options(self):
self.scope = '.'
self.fix = ''
def run(self):
scope, check = self.scope, not self.fix
isort_return = self._isort(scope=scope, check=check)
black_return = self._black(scope=scope, check=check)
if isort_return == 0 and black_return == 0:
self._pass()
else:
self._fail()
exit(isort_return if isort_return != 0 else black_return)
def finalize_options(self):
pass
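# Usage sketch (illustrative): `python setup.py style` checks formatting with isort and black,
# while `python setup.py style --fix --scope=nemo` rewrites files in-place for the given folder.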
###############################################################################
setuptools.setup(
name=__package_name__,
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__version__,
description=__description__,
long_description=long_description,
long_description_content_type=long_description_content_type,
# The project's main homepage.
url=__repository_url__,
download_url=__download_url__,
# Author details
author=__contact_names__,
author_email=__contact_emails__,
# maintainer Details
maintainer=__contact_names__,
maintainer_email=__contact_emails__,
# The licence under which the project is released
license=__license__,
classifiers=[
# How mature is this project? Common values are
# 1 - Planning
# 2 - Pre-Alpha
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
# 6 - Mature
# 7 - Inactive
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Information Technology',
# Indicate what your project relates to
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Supported python versions
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
# Additional Setting
'Environment :: Console',
'Natural Language :: English',
'Operating System :: OS Independent',
],
packages=setuptools.find_packages(),
install_requires=install_requires,
setup_requires=['pytest-runner'],
tests_require=tests_requirements,
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# $ pip install -e ".[all]"
# $ pip install nemo_toolkit[all]
extras_require=extras_require,
# Add in any packaged data.
include_package_data=True,
zip_safe=False,
# PyPI package information.
keywords=__keywords__,
# Custom commands.
cmdclass={'style': StyleCommand},
)
|
[] |
[] |
[
"NEMO_PACKAGE_BUILDING"
] |
[]
|
["NEMO_PACKAGE_BUILDING"]
|
python
| 1 | 0 | |
vendor/github.com/tailscale/hujson/internal/hujson/encode.go
|
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package json implements encoding and decoding of JSON as defined in
// RFC 7159. The mapping between JSON and Go values is described
// in the documentation for the Marshal and Unmarshal functions.
//
// See "JSON and Go" for an introduction to this package:
// https://golang.org/doc/articles/json_and_go.html
package hujson
import (
"bytes"
"encoding"
"encoding/base64"
"fmt"
"math"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
// Marshal returns the JSON encoding of v.
//
// Marshal traverses the value v recursively.
// If an encountered value implements the Marshaler interface
// and is not a nil pointer, Marshal calls its MarshalJSON method
// to produce JSON. If no MarshalJSON method is present but the
// value implements encoding.TextMarshaler instead, Marshal calls
// its MarshalText method and encodes the result as a JSON string.
// The nil pointer exception is not strictly necessary
// but mimics a similar, necessary exception in the behavior of
// UnmarshalJSON.
//
// Otherwise, Marshal uses the following type-dependent default encodings:
//
// Boolean values encode as JSON booleans.
//
// Floating point, integer, and Number values encode as JSON numbers.
//
// String values encode as JSON strings coerced to valid UTF-8,
// replacing invalid bytes with the Unicode replacement rune.
// So that the JSON will be safe to embed inside HTML <script> tags,
// the string is encoded using HTMLEscape,
// which replaces "<", ">", "&", U+2028, and U+2029
// with "\u003c", "\u003e", "\u0026", "\u2028", and "\u2029".
// This replacement can be disabled when using an Encoder,
// by calling SetEscapeHTML(false).
//
// Array and slice values encode as JSON arrays, except that
// []byte encodes as a base64-encoded string, and a nil slice
// encodes as the null JSON value.
//
// Struct values encode as JSON objects.
// Each exported struct field becomes a member of the object, using the
// field name as the object key, unless the field is omitted for one of the
// reasons given below.
//
// The encoding of each struct field can be customized by the format string
// stored under the "json" key in the struct field's tag.
// The format string gives the name of the field, possibly followed by a
// comma-separated list of options. The name may be empty in order to
// specify options without overriding the default field name.
//
// The "omitempty" option specifies that the field should be omitted
// from the encoding if the field has an empty value, defined as
// false, 0, a nil pointer, a nil interface value, and any empty array,
// slice, map, or string.
//
// As a special case, if the field tag is "-", the field is always omitted.
// Note that a field with name "-" can still be generated using the tag "-,".
//
// Examples of struct field tags and their meanings:
//
// // Field appears in JSON as key "myName".
// Field int `json:"myName"`
//
// // Field appears in JSON as key "myName" and
// // the field is omitted from the object if its value is empty,
// // as defined above.
// Field int `json:"myName,omitempty"`
//
// // Field appears in JSON as key "Field" (the default), but
// // the field is skipped if empty.
// // Note the leading comma.
// Field int `json:",omitempty"`
//
// // Field is ignored by this package.
// Field int `json:"-"`
//
// // Field appears in JSON as key "-".
// Field int `json:"-,"`
//
// The "string" option signals that a field is stored as JSON inside a
// JSON-encoded string. It applies only to fields of string, floating point,
// integer, or boolean types. This extra level of encoding is sometimes used
// when communicating with JavaScript programs:
//
// Int64String int64 `json:",string"`
//
// The key name will be used if it's a non-empty string consisting of
// only Unicode letters, digits, and ASCII punctuation except quotation
// marks, backslash, and comma.
//
// Anonymous struct fields are usually marshaled as if their inner exported fields
// were fields in the outer struct, subject to the usual Go visibility rules amended
// as described in the next paragraph.
// An anonymous struct field with a name given in its JSON tag is treated as
// having that name, rather than being anonymous.
// An anonymous struct field of interface type is treated the same as having
// that type as its name, rather than being anonymous.
//
// The Go visibility rules for struct fields are amended for JSON when
// deciding which field to marshal or unmarshal. If there are
// multiple fields at the same level, and that level is the least
// nested (and would therefore be the nesting level selected by the
// usual Go rules), the following extra rules apply:
//
// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
// even if there are multiple untagged fields that would otherwise conflict.
//
// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
//
// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
//
// Handling of anonymous struct fields is new in Go 1.1.
// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
// an anonymous struct field in both current and earlier versions, give the field
// a JSON tag of "-".
//
// Map values encode as JSON objects. The map's key type must either be a
// string, an integer type, or implement encoding.TextMarshaler. The map keys
// are sorted and used as JSON object keys by applying the following rules,
// subject to the UTF-8 coercion described for string values above:
// - keys of any string type are used directly
// - encoding.TextMarshalers are marshaled
// - integer keys are converted to strings
//
// Pointer values encode as the value pointed to.
// A nil pointer encodes as the null JSON value.
//
// Interface values encode as the value contained in the interface.
// A nil interface value encodes as the null JSON value.
//
// Channel, complex, and function values cannot be encoded in JSON.
// Attempting to encode such a value causes Marshal to return
// an UnsupportedTypeError.
//
// JSON cannot represent cyclic data structures and Marshal does not
// handle them. Passing cyclic structures to Marshal will result in
// an infinite recursion.
//
func Marshal(v interface{}) ([]byte, error) {
e := newEncodeState()
err := e.marshal(v, encOpts{escapeHTML: true})
if err != nil {
return nil, err
}
buf := append([]byte(nil), e.Bytes()...)
e.Reset()
encodeStatePool.Put(e)
return buf, nil
}
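// exampleMarshalTags is an illustrative sketch added for the tag options
// documented above; it is not part of the original file, and the Message type
// and its values are made up. Body is empty, so "omitempty" drops it, and the
// "-" tag hides Note, leaving {"name":"Alice"}.
func exampleMarshalTags() ([]byte, error) {
	type Message struct {
		Name string `json:"name"`           // key renamed to "name"
		Body string `json:"body,omitempty"` // omitted when empty
		Note string `json:"-"`              // always ignored
	}
	return Marshal(Message{Name: "Alice", Note: "internal"})
}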
// MarshalIndent is like Marshal but applies Indent to format the output.
// Each JSON element in the output will begin on a new line beginning with prefix
// followed by one or more copies of indent according to the indentation nesting.
func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
b, err := Marshal(v)
if err != nil {
return nil, err
}
var buf bytes.Buffer
err = Indent(&buf, b, prefix, indent)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
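// exampleMarshalIndent is an illustrative sketch (not part of the original
// file) of the prefix/indent parameters described above; the map value is
// made up. With an empty prefix and a four-space indent, each (sorted) key
// starts a new line one level deeper than the surrounding braces.
func exampleMarshalIndent() ([]byte, error) {
	return MarshalIndent(map[string]int{"a": 1, "b": 2}, "", "    ")
}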
// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
// so that the JSON will be safe to embed inside HTML <script> tags.
// For historical reasons, web browsers don't honor standard HTML
// escaping within <script> tags, so an alternative JSON encoding must
// be used.
func HTMLEscape(dst *bytes.Buffer, src []byte) {
// The characters can only appear in string literals,
// so just scan the string one byte at a time.
start := 0
for i, c := range src {
if c == '<' || c == '>' || c == '&' {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u202`)
dst.WriteByte(hex[src[i+2]&0xF])
start = i + 3
}
}
if start < len(src) {
dst.Write(src[start:])
}
}
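// exampleHTMLEscape is an illustrative sketch (not part of the original file)
// of the escaping described above; the payload is made up. The <, > and &
// inside the string literal become \u003c, \u003e and \u0026, so the result
// can be embedded inside a <script> tag.
func exampleHTMLEscape() string {
	var out bytes.Buffer
	HTMLEscape(&out, []byte(`{"msg":"<b>hi & bye</b>"}`))
	return out.String()
}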
// Marshaler is the interface implemented by types that
// can marshal themselves into valid JSON.
type Marshaler interface {
MarshalJSON() ([]byte, error)
}
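// exampleRGB is an illustrative sketch (not part of the original file) of a
// type satisfying Marshaler: Marshal calls MarshalJSON and embeds its output
// after checking that it is valid JSON. The type name and hex format are
// made up for the example.
type exampleRGB struct{ R, G, B uint8 }

func (c exampleRGB) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`"#%02x%02x%02x"`, c.R, c.G, c.B)), nil
}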
// An UnsupportedTypeError is returned by Marshal when attempting
// to encode an unsupported value type.
type UnsupportedTypeError struct {
Type reflect.Type
}
func (e *UnsupportedTypeError) Error() string {
return "json: unsupported type: " + e.Type.String()
}
type UnsupportedValueError struct {
Value reflect.Value
Str string
}
func (e *UnsupportedValueError) Error() string {
return "json: unsupported value: " + e.Str
}
// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
// attempting to encode a string value with invalid UTF-8 sequences.
// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
// replacing invalid bytes with the Unicode replacement rune U+FFFD.
//
// Deprecated: No longer used; kept for compatibility.
type InvalidUTF8Error struct {
S string // the whole string value that caused the error
}
func (e *InvalidUTF8Error) Error() string {
return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
}
// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method.
type MarshalerError struct {
Type reflect.Type
Err error
}
func (e *MarshalerError) Error() string {
return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
}
func (e *MarshalerError) Unwrap() error { return e.Err }
var hex = "0123456789abcdef"
// An encodeState encodes JSON into a bytes.Buffer.
type encodeState struct {
bytes.Buffer // accumulated output
scratch [64]byte
}
var encodeStatePool sync.Pool
func newEncodeState() *encodeState {
if v := encodeStatePool.Get(); v != nil {
e := v.(*encodeState)
e.Reset()
return e
}
return new(encodeState)
}
// jsonError is an error wrapper type for internal use only.
// Panics with errors are wrapped in jsonError so that the top-level recover
// can distinguish intentional panics from this package.
type jsonError struct{ error }
func (e *encodeState) marshal(v interface{}, opts encOpts) (err error) {
defer func() {
if r := recover(); r != nil {
if je, ok := r.(jsonError); ok {
err = je.error
} else {
panic(r)
}
}
}()
e.reflectValue(reflect.ValueOf(v), opts)
return nil
}
// error aborts the encoding by panicking with err wrapped in jsonError.
func (e *encodeState) error(err error) {
panic(jsonError{err})
}
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func (e *encodeState) reflectValue(v reflect.Value, opts encOpts) {
valueEncoder(v)(e, v, opts)
}
type encOpts struct {
// quoted causes primitive fields to be encoded inside JSON strings.
quoted bool
// escapeHTML causes '<', '>', and '&' to be escaped in JSON strings.
escapeHTML bool
}
type encoderFunc func(e *encodeState, v reflect.Value, opts encOpts)
var encoderCache sync.Map // map[reflect.Type]encoderFunc
func valueEncoder(v reflect.Value) encoderFunc {
if !v.IsValid() {
return invalidValueEncoder
}
return typeEncoder(v.Type())
}
func typeEncoder(t reflect.Type) encoderFunc {
if fi, ok := encoderCache.Load(t); ok {
return fi.(encoderFunc)
}
// To deal with recursive types, populate the map with an
// indirect func before we build it. This type waits on the
// real func (f) to be ready and then calls it. This indirect
// func is only used for recursive types.
var (
wg sync.WaitGroup
f encoderFunc
)
wg.Add(1)
fi, loaded := encoderCache.LoadOrStore(t, encoderFunc(func(e *encodeState, v reflect.Value, opts encOpts) {
wg.Wait()
f(e, v, opts)
}))
if loaded {
return fi.(encoderFunc)
}
// Compute the real encoder and replace the indirect func with it.
f = newTypeEncoder(t, true)
wg.Done()
encoderCache.Store(t, f)
return f
}
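// exampleRecursiveType is an illustrative sketch (not part of the original
// file) of the kind of self-referential type the indirect func above exists
// for: building Node's encoder needs the encoder for *Node, which refers back
// to Node itself, so the inner lookup receives the placeholder func that
// waits for the real encoder to be ready. The Node type is made up.
func exampleRecursiveType() ([]byte, error) {
	type Node struct {
		Value int   `json:"value"`
		Next  *Node `json:"next,omitempty"`
	}
	return Marshal(Node{Value: 1, Next: &Node{Value: 2}})
}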
var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
)
// newTypeEncoder constructs an encoderFunc for a type.
// The returned encoder only checks CanAddr when allowAddr is true.
func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
if t.Implements(marshalerType) {
return marshalerEncoder
}
if t.Kind() != reflect.Ptr && allowAddr && reflect.PtrTo(t).Implements(marshalerType) {
return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
}
if t.Implements(textMarshalerType) {
return textMarshalerEncoder
}
if t.Kind() != reflect.Ptr && allowAddr && reflect.PtrTo(t).Implements(textMarshalerType) {
return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
}
switch t.Kind() {
case reflect.Bool:
return boolEncoder
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return intEncoder
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return uintEncoder
case reflect.Float32:
return float32Encoder
case reflect.Float64:
return float64Encoder
case reflect.String:
return stringEncoder
case reflect.Interface:
return interfaceEncoder
case reflect.Struct:
return newStructEncoder(t)
case reflect.Map:
return newMapEncoder(t)
case reflect.Slice:
return newSliceEncoder(t)
case reflect.Array:
return newArrayEncoder(t)
case reflect.Ptr:
return newPtrEncoder(t)
default:
return unsupportedTypeEncoder
}
}
func invalidValueEncoder(e *encodeState, v reflect.Value, _ encOpts) {
e.WriteString("null")
}
func marshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
if v.Kind() == reflect.Ptr && v.IsNil() {
e.WriteString("null")
return
}
m, ok := v.Interface().(Marshaler)
if !ok {
e.WriteString("null")
return
}
b, err := m.MarshalJSON()
if err == nil {
// copy JSON into buffer, checking validity.
err = compact(&e.Buffer, b, opts.escapeHTML)
}
if err != nil {
e.error(&MarshalerError{v.Type(), err})
}
}
func addrMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
va := v.Addr()
if va.IsNil() {
e.WriteString("null")
return
}
m := va.Interface().(Marshaler)
b, err := m.MarshalJSON()
if err == nil {
// copy JSON into buffer, checking validity.
err = compact(&e.Buffer, b, opts.escapeHTML)
}
if err != nil {
e.error(&MarshalerError{v.Type(), err})
}
}
func textMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
if v.Kind() == reflect.Ptr && v.IsNil() {
e.WriteString("null")
return
}
m := v.Interface().(encoding.TextMarshaler)
b, err := m.MarshalText()
if err != nil {
e.error(&MarshalerError{v.Type(), err})
}
e.stringBytes(b, opts.escapeHTML)
}
func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
va := v.Addr()
if va.IsNil() {
e.WriteString("null")
return
}
m := va.Interface().(encoding.TextMarshaler)
b, err := m.MarshalText()
if err != nil {
e.error(&MarshalerError{v.Type(), err})
}
e.stringBytes(b, opts.escapeHTML)
}
func boolEncoder(e *encodeState, v reflect.Value, opts encOpts) {
if opts.quoted {
e.WriteByte('"')
}
if v.Bool() {
e.WriteString("true")
} else {
e.WriteString("false")
}
if opts.quoted {
e.WriteByte('"')
}
}
func intEncoder(e *encodeState, v reflect.Value, opts encOpts) {
b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
if opts.quoted {
e.WriteByte('"')
}
e.Write(b)
if opts.quoted {
e.WriteByte('"')
}
}
func uintEncoder(e *encodeState, v reflect.Value, opts encOpts) {
b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
if opts.quoted {
e.WriteByte('"')
}
e.Write(b)
if opts.quoted {
e.WriteByte('"')
}
}
type floatEncoder int // number of bits
func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
f := v.Float()
if math.IsInf(f, 0) || math.IsNaN(f) {
e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
}
// Convert as if by ES6 number to string conversion.
// This matches most other JSON generators.
// See golang.org/issue/6384 and golang.org/issue/14135.
// Like fmt %g, but the exponent cutoffs are different
// and exponents themselves are not padded to two digits.
b := e.scratch[:0]
abs := math.Abs(f)
fmt := byte('f')
// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
if abs != 0 {
if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
fmt = 'e'
}
}
b = strconv.AppendFloat(b, f, fmt, -1, int(bits))
if fmt == 'e' {
// clean up e-09 to e-9
n := len(b)
if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' {
b[n-2] = b[n-1]
b = b[:n-1]
}
}
if opts.quoted {
e.WriteByte('"')
}
e.Write(b)
if opts.quoted {
e.WriteByte('"')
}
}
var (
float32Encoder = (floatEncoder(32)).encode
float64Encoder = (floatEncoder(64)).encode
)
func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) {
if v.Type() == numberType {
numStr := v.String()
// In Go 1.5 the empty string encoded to "0". The empty string is not a valid
// number literal, but we keep that behaviour for compatibility and check
// validity only after substituting the zero value.
if numStr == "" {
numStr = "0" // Number's zero-val
}
if !isValidNumber(numStr) {
e.error(fmt.Errorf("json: invalid number literal %q", numStr))
}
e.WriteString(numStr)
return
}
if opts.quoted {
sb, err := Marshal(v.String())
if err != nil {
e.error(err)
}
e.string(string(sb), opts.escapeHTML)
} else {
e.string(v.String(), opts.escapeHTML)
}
}
func interfaceEncoder(e *encodeState, v reflect.Value, opts encOpts) {
if v.IsNil() {
e.WriteString("null")
return
}
e.reflectValue(v.Elem(), opts)
}
func unsupportedTypeEncoder(e *encodeState, v reflect.Value, _ encOpts) {
e.error(&UnsupportedTypeError{v.Type()})
}
type structEncoder struct {
fields structFields
}
type structFields struct {
list []field
nameIndex map[string]int
// offsetField, if non-zero, is 1 + the index into the list
// slice for the field into which to store the int64 input offset
// of the open curly that begins a JSON object.
offsetField int
}
func (se structEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
next := byte('{')
FieldLoop:
for i := range se.fields.list {
f := &se.fields.list[i]
// Find the nested struct field by following f.index.
fv := v
for _, i := range f.index {
if fv.Kind() == reflect.Ptr {
if fv.IsNil() {
continue FieldLoop
}
fv = fv.Elem()
}
fv = fv.Field(i)
}
if f.omitEmpty && isEmptyValue(fv) {
continue
}
e.WriteByte(next)
next = ','
if opts.escapeHTML {
e.WriteString(f.nameEscHTML)
} else {
e.WriteString(f.nameNonEsc)
}
opts.quoted = f.quoted
f.encoder(e, fv, opts)
}
if next == '{' {
e.WriteString("{}")
} else {
e.WriteByte('}')
}
}
func newStructEncoder(t reflect.Type) encoderFunc {
se := structEncoder{fields: cachedTypeFields(t)}
return se.encode
}
type mapEncoder struct {
elemEnc encoderFunc
}
func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
if v.IsNil() {
e.WriteString("null")
return
}
e.WriteByte('{')
// Extract and sort the keys.
keys := v.MapKeys()
sv := make([]reflectWithString, len(keys))
for i, v := range keys {
sv[i].v = v
if err := sv[i].resolve(); err != nil {
e.error(&MarshalerError{v.Type(), err})
}
}
sort.Slice(sv, func(i, j int) bool { return sv[i].s < sv[j].s })
for i, kv := range sv {
if i > 0 {
e.WriteByte(',')
}
e.string(kv.s, opts.escapeHTML)
e.WriteByte(':')
me.elemEnc(e, v.MapIndex(kv.v), opts)
}
e.WriteByte('}')
}
func newMapEncoder(t reflect.Type) encoderFunc {
switch t.Key().Kind() {
case reflect.String,
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
default:
if !t.Key().Implements(textMarshalerType) {
return unsupportedTypeEncoder
}
}
me := mapEncoder{typeEncoder(t.Elem())}
return me.encode
}
func encodeByteSlice(e *encodeState, v reflect.Value, _ encOpts) {
if v.IsNil() {
e.WriteString("null")
return
}
s := v.Bytes()
e.WriteByte('"')
encodedLen := base64.StdEncoding.EncodedLen(len(s))
if encodedLen <= len(e.scratch) {
// If the encoded bytes fit in e.scratch, avoid an extra
// allocation and use the cheaper Encoding.Encode.
dst := e.scratch[:encodedLen]
base64.StdEncoding.Encode(dst, s)
e.Write(dst)
} else if encodedLen <= 1024 {
// The encoded bytes are short enough to allocate for, and
// Encoding.Encode is still cheaper.
dst := make([]byte, encodedLen)
base64.StdEncoding.Encode(dst, s)
e.Write(dst)
} else {
// The encoded bytes are too long to cheaply allocate, and
// Encoding.Encode is no longer noticeably cheaper.
enc := base64.NewEncoder(base64.StdEncoding, e)
enc.Write(s)
enc.Close()
}
e.WriteByte('"')
}
// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
type sliceEncoder struct {
arrayEnc encoderFunc
}
func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
if v.IsNil() {
e.WriteString("null")
return
}
se.arrayEnc(e, v, opts)
}
func newSliceEncoder(t reflect.Type) encoderFunc {
// Byte slices get special treatment; arrays don't.
if t.Elem().Kind() == reflect.Uint8 {
p := reflect.PtrTo(t.Elem())
if !p.Implements(marshalerType) && !p.Implements(textMarshalerType) {
return encodeByteSlice
}
}
enc := sliceEncoder{newArrayEncoder(t)}
return enc.encode
}
type arrayEncoder struct {
elemEnc encoderFunc
}
func (ae arrayEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
e.WriteByte('[')
n := v.Len()
for i := 0; i < n; i++ {
if i > 0 {
e.WriteByte(',')
}
ae.elemEnc(e, v.Index(i), opts)
}
e.WriteByte(']')
}
func newArrayEncoder(t reflect.Type) encoderFunc {
enc := arrayEncoder{typeEncoder(t.Elem())}
return enc.encode
}
type ptrEncoder struct {
elemEnc encoderFunc
}
func (pe ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
if v.IsNil() {
e.WriteString("null")
return
}
pe.elemEnc(e, v.Elem(), opts)
}
func newPtrEncoder(t reflect.Type) encoderFunc {
enc := ptrEncoder{typeEncoder(t.Elem())}
return enc.encode
}
type condAddrEncoder struct {
canAddrEnc, elseEnc encoderFunc
}
func (ce condAddrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
if v.CanAddr() {
ce.canAddrEnc(e, v, opts)
} else {
ce.elseEnc(e, v, opts)
}
}
// newCondAddrEncoder returns an encoder that checks whether its value
// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
enc := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
return enc.encode
}
func isValidTag(s string) bool {
if s == "" {
return false
}
for _, c := range s {
switch {
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
// Backslash and quote chars are reserved, but
// otherwise any punctuation chars are allowed
// in a tag name.
case !unicode.IsLetter(c) && !unicode.IsDigit(c):
return false
}
}
return true
}
func typeByIndex(t reflect.Type, index []int) reflect.Type {
for _, i := range index {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
t = t.Field(i).Type
}
return t
}
type reflectWithString struct {
v reflect.Value
s string
}
func (w *reflectWithString) resolve() error {
if w.v.Kind() == reflect.String {
w.s = w.v.String()
return nil
}
if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok {
buf, err := tm.MarshalText()
w.s = string(buf)
return err
}
switch w.v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
w.s = strconv.FormatInt(w.v.Int(), 10)
return nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
w.s = strconv.FormatUint(w.v.Uint(), 10)
return nil
}
panic("unexpected map key type")
}
// NOTE: keep in sync with stringBytes below.
func (e *encodeState) string(s string, escapeHTML bool) {
e.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
i++
continue
}
if start < i {
e.WriteString(s[start:i])
}
e.WriteByte('\\')
switch b {
case '\\', '"':
e.WriteByte(b)
case '\n':
e.WriteByte('n')
case '\r':
e.WriteByte('r')
case '\t':
e.WriteByte('t')
default:
// This encodes bytes < 0x20 except for \t, \n and \r.
// If escapeHTML is set, it also escapes <, >, and &
// because they can lead to security holes when
// user-controlled strings are rendered into JSON
// and served to some browsers.
e.WriteString(`u00`)
e.WriteByte(hex[b>>4])
e.WriteByte(hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRuneInString(s[i:])
if c == utf8.RuneError && size == 1 {
if start < i {
e.WriteString(s[start:i])
}
e.WriteString(`\ufffd`)
i += size
start = i
continue
}
// U+2028 is LINE SEPARATOR.
// U+2029 is PARAGRAPH SEPARATOR.
// They are both technically valid characters in JSON strings,
// but don't work in JSONP, which has to be evaluated as JavaScript,
// and can lead to security holes there. It is valid JSON to
// escape them, so we do so unconditionally.
// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
if c == '\u2028' || c == '\u2029' {
if start < i {
e.WriteString(s[start:i])
}
e.WriteString(`\u202`)
e.WriteByte(hex[c&0xF])
i += size
start = i
continue
}
i += size
}
if start < len(s) {
e.WriteString(s[start:])
}
e.WriteByte('"')
}
// NOTE: keep in sync with string above.
func (e *encodeState) stringBytes(s []byte, escapeHTML bool) {
e.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
i++
continue
}
if start < i {
e.Write(s[start:i])
}
e.WriteByte('\\')
switch b {
case '\\', '"':
e.WriteByte(b)
case '\n':
e.WriteByte('n')
case '\r':
e.WriteByte('r')
case '\t':
e.WriteByte('t')
default:
// This encodes bytes < 0x20 except for \t, \n and \r.
// If escapeHTML is set, it also escapes <, >, and &
// because they can lead to security holes when
// user-controlled strings are rendered into JSON
// and served to some browsers.
e.WriteString(`u00`)
e.WriteByte(hex[b>>4])
e.WriteByte(hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRune(s[i:])
if c == utf8.RuneError && size == 1 {
if start < i {
e.Write(s[start:i])
}
e.WriteString(`\ufffd`)
i += size
start = i
continue
}
// U+2028 is LINE SEPARATOR.
// U+2029 is PARAGRAPH SEPARATOR.
// They are both technically valid characters in JSON strings,
// but don't work in JSONP, which has to be evaluated as JavaScript,
// and can lead to security holes there. It is valid JSON to
// escape them, so we do so unconditionally.
// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
if c == '\u2028' || c == '\u2029' {
if start < i {
e.Write(s[start:i])
}
e.WriteString(`\u202`)
e.WriteByte(hex[c&0xF])
i += size
start = i
continue
}
i += size
}
if start < len(s) {
e.Write(s[start:])
}
e.WriteByte('"')
}
// A field represents a single field found in a struct.
type field struct {
name string
nameBytes []byte // []byte(name)
equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
nameNonEsc string // `"` + name + `":`
nameEscHTML string // `"` + HTMLEscape(name) + `":`
tag bool
isInputOffset bool
index []int
typ reflect.Type
omitEmpty bool
quoted bool
encoder encoderFunc
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) structFields {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
var count, nextCount map[reflect.Type]int
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
// Buffer to run HTMLEscape on field names.
var nameEscBuf bytes.Buffer
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
isUnexported := sf.PkgPath != ""
if sf.Anonymous {
t := sf.Type
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if isUnexported && t.Kind() != reflect.Struct {
// Ignore embedded fields of unexported non-struct types.
continue
}
// Do not ignore embedded fields of unexported struct types
// since they may have exported fields.
} else if isUnexported {
// Ignore unexported non-embedded fields.
continue
}
tag := sf.Tag.Get("json")
if tag == "-" {
continue
}
name, opts := parseTag(tag)
if !isValidTag(name) {
name = ""
}
hutag := sf.Tag.Get("hujson")
_, huOpts := parseTag(hutag)
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Only strings, floats, integers, and booleans can be quoted.
quoted := false
if opts.Contains("string") {
switch ft.Kind() {
case reflect.Bool,
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
reflect.Float32, reflect.Float64,
reflect.String:
quoted = true
}
}
isInputOffset := huOpts.Contains("inputoffset") && ft.Kind() == reflect.Int64
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct || isInputOffset {
tagged := name != ""
if name == "" {
name = sf.Name
}
field := field{
name: name,
tag: tagged,
index: index,
typ: ft,
omitEmpty: opts.Contains("omitempty"),
quoted: quoted,
}
field.nameBytes = []byte(field.name)
field.equalFold = foldFunc(field.nameBytes)
field.isInputOffset = isInputOffset
// Build nameEscHTML and nameNonEsc ahead of time.
nameEscBuf.Reset()
nameEscBuf.WriteString(`"`)
HTMLEscape(&nameEscBuf, field.nameBytes)
nameEscBuf.WriteString(`":`)
field.nameEscHTML = nameEscBuf.String()
field.nameNonEsc = `"` + field.name + `":`
fields = append(fields, field)
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
next = append(next, field{name: ft.Name(), index: index, typ: ft})
}
}
}
}
sort.Slice(fields, func(i, j int) bool {
x := fields
// sort field by name, breaking ties with depth, then
// breaking ties with "name came from json tag", then
// breaking ties with index sequence.
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
})
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with JSON tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
for i := range fields {
f := &fields[i]
f.encoder = typeEncoder(typeByIndex(t, f.index))
}
nameIndex := make(map[string]int, len(fields))
var offsetField int
for i, field := range fields {
nameIndex[field.name] = i
if field.isInputOffset {
offsetField = i + 1
}
}
return structFields{fields, nameIndex, offsetField}
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order, then by presence of tag.
// That means that the first field is the dominant one. We need only check
// for error cases: two fields at top level, either both tagged or neither tagged.
if len(fields) > 1 && len(fields[0].index) == len(fields[1].index) && fields[0].tag == fields[1].tag {
return field{}, false
}
return fields[0], true
}
var fieldCache sync.Map // map[reflect.Type]structFields
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) structFields {
if f, ok := fieldCache.Load(t); ok {
return f.(structFields)
}
f, _ := fieldCache.LoadOrStore(t, typeFields(t))
return f.(structFields)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
torchkeras/torchkeras.py
|
# -*- coding: utf-8 -*-
import os
import torch
import numpy as np
import pandas as pd
from torchkeras.summary import summary
from torchkeras.torchtools import EarlyStopping
from torchkeras.utils import log_to_message, ProgressBar
__version__ = "2.2.1"
# On macOS, this should be set when running PyTorch and matplotlib at the same time in Jupyter.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
class Model(torch.nn.Module):
def __init__(self, net=None):
super(Model, self).__init__()
self.net = net
def forward(self, x):
if self.net:
return self.net.forward(x)
else:
raise NotImplementedError
def compile(self, loss_func, optimizer=None, metrics_dict=None, device=None):
"""
Compile the model similar to Keras' .compile(...) method
# Arguments
loss_func: training loss
optimizer: training optimizer
metrics_dict: list of metric functions called as `metric(y_pred, y_true)`,
where y_pred and y_true are both Tensors
device: run device
"""
self.history = {}
self.loss_func = loss_func
self.metrics_dict = metrics_dict if metrics_dict else {}
self.optimizer = optimizer if optimizer else torch.optim.Adam(self.parameters(), lr=0.001)
self.device = device if torch.cuda.is_available() else None  # the given device is only used when CUDA is available; otherwise run on CPU
if self.device:
self.to(self.device)
def summary(self, input_shape, input_type=torch.FloatTensor, batch_size=-1):
summary(self, input_shape, input_type, batch_size)
def train_step(self, features, labels):
self.train()
self.optimizer.zero_grad()
if self.device:
features = features.to(self.device)
labels = labels.to(self.device)
# forward
predictions = self.forward(features)
loss = self.loss_func(predictions, labels)
# evaluate metrics
train_metrics = {"loss": loss.item()}
for metrics in self.metrics_dict:
train_metrics[metrics.__name__] = metrics(predictions, labels).item()
# backward
loss.backward()
# update parameters
self.optimizer.step()
self.optimizer.zero_grad()
return train_metrics
@torch.no_grad()
def evaluate_step(self, features, labels):
self.eval()
if self.device:
features = features.to(self.device)
labels = labels.to(self.device)
with torch.no_grad():
predictions = self.forward(features)
loss = self.loss_func(predictions, labels)
val_metrics = {"val_loss": loss.item()}
for metrics in self.metrics_dict:
val_metrics["val_" + metrics.__name__] = metrics(predictions, labels).item()
return val_metrics
def fit(self, train_data, val_data=None, epochs=10, patience=0, monitor="val_loss", save_path='checkpoint.pt', verbose=True):
"""
Trains the model, similar to Keras' .fit(...) method. (A usage sketch follows the class definition.)
# Arguments
train_data: training data, an iterable (e.g. a DataLoader) yielding (features, labels) batches.
val_data: validation data, an iterable yielding (features, labels) batches.
epochs: integer, the number of passes over train_data.
patience: integer, how many epochs to wait after the monitored metric last improved (0 disables early stopping).
monitor: str, The metric name to monitor.
save_path: str, Path for the checkpoint to be saved to.
verbose : bool, If True, prints a message for each validation loss improvement.
# Returns
DataFrame with training metrics
"""
val_data = val_data if val_data else []
# initialize the early_stopping object (if patience!=0, run early_stopping)
if patience != 0:
early_stopping = EarlyStopping(patience=patience, path=save_path, verbose=verbose)
for epoch in range(1, epochs+1):
print("Epoch {0} / {1}".format(epoch, epochs))
pb = ProgressBar(len(train_data))
# 1,training loop -------------------------------------------------
train_metrics_sum, log, step = {}, {}, 0
for features, labels in train_data:
step += 1
train_metrics = self.train_step(features, labels)
for name, metric in train_metrics.items():
train_metrics_sum[name] = train_metrics_sum.get(name, 0.0) + metric
# Live Update ProgressBar
for name, metric_sum in train_metrics_sum.items():
log[name] = metric_sum / step
pb.bar(step-1, log_to_message(log))
for name, metric_sum in train_metrics_sum.items():
self.history[name] = self.history.get(name, []) + [metric_sum / step]
# 2,validate loop -------------------------------------------------
val_metrics_sum, step = {}, 0
for features, labels in val_data:
step = step + 1
val_metrics = self.evaluate_step(features, labels)
for name, metric in val_metrics.items():
val_metrics_sum[name] = val_metrics_sum.get(name, 0.0) + metric
for name, metric_sum in val_metrics_sum.items():
self.history[name] = self.history.get(name, []) + [metric_sum / step]
# 3,print logs -------------------------------------------------
pb.close(log_to_message({k: round(self.history[k][-1], 4) for k in self.history}))
# early_stopping needs the monitored metric (validation loss by default) to check if it has decreased,
# and if it has, it will make a checkpoint of the current model
if patience != 0:
early_stopping(self.history[monitor][-1], self)
if early_stopping.early_stop:
print("Early stopping")
break
return pd.DataFrame(self.history)
@torch.no_grad()
def evaluate(self, val_data):
self.eval()
val_metrics_list = {}
for features, labels in val_data:
val_metrics = self.evaluate_step(features, labels)
for name, metric in val_metrics.items():
val_metrics_list[name[4:]] = val_metrics_list.get(name[4:], []) + [metric]
return {name: np.mean(metric_list) for name, metric_list in val_metrics_list.items()}
@torch.no_grad()
def predict(self, dl):
self.eval()
if self.device:
result = torch.cat([self.forward(t[0].to(self.device)) for t in dl])
else:
result = torch.cat([self.forward(t[0]) for t in dl])
return result.data
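# Usage sketch (added for illustration; not part of the original module). It
# shows the Keras-style workflow implemented above: wrap a torch module in
# Model, compile it with a loss (and optional optimizer/metrics), then fit on
# DataLoaders yielding (features, labels) batches. The toy data and network
# below are made up for the example.
if __name__ == "__main__":
    from torch.utils.data import DataLoader, TensorDataset

    X = torch.randn(256, 10)
    y = (X.sum(dim=1, keepdim=True) > 0).float()
    dl_train = DataLoader(TensorDataset(X[:200], y[:200]), batch_size=32)
    dl_val = DataLoader(TensorDataset(X[200:], y[200:]), batch_size=32)

    net = torch.nn.Sequential(torch.nn.Linear(10, 1), torch.nn.Sigmoid())
    model = Model(net)
    model.compile(loss_func=torch.nn.BCELoss())
    history = model.fit(dl_train, val_data=dl_val, epochs=2)
    print(history.tail(1))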
|
[] |
[] |
[
"KMP_DUPLICATE_LIB_OK"
] |
[]
|
["KMP_DUPLICATE_LIB_OK"]
|
python
| 1 | 0 | |
test/e2e/system_connection_test.go
|
package integration
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"os/exec"
"os/user"
"path/filepath"
"github.com/containers/common/pkg/config"
. "github.com/containers/podman/v4/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
)
var _ = Describe("podman system connection", func() {
ConfPath := struct {
Value string
IsSet bool
}{}
var podmanTest *PodmanTestIntegration
BeforeEach(func() {
ConfPath.Value, ConfPath.IsSet = os.LookupEnv("CONTAINERS_CONF")
conf, err := ioutil.TempFile("", "containersconf")
Expect(err).ToNot(HaveOccurred())
os.Setenv("CONTAINERS_CONF", conf.Name())
tempdir, err := CreateTempDirInTempDir()
Expect(err).ToNot(HaveOccurred())
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
})
AfterEach(func() {
podmanTest.Cleanup()
os.Remove(os.Getenv("CONTAINERS_CONF"))
if ConfPath.IsSet {
os.Setenv("CONTAINERS_CONF", ConfPath.Value)
} else {
os.Unsetenv("CONTAINERS_CONF")
}
f := CurrentGinkgoTestDescription()
GinkgoWriter.Write(
[]byte(
fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())))
})
Context("without running API service", func() {
It("add ssh://", func() {
cmd := []string{"system", "connection", "add",
"--default",
"--identity", "~/.ssh/id_rsa",
"QA",
"ssh://[email protected]:2222/run/podman/podman.sock",
}
session := podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.Out.Contents()).Should(BeEmpty())
cfg, err := config.ReadCustomConfig()
Expect(err).ShouldNot(HaveOccurred())
Expect(cfg).To(HaveActiveService("QA"))
Expect(cfg).Should(VerifyService(
"QA",
"ssh://[email protected]:2222/run/podman/podman.sock",
"~/.ssh/id_rsa",
))
cmd = []string{"system", "connection", "rename",
"QA",
"QE",
}
session = podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(config.ReadCustomConfig()).To(HaveActiveService("QE"))
})
It("add UDS", func() {
cmd := []string{"system", "connection", "add",
"QA-UDS",
"unix:///run/podman/podman.sock",
}
session := podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.Out.Contents()).Should(BeEmpty())
Expect(config.ReadCustomConfig()).Should(VerifyService(
"QA-UDS",
"unix:///run/podman/podman.sock",
"",
))
cmd = []string{"system", "connection", "add",
"QA-UDS1",
"--socket-path", "/run/user/podman/podman.sock",
"unix:///run/podman/podman.sock",
}
session = podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.Out.Contents()).Should(BeEmpty())
Expect(config.ReadCustomConfig()).Should(HaveActiveService("QA-UDS"))
Expect(config.ReadCustomConfig()).Should(VerifyService(
"QA-UDS1",
"unix:///run/user/podman/podman.sock",
"",
))
})
It("add tcp", func() {
cmd := []string{"system", "connection", "add",
"QA-TCP",
"tcp://localhost:8888",
}
session := podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.Out.Contents()).Should(BeEmpty())
Expect(config.ReadCustomConfig()).Should(VerifyService(
"QA-TCP",
"tcp://localhost:8888",
"",
))
})
It("remove", func() {
session := podmanTest.Podman([]string{"system", "connection", "add",
"--default",
"--identity", "~/.ssh/id_rsa",
"QA",
"ssh://[email protected]:2222/run/podman/podman.sock",
})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
// two passes to test that removing a non-existent connection is not an error
for i := 0; i < 2; i++ {
session = podmanTest.Podman([]string{"system", "connection", "remove", "QA"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.Out.Contents()).Should(BeEmpty())
cfg, err := config.ReadCustomConfig()
Expect(err).ShouldNot(HaveOccurred())
Expect(cfg.Engine.ActiveService).To(BeEmpty())
Expect(cfg.Engine.ServiceDestinations).To(BeEmpty())
}
})
It("remove --all", func() {
session := podmanTest.Podman([]string{"system", "connection", "add",
"--default",
"--identity", "~/.ssh/id_rsa",
"QA",
"ssh://[email protected]:2222/run/podman/podman.sock",
})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
session = podmanTest.Podman([]string{"system", "connection", "remove", "--all"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.Out.Contents()).Should(BeEmpty())
Expect(session.Err.Contents()).Should(BeEmpty())
session = podmanTest.Podman([]string{"system", "connection", "list"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
})
It("default", func() {
for _, name := range []string{"devl", "qe"} {
cmd := []string{"system", "connection", "add",
"--default",
"--identity", "~/.ssh/id_rsa",
name,
"ssh://[email protected]:2222/run/podman/podman.sock",
}
session := podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
}
cmd := []string{"system", "connection", "default", "devl"}
session := podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.Out.Contents()).Should(BeEmpty())
Expect(config.ReadCustomConfig()).Should(HaveActiveService("devl"))
cmd = []string{"system", "connection", "list"}
session = podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.Out).Should(Say("Name *URI *Identity *Default"))
cmd = []string{"system", "connection", "list", "--format", "{{.Name}}"}
session = podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.OutputToString()).Should(Equal("devl qe"))
})
It("failed default", func() {
cmd := []string{"system", "connection", "default", "devl"}
session := podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).ShouldNot(Exit(0))
Expect(session.Err).Should(Say("destination is not defined"))
})
It("failed rename", func() {
cmd := []string{"system", "connection", "rename", "devl", "QE"}
session := podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).ShouldNot(Exit(0))
Expect(session.Err).Should(Say("destination is not defined"))
})
It("empty list", func() {
cmd := []string{"system", "connection", "list"}
session := podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.OutputToStringArray()).Should(HaveLen(1))
Expect(session.Err.Contents()).Should(BeEmpty())
})
})
Context("sshd and API services required", func() {
BeforeEach(func() {
// These tests are unique in that they require podman, podman-remote, systemd and sshd.
// podman-remote commands will be executed by ginkgo directly.
SkipIfContainerized("sshd is not available when running in a container")
SkipIfRemote("connection heuristic requires both podman and podman-remote binaries")
SkipIfNotRootless("FIXME: setup ssh keys when root")
SkipIfSystemdNotRunning("cannot test connection heuristic if systemd is not running")
SkipIfNotActive("sshd", "cannot test connection heuristic if sshd is not running")
})
It("add ssh:// socket path using connection heuristic", func() {
u, err := user.Current()
Expect(err).ShouldNot(HaveOccurred())
cmd := exec.Command(podmanTest.RemotePodmanBinary,
"system", "connection", "add",
"--default",
"--identity", filepath.Join(u.HomeDir, ".ssh", "id_ed25519"),
"QA",
fmt.Sprintf("ssh://%s@localhost", u.Username))
session, err := Start(cmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("%q failed to execute", podmanTest.RemotePodmanBinary))
Eventually(session, DefaultWaitTimeout).Should(Exit(0))
Expect(session.Out.Contents()).Should(BeEmpty())
Expect(session.Err.Contents()).Should(BeEmpty())
uri := url.URL{
Scheme: "ssh",
User: url.User(u.Username),
Host: "localhost:22",
Path: fmt.Sprintf("/run/user/%s/podman/podman.sock", u.Uid),
}
Expect(config.ReadCustomConfig()).Should(HaveActiveService("QA"))
Expect(config.ReadCustomConfig()).Should(VerifyService(
"QA",
uri.String(),
filepath.Join(u.HomeDir, ".ssh", "id_ed25519"),
))
})
})
})
|
[
"\"CONTAINERS_CONF\""
] |
[] |
[
"CONTAINERS_CONF"
] |
[]
|
["CONTAINERS_CONF"]
|
go
| 1 | 0 | |
cache/redis.go
|
package cache
import (
"net/http"
"net/url"
"os"
"sync"
"time"
"github.com/adhocore/urlsh/model"
"github.com/gomodule/redigo/redis"
)
var once sync.Once
var pool *redis.Pool
var prefix = "url:"
func connect() {
dsn := os.Getenv("REDIS_URL")
if dsn == "" {
dsn = os.Getenv("HEROKU_REDIS_MAUVE_URL")
}
if dsn == "" {
return
}
parse, _ := url.Parse(dsn)
user := parse.User.Username()
pass, _ := parse.User.Password()
pool = &redis.Pool{
MaxIdle: 12,
IdleTimeout: 300 * time.Second,
Dial: func() (redis.Conn, error) {
return redis.Dial("tcp", parse.Host, redis.DialUsername(user), redis.DialPassword(pass))
},
}
}
// Connection initializes the redis pool once and returns a connection from it, or nil if no redis DSN is configured
func Connection() redis.Conn {
once.Do(connect)
if nil != pool {
return pool.Get()
}
return nil
}
// LookupURL checks whether a given short code is popular enough to be in the cache.
// It returns the cached model.URL so the request can be served right away without a DB hit.
func LookupURL(shortCode string) (model.URL, int) {
var urlModel model.URL
conn := Connection()
if nil == conn {
return urlModel, 0
}
defer conn.Close()
line, err := conn.Do("GET", urlKey(model.URL{ShortCode: shortCode}))
if err != nil || line == nil {
return urlModel, 0
}
data := string(line.([]uint8))
urlModel.OriginURL = data[1:]
urlModel.ShortCode = shortCode
// 0 = Inactive, 1 = Active
if data[0:1] == "0" {
return urlModel, http.StatusGone
}
return urlModel, http.StatusMovedPermanently
}
// DeactivateURL deactivates the cached entry of an expired/deleted model.URL.
// The deactivation is always written to the cache so Gone (410) can be served without a DB hit.
func DeactivateURL(urlModel model.URL) {
cacheModel, _ := LookupURL(urlModel.ShortCode)
if urlModel.OriginURL == "" {
urlModel.OriginURL = cacheModel.OriginURL
}
SavePopularURL(urlModel, true)
}
// SavePopularURL saves a model.URL to the cache.
// If force is true, it saves even if the entry already exists.
func SavePopularURL(urlModel model.URL, force bool) {
conn := Connection()
if nil == conn || (!force && hasURL(urlModel)) {
return
}
defer conn.Close()
_, _ = conn.Do("SET", urlKey(urlModel), urlValue(urlModel))
}
func hasURL(urlModel model.URL) bool {
conn := Connection()
if nil == conn {
return false
}
defer conn.Close()
exist, err := conn.Do("EXISTS", urlKey(urlModel))
return err == nil && exist.(int64) > 0
}
func urlKey(urlModel model.URL) string {
return prefix + urlModel.ShortCode
}
func urlValue(urlModel model.URL) string {
active := "0"
if urlModel.IsActive() {
active = "1"
}
return active + urlModel.OriginURL
}
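// exampleCacheRoundTrip is an illustrative sketch (not part of the original
// file) of the intended flow: cache a popular short URL once, then serve
// later lookups straight from redis. The short code and origin URL are made
// up; SavePopularURL silently does nothing when redis is not configured.
func exampleCacheRoundTrip() (model.URL, int) {
	u := model.URL{ShortCode: "abc123", OriginURL: "https://example.com"}
	SavePopularURL(u, false)
	return LookupURL(u.ShortCode)
}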
|
[
"\"REDIS_URL\"",
"\"HEROKU_REDIS_MAUVE_URL\""
] |
[] |
[
"REDIS_URL",
"HEROKU_REDIS_MAUVE_URL"
] |
[]
|
["REDIS_URL", "HEROKU_REDIS_MAUVE_URL"]
|
go
| 2 | 0 | |
toggl-rounder.go
|
package main
import (
"flag"
"fmt"
"os"
"github.com/gookit/color"
"github.com/ravbaker/toggl-rounder/internal/rounder"
)
var version, colors, dryRun, debugMode *bool
var apiKey, remainingStrategy *string
func main() {
parseArgs()
appConfig := rounder.NewConfig(*dryRun, *debugMode, *remainingStrategy)
if !*colors {
color.Disable()
}
if *version {
rounder.PrintVersion()
return
}
rounder.RoundThisMonth(*apiKey, appConfig)
}
func parseArgs() {
version = flag.Bool("version", false, "Print the version")
colors = flag.Bool("colors", true, "Display colorful output in Terminal")
apiKey = flag.String("api-key", os.Getenv("TOGGL_API_KEY"), "Toggl API KEY `secret-key`, can also be provided via $TOGGL_API_KEY environment variable")
dryRun = flag.Bool("dry", true, "Unless set to false it doesn't update records in Toggl")
remainingStrategy = flag.String("remaining", "keep", fmt.Sprintf("Decides on what to do with remaining time. Possible options: %q", rounder.AllowedRemainingStrategies))
debugMode = flag.Bool("debug", false, "Print debugging output of API calls")
flag.Parse()
if *version {
return
}
if !rounder.IsAllowedRemainingStrategy(*remainingStrategy) {
fmt.Printf("Not allowed -remaining value: '%s'. Allowed: %q\n", *remainingStrategy, rounder.AllowedRemainingStrategies)
os.Exit(-1)
}
if *apiKey == "" {
fmt.Println("Missing value for -api-key", "\t", flag.Lookup("api-key").Usage)
os.Exit(-1)
}
}
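// exampleRun is an illustrative sketch (not part of the original file) of the
// same flow main performs once flags are parsed: build a rounder config and
// round the current month. The values mirror the flag defaults above ("keep",
// dry run on, debug off); the API key is read from the documented
// $TOGGL_API_KEY variable.
func exampleRun() {
	appConfig := rounder.NewConfig(true, false, "keep")
	rounder.RoundThisMonth(os.Getenv("TOGGL_API_KEY"), appConfig)
}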
|
[
"\"TOGGL_API_KEY\""
] |
[] |
[
"TOGGL_API_KEY"
] |
[]
|
["TOGGL_API_KEY"]
|
go
| 1 | 0 | |
gclient_scm.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Gclient-specific SCM-specific operations."""
from __future__ import print_function
import errno
import logging
import os
import posixpath
import re
import sys
import tempfile
import traceback
import urlparse
import download_from_google_storage
import gclient_utils
import git_cache
import scm
import shutil
import subprocess2
THIS_FILE_PATH = os.path.abspath(__file__)
GSUTIL_DEFAULT_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'gsutil.py')
class NoUsableRevError(gclient_utils.Error):
"""Raised if requested revision isn't found in checkout."""
class DiffFiltererWrapper(object):
"""Simple base class which tracks which file is being diffed and
replaces instances of its file name in the original and
working copy lines of the svn/git diff output."""
index_string = None
original_prefix = "--- "
working_prefix = "+++ "
def __init__(self, relpath, print_func):
# Note that we always use '/' as the path separator to be
# consistent with svn's cygwin-style output on Windows
self._relpath = relpath.replace("\\", "/")
self._current_file = None
self._print_func = print_func
def SetCurrentFile(self, current_file):
self._current_file = current_file
@property
def _replacement_file(self):
return posixpath.join(self._relpath, self._current_file)
def _Replace(self, line):
return line.replace(self._current_file, self._replacement_file)
def Filter(self, line):
if (line.startswith(self.index_string)):
self.SetCurrentFile(line[len(self.index_string):])
line = self._Replace(line)
else:
if (line.startswith(self.original_prefix) or
line.startswith(self.working_prefix)):
line = self._Replace(line)
self._print_func(line)
class SvnDiffFilterer(DiffFiltererWrapper):
index_string = "Index: "
class GitDiffFilterer(DiffFiltererWrapper):
index_string = "diff --git "
def SetCurrentFile(self, current_file):
# Get filename by parsing "a/<filename> b/<filename>"
self._current_file = current_file[:(len(current_file)//2)][2:]  # // keeps integer division under Python 3 as well
def _Replace(self, line):
return re.sub("[a|b]/" + self._current_file, self._replacement_file, line)
### SCM abstraction layer
# Factory Method for SCM wrapper creation
def GetScmName(url):
if url:
url, _ = gclient_utils.SplitUrlRevision(url)
if (url.startswith('git://') or url.startswith('ssh://') or
url.startswith('git+http://') or url.startswith('git+https://') or
url.endswith('.git') or url.startswith('sso://') or
'googlesource' in url):
return 'git'
elif (url.startswith('http://') or url.startswith('https://') or
url.startswith('svn://') or url.startswith('svn+ssh://')):
return 'svn'
elif url.startswith('file://'):
if url.endswith('.git'):
return 'git'
return 'svn'
return None
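def _example_get_scm_name():
  """Illustrative sketch (added here; not part of the original file) of the
  URL-to-SCM mapping GetScmName implements above. The URLs are made up."""
  assert GetScmName('https://chromium.googlesource.com/chromium/src.git') == 'git'
  assert GetScmName('svn://svn.example.com/repo/trunk') == 'svn'
  assert GetScmName('file:///path/to/repo.git') == 'git'
  assert GetScmName(None) is None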
def CreateSCM(url, root_dir=None, relpath=None, out_fh=None, out_cb=None):
SCM_MAP = {
'svn' : SVNWrapper,
'git' : GitWrapper,
}
scm_name = GetScmName(url)
if not scm_name in SCM_MAP:
raise gclient_utils.Error('No SCM found for url %s' % url)
scm_class = SCM_MAP[scm_name]
if not scm_class.BinaryExists():
raise gclient_utils.Error('%s command not found' % scm_name)
return scm_class(url, root_dir, relpath, out_fh, out_cb)
# SCMWrapper base class
class SCMWrapper(object):
"""Add necessary glue between all the supported SCM.
This is the abstraction layer to bind to different SCM.
"""
def __init__(self, url=None, root_dir=None, relpath=None, out_fh=None,
out_cb=None):
self.url = url
self._root_dir = root_dir
if self._root_dir:
self._root_dir = self._root_dir.replace('/', os.sep)
self.relpath = relpath
if self.relpath:
self.relpath = self.relpath.replace('/', os.sep)
if self.relpath and self._root_dir:
self.checkout_path = os.path.join(self._root_dir, self.relpath)
if out_fh is None:
out_fh = sys.stdout
self.out_fh = out_fh
self.out_cb = out_cb
def Print(self, *args, **kwargs):
kwargs.setdefault('file', self.out_fh)
if kwargs.pop('timestamp', True):
self.out_fh.write('[%s] ' % gclient_utils.Elapsed())
print(*args, **kwargs)
def RunCommand(self, command, options, args, file_list=None):
commands = ['cleanup', 'update', 'updatesingle', 'revert',
'revinfo', 'status', 'diff', 'pack', 'runhooks']
if not command in commands:
raise gclient_utils.Error('Unknown command %s' % command)
if not command in dir(self):
raise gclient_utils.Error('Command %s not implemented in %s wrapper' % (
command, self.__class__.__name__))
return getattr(self, command)(options, args, file_list)
@staticmethod
def _get_first_remote_url(checkout_path):
log = scm.GIT.Capture(
['config', '--local', '--get-regexp', r'remote.*.url'],
cwd=checkout_path)
# Get the second token of the first line of the log.
return log.splitlines()[0].split(' ', 1)[1]
def GetActualRemoteURL(self, options):
"""Attempt to determine the remote URL for this SCMWrapper."""
# Git
if os.path.exists(os.path.join(self.checkout_path, '.git')):
actual_remote_url = self._get_first_remote_url(self.checkout_path)
# If a cache_dir is used, obtain the actual remote URL from the cache.
if getattr(self, 'cache_dir', None):
url, _ = gclient_utils.SplitUrlRevision(self.url)
mirror = git_cache.Mirror(url)
if (mirror.exists() and mirror.mirror_path.replace('\\', '/') ==
actual_remote_url.replace('\\', '/')):
actual_remote_url = self._get_first_remote_url(mirror.mirror_path)
return actual_remote_url
# Svn
if os.path.exists(os.path.join(self.checkout_path, '.svn')):
return scm.SVN.CaptureLocalInfo([], self.checkout_path)['URL']
return None
def DoesRemoteURLMatch(self, options):
"""Determine whether the remote URL of this checkout is the expected URL."""
if not os.path.exists(self.checkout_path):
# A checkout which doesn't exist can't be broken.
return True
actual_remote_url = self.GetActualRemoteURL(options)
if actual_remote_url:
return (gclient_utils.SplitUrlRevision(actual_remote_url)[0].rstrip('/')
== gclient_utils.SplitUrlRevision(self.url)[0].rstrip('/'))
else:
# This may occur if the self.checkout_path exists but does not contain a
# valid git or svn checkout.
return False
def _DeleteOrMove(self, force):
"""Delete the checkout directory or move it out of the way.
Args:
force: bool; if True, delete the directory. Otherwise, just move it.
"""
if force and os.environ.get('CHROME_HEADLESS') == '1':
self.Print('_____ Conflicting directory found in %s. Removing.'
% self.checkout_path)
gclient_utils.AddWarning('Conflicting directory %s deleted.'
% self.checkout_path)
gclient_utils.rmtree(self.checkout_path)
else:
bad_scm_dir = os.path.join(self._root_dir, '_bad_scm',
os.path.dirname(self.relpath))
try:
os.makedirs(bad_scm_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
dest_path = tempfile.mkdtemp(
prefix=os.path.basename(self.relpath),
dir=bad_scm_dir)
self.Print('_____ Conflicting directory found in %s. Moving to %s.'
% (self.checkout_path, dest_path))
gclient_utils.AddWarning('Conflicting directory %s moved to %s.'
% (self.checkout_path, dest_path))
shutil.move(self.checkout_path, dest_path)
class GitWrapper(SCMWrapper):
"""Wrapper for Git"""
name = 'git'
remote = 'origin'
cache_dir = None
def __init__(self, url=None, *args):
"""Removes 'git+' fake prefix from git URL."""
if url.startswith('git+http://') or url.startswith('git+https://'):
url = url[4:]
SCMWrapper.__init__(self, url, *args)
filter_kwargs = { 'time_throttle': 1, 'out_fh': self.out_fh }
if self.out_cb:
filter_kwargs['predicate'] = self.out_cb
self.filter = gclient_utils.GitFilter(**filter_kwargs)
@staticmethod
def BinaryExists():
"""Returns true if the command exists."""
try:
# We assume git is newer than 1.7. See: crbug.com/114483
result, version = scm.GIT.AssertVersion('1.7')
if not result:
raise gclient_utils.Error('Git version is older than 1.7: %s' % version)
return result
except OSError:
return False
def GetCheckoutRoot(self):
return scm.GIT.GetCheckoutRoot(self.checkout_path)
def GetRevisionDate(self, _revision):
"""Returns the given revision's date in ISO-8601 format (which contains the
time zone)."""
# TODO(floitsch): get the time-stamp of the given revision and not just the
# time-stamp of the currently checked out revision.
return self._Capture(['log', '-n', '1', '--format=%ai'])
@staticmethod
def cleanup(options, args, file_list):
"""'Cleanup' the repo.
There's no real git equivalent for the svn cleanup command, so this is a no-op.
"""
def diff(self, options, _args, _file_list):
merge_base = self._Capture(['merge-base', 'HEAD', self.remote])
self._Run(['diff', merge_base], options)
def pack(self, _options, _args, _file_list):
"""Generates a patch file which can be applied to the root of the
repository.
The patch file is generated from a diff of the merge base of HEAD and
its upstream branch.
"""
merge_base = self._Capture(['merge-base', 'HEAD', self.remote])
gclient_utils.CheckCallAndFilter(
['git', 'diff', merge_base],
cwd=self.checkout_path,
filter_fn=GitDiffFilterer(self.relpath, print_func=self.Print).Filter)
def _FetchAndReset(self, revision, file_list, options):
"""Equivalent to git fetch; git reset."""
quiet = []
if not options.verbose:
quiet = ['--quiet']
self._UpdateBranchHeads(options, fetch=False)
self._Fetch(options, prune=True, quiet=options.verbose)
self._Run(['reset', '--hard', revision] + quiet, options)
if file_list is not None:
files = self._Capture(['ls-files']).splitlines()
file_list.extend([os.path.join(self.checkout_path, f) for f in files])
def _DisableHooks(self):
hook_dir = os.path.join(self.checkout_path, '.git', 'hooks')
if not os.path.isdir(hook_dir):
return
for f in os.listdir(hook_dir):
if not f.endswith('.sample') and not f.endswith('.disabled'):
disabled_hook_path = os.path.join(hook_dir, f + '.disabled')
if os.path.exists(disabled_hook_path):
os.remove(disabled_hook_path)
os.rename(os.path.join(hook_dir, f), disabled_hook_path)
def _maybe_break_locks(self, options):
"""This removes all .lock files from this repo's .git directory, if the
user passed the --break_repo_locks command line flag.
In particular, this will cleanup index.lock files, as well as ref lock
files.
"""
if options.break_repo_locks:
git_dir = os.path.join(self.checkout_path, '.git')
for path, _, filenames in os.walk(git_dir):
for filename in filenames:
if filename.endswith('.lock'):
to_break = os.path.join(path, filename)
self.Print('breaking lock: %s' % (to_break,))
try:
os.remove(to_break)
except OSError as ex:
self.Print('FAILED to break lock: %s: %s' % (to_break, ex))
raise
def update(self, options, args, file_list):
"""Runs git to update or transparently checkout the working copy.
All updated files will be appended to file_list.
Raises:
Error: if can't get URL for relative path.
"""
if args:
raise gclient_utils.Error("Unsupported argument(s): %s" % ",".join(args))
self._CheckMinVersion("1.6.6")
# If a dependency is not pinned, track the default remote branch.
default_rev = 'refs/remotes/%s/master' % self.remote
url, deps_revision = gclient_utils.SplitUrlRevision(self.url)
rev_str = ""
revision = deps_revision
managed = True
if options.revision:
# Override the revision number.
revision = str(options.revision)
if revision == 'unmanaged':
# Check again for a revision in case an initial ref was specified
# in the url, for example bla.git@refs/heads/custombranch
revision = deps_revision
managed = False
if not revision:
revision = default_rev
if managed:
self._DisableHooks()
if gclient_utils.IsDateRevision(revision):
# Date-revisions only work on git-repositories if the reflog hasn't
# expired yet. Use rev-list to get the corresponding revision.
# git rev-list -n 1 --before='time-stamp' branchname
if options.transitive:
self.Print('Warning: --transitive only works for SVN repositories.')
revision = default_rev
rev_str = ' at %s' % revision
files = [] if file_list is not None else None
printed_path = False
verbose = []
if options.verbose:
self.Print('_____ %s%s' % (self.relpath, rev_str), timestamp=False)
verbose = ['--verbose']
printed_path = True
remote_ref = scm.GIT.RefToRemoteRef(revision, self.remote)
if remote_ref:
# Rewrite remote refs to their local equivalents.
revision = ''.join(remote_ref)
rev_type = "branch"
elif revision.startswith('refs/'):
# Local branch? We probably don't want to support this, since DEPS should
# always specify branches as they are in the upstream repo.
rev_type = "branch"
else:
# hash is also a tag, only make a distinction at checkout
rev_type = "hash"
mirror = self._GetMirror(url, options)
if mirror:
url = mirror.mirror_path
# If we are going to introduce a new project, there is a possibility that
# we are syncing back to a state where the project was originally a
# sub-project rolled by DEPS (realistic case: crossing the Blink merge point
# syncing backwards, when Blink was a DEPS entry and not part of src.git).
# In such a case, we might have a backup of the former .git folder, which can
# be used to avoid re-fetching the entire repo (useful for bisects).
backup_dir = self.GetGitBackupDirPath()
target_dir = os.path.join(self.checkout_path, '.git')
if os.path.exists(backup_dir) and not os.path.exists(target_dir):
gclient_utils.safe_makedirs(self.checkout_path)
os.rename(backup_dir, target_dir)
# Reset to a clean state
self._Run(['reset', '--hard', 'HEAD'], options)
if (not os.path.exists(self.checkout_path) or
(os.path.isdir(self.checkout_path) and
not os.path.exists(os.path.join(self.checkout_path, '.git')))):
if mirror:
self._UpdateMirror(mirror, options)
try:
self._Clone(revision, url, options)
except subprocess2.CalledProcessError:
self._DeleteOrMove(options.force)
self._Clone(revision, url, options)
if file_list is not None:
files = self._Capture(['ls-files']).splitlines()
file_list.extend([os.path.join(self.checkout_path, f) for f in files])
if not verbose:
# Make the output a little prettier. It's nice to have some whitespace
# between projects when cloning.
self.Print('')
return self._Capture(['rev-parse', '--verify', 'HEAD'])
if not managed:
self._UpdateBranchHeads(options, fetch=False)
self.Print('________ unmanaged solution; skipping %s' % self.relpath)
return self._Capture(['rev-parse', '--verify', 'HEAD'])
self._maybe_break_locks(options)
if mirror:
self._UpdateMirror(mirror, options)
# See if the url has changed (the unittests use git://foo for the url, let
# that through).
current_url = self._Capture(['config', 'remote.%s.url' % self.remote])
return_early = False
# TODO(maruel): Delete url != 'git://foo' since it's just to make the
# unit test pass. (and update the comment above)
# Skip url auto-correction if remote.origin.gclient-auto-fix-url is set.
# This allows devs to use experimental repos which have a different url
# but whose branch(es) are the same as official repos.
if (current_url.rstrip('/') != url.rstrip('/') and
url != 'git://foo' and
subprocess2.capture(
['git', 'config', 'remote.%s.gclient-auto-fix-url' % self.remote],
cwd=self.checkout_path).strip() != 'False'):
self.Print('_____ switching %s to a new upstream' % self.relpath)
if not (options.force or options.reset):
# Make sure it's clean
self._CheckClean(rev_str)
# Switch over to the new upstream
self._Run(['remote', 'set-url', self.remote, url], options)
if mirror:
with open(os.path.join(
self.checkout_path, '.git', 'objects', 'info', 'alternates'),
'w') as fh:
fh.write(os.path.join(url, 'objects'))
self._EnsureValidHeadObjectOrCheckout(revision, options, url)
self._FetchAndReset(revision, file_list, options)
return_early = True
else:
self._EnsureValidHeadObjectOrCheckout(revision, options, url)
if return_early:
return self._Capture(['rev-parse', '--verify', 'HEAD'])
cur_branch = self._GetCurrentBranch()
# Cases:
# 0) HEAD is detached. Probably from our initial clone.
# - make sure HEAD is contained by a named ref, then update.
# Cases 1-4. HEAD is a branch.
# 1) current branch is not tracking a remote branch (could be git-svn)
# - try to rebase onto the new hash or branch
# 2) current branch is tracking a remote branch with local committed
# changes, but the DEPS file switched to point to a hash
# - rebase those changes on top of the hash
# 3) current branch is tracking a remote branch w/or w/out changes, and
# no DEPS switch
# - see if we can FF, if not, prompt the user for rebase, merge, or stop
# 4) current branch is tracking a remote branch, but DEPS switches to a
# different remote branch, and
# a) current branch has no local changes, and --force:
# - checkout new branch
# b) current branch has local changes, and --force and --reset:
# - checkout new branch
# c) otherwise exit
# GetUpstreamBranch returns something like 'refs/remotes/origin/master' for
# a tracking branch
# or 'master' if not a tracking branch (it's based on a specific rev/hash)
# or it returns None if it couldn't find an upstream
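# Illustrative walk-through (hypothetical situation, not taken from a real
# checkout): if the current branch tracks 'refs/remotes/origin/master' with
# local commits and the DEPS file now pins a specific hash, rev_type is 'hash'
# and case 2 applies, so the local commits are rebased onto the pinned hash.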
if cur_branch is None:
upstream_branch = None
current_type = "detached"
logging.debug("Detached HEAD")
else:
upstream_branch = scm.GIT.GetUpstreamBranch(self.checkout_path)
if not upstream_branch or not upstream_branch.startswith('refs/remotes'):
current_type = "hash"
logging.debug("Current branch is not tracking an upstream (remote)"
" branch.")
elif upstream_branch.startswith('refs/remotes'):
current_type = "branch"
else:
raise gclient_utils.Error('Invalid Upstream: %s' % upstream_branch)
if not scm.GIT.IsValidRevision(self.checkout_path, revision, sha_only=True):
# Update the remotes first so we have all the refs.
remote_output = scm.GIT.Capture(['remote'] + verbose + ['update'],
cwd=self.checkout_path)
if verbose:
self.Print(remote_output)
self._UpdateBranchHeads(options, fetch=True)
# This is a big hammer, debatable if it should even be here...
if options.force or options.reset:
target = 'HEAD'
if options.upstream and upstream_branch:
target = upstream_branch
self._Run(['reset', '--hard', target], options)
if current_type == 'detached':
# case 0
if not options.force:
# Don't do this check if nuclear option is on.
self._CheckClean(rev_str)
self._CheckDetachedHead(rev_str, options)
if self._Capture(['rev-list', '-n', '1', 'HEAD']) == revision:
self.Print('Up-to-date; skipping checkout.')
else:
# 'git checkout' may need to overwrite existing untracked files. Allow
# it only when nuclear options are enabled.
self._Checkout(
options,
revision,
force=(options.force or options.delete_unversioned_trees),
quiet=True,
)
if not printed_path:
self.Print('_____ %s%s' % (self.relpath, rev_str), timestamp=False)
elif current_type == 'hash':
# case 1
if scm.GIT.IsGitSvn(self.checkout_path) and upstream_branch is not None:
# Our git-svn branch (upstream_branch) is our upstream
self._AttemptRebase(upstream_branch, files, options,
newbase=revision, printed_path=printed_path,
merge=options.merge)
printed_path = True
else:
# Can't find a merge-base since we don't know our upstream. That makes
# this command VERY likely to produce a rebase failure. For now we
# assume origin is our upstream since that's what the old behavior was.
upstream_branch = self.remote
if options.revision or deps_revision:
upstream_branch = revision
self._AttemptRebase(upstream_branch, files, options,
printed_path=printed_path, merge=options.merge)
printed_path = True
elif rev_type == 'hash':
# case 2
self._AttemptRebase(upstream_branch, files, options,
newbase=revision, printed_path=printed_path,
merge=options.merge)
printed_path = True
elif remote_ref and ''.join(remote_ref) != upstream_branch:
# case 4
new_base = ''.join(remote_ref)
if not printed_path:
self.Print('_____ %s%s' % (self.relpath, rev_str), timestamp=False)
switch_error = ("Could not switch upstream branch from %s to %s\n"
% (upstream_branch, new_base) +
"Please use --force or merge or rebase manually:\n" +
"cd %s; git rebase %s\n" % (self.checkout_path, new_base) +
"OR git checkout -b <some new branch> %s" % new_base)
force_switch = False
if options.force:
try:
self._CheckClean(rev_str)
# case 4a
force_switch = True
except gclient_utils.Error as e:
if options.reset:
# case 4b
force_switch = True
else:
switch_error = '%s\n%s' % (e.message, switch_error)
if force_switch:
self.Print("Switching upstream branch from %s to %s" %
(upstream_branch, new_base))
switch_branch = 'gclient_' + remote_ref[1]
self._Capture(['branch', '-f', switch_branch, new_base])
self._Checkout(options, switch_branch, force=True, quiet=True)
else:
# case 4c
raise gclient_utils.Error(switch_error)
else:
# case 3 - the default case
if files is not None:
files = self._Capture(['diff', upstream_branch, '--name-only']).split()
if verbose:
self.Print('Trying fast-forward merge to branch : %s' % upstream_branch)
try:
merge_args = ['merge']
if options.merge:
merge_args.append('--ff')
else:
merge_args.append('--ff-only')
merge_args.append(upstream_branch)
merge_output = self._Capture(merge_args)
except subprocess2.CalledProcessError as e:
if re.match('fatal: Not possible to fast-forward, aborting.', e.stderr):
files = []
if not printed_path:
self.Print('_____ %s%s' % (self.relpath, rev_str), timestamp=False)
printed_path = True
while True:
if not options.auto_rebase:
try:
action = self._AskForData(
'Cannot %s, attempt to rebase? '
'(y)es / (q)uit / (s)kip : ' %
('merge' if options.merge else 'fast-forward merge'),
options)
except ValueError:
raise gclient_utils.Error('Invalid Character')
if options.auto_rebase or re.match(r'yes|y', action, re.I):
self._AttemptRebase(upstream_branch, files, options,
printed_path=printed_path, merge=False)
printed_path = True
break
elif re.match(r'quit|q', action, re.I):
raise gclient_utils.Error("Can't fast-forward, please merge or "
"rebase manually.\n"
"cd %s && git " % self.checkout_path
+ "rebase %s" % upstream_branch)
elif re.match(r'skip|s', action, re.I):
self.Print('Skipping %s' % self.relpath)
return
else:
self.Print('Input not recognized')
elif re.match("error: Your local changes to '.*' would be "
"overwritten by merge. Aborting.\nPlease, commit your "
"changes or stash them before you can merge.\n",
e.stderr):
if not printed_path:
self.Print('_____ %s%s' % (self.relpath, rev_str), timestamp=False)
printed_path = True
raise gclient_utils.Error(e.stderr)
else:
# Some other problem happened with the merge
logging.error("Error during fast-forward merge in %s!" % self.relpath)
self.Print(e.stderr)
raise
else:
# Fast-forward merge was successful
if not re.match('Already up-to-date.', merge_output) or verbose:
if not printed_path:
self.Print('_____ %s%s' % (self.relpath, rev_str), timestamp=False)
printed_path = True
self.Print(merge_output.strip())
if not verbose:
# Make the output a little prettier. It's nice to have some
# whitespace between projects when syncing.
self.Print('')
if file_list is not None:
file_list.extend([os.path.join(self.checkout_path, f) for f in files])
# If the rebase generated a conflict, abort and ask user to fix
if self._IsRebasing():
raise gclient_utils.Error('\n____ %s%s\n'
'\nConflict while rebasing this branch.\n'
'Fix the conflict and run gclient again.\n'
'See man git-rebase for details.\n'
% (self.relpath, rev_str))
if verbose:
self.Print('Checked out revision %s' % self.revinfo(options, (), None),
timestamp=False)
# If --reset and --delete_unversioned_trees are specified, remove any
# untracked directories.
if options.reset and options.delete_unversioned_trees:
# GIT.CaptureStatus() uses 'git diff' to compare to a specific SHA1 (the
# merge-base by default), so it doesn't include untracked files. So we use
# 'git ls-files --directory --others --exclude-standard' here directly.
paths = scm.GIT.Capture(
['ls-files', '--directory', '--others', '--exclude-standard'],
self.checkout_path)
for path in (p for p in paths.splitlines() if p.endswith('/')):
full_path = os.path.join(self.checkout_path, path)
if not os.path.islink(full_path):
self.Print('_____ removing unversioned directory %s' % path)
gclient_utils.rmtree(full_path)
return self._Capture(['rev-parse', '--verify', 'HEAD'])
def revert(self, options, _args, file_list):
"""Reverts local modifications.
All reverted files will be appended to file_list.
"""
if not os.path.isdir(self.checkout_path):
# revert won't work if the directory doesn't exist. It needs to
# checkout instead.
self.Print('_____ %s is missing, synching instead' % self.relpath)
# Don't reuse the args.
return self.update(options, [], file_list)
default_rev = "refs/heads/master"
if options.upstream:
if self._GetCurrentBranch():
upstream_branch = scm.GIT.GetUpstreamBranch(self.checkout_path)
default_rev = upstream_branch or default_rev
_, deps_revision = gclient_utils.SplitUrlRevision(self.url)
if not deps_revision:
deps_revision = default_rev
if deps_revision.startswith('refs/heads/'):
deps_revision = deps_revision.replace('refs/heads/', self.remote + '/')
try:
deps_revision = self.GetUsableRev(deps_revision, options)
except NoUsableRevError as e:
# If the DEPS entry's url and hash changed, try to update the origin.
# See also http://crbug.com/520067.
logging.warn(
'Couldn\'t find a usable revision, will retry updating instead: %s',
e.message)
return self.update(options, [], file_list)
if file_list is not None:
files = self._Capture(['diff', deps_revision, '--name-only']).split()
self._Run(['reset', '--hard', deps_revision], options)
self._Run(['clean', '-f', '-d'], options)
if file_list is not None:
file_list.extend([os.path.join(self.checkout_path, f) for f in files])
def revinfo(self, _options, _args, _file_list):
"""Returns revision"""
return self._Capture(['rev-parse', 'HEAD'])
def runhooks(self, options, args, file_list):
self.status(options, args, file_list)
def status(self, options, _args, file_list):
"""Display status information."""
if not os.path.isdir(self.checkout_path):
self.Print('________ couldn\'t run status in %s:\n'
'The directory does not exist.' % self.checkout_path)
else:
merge_base = self._Capture(['merge-base', 'HEAD', self.remote])
self._Run(['diff', '--name-status', merge_base], options,
stdout=self.out_fh)
if file_list is not None:
files = self._Capture(['diff', '--name-only', merge_base]).split()
file_list.extend([os.path.join(self.checkout_path, f) for f in files])
def GetUsableRev(self, rev, options):
"""Finds a useful revision for this repository.
If SCM is git-svn and the head revision is less than |rev|, git svn fetch
will be called on the source."""
sha1 = None
if not os.path.isdir(self.checkout_path):
raise NoUsableRevError(
( 'We could not find a valid hash for safesync_url response "%s".\n'
'Safesync URLs with a git checkout currently require the repo to\n'
'be cloned without a safesync_url before adding the safesync_url.\n'
'For more info, see: '
'http://code.google.com/p/chromium/wiki/UsingNewGit'
'#Initial_checkout' ) % rev)
elif rev.isdigit() and len(rev) < 7:
# Handles an SVN rev. As an optimization, only verify an SVN revision as
# [0-9]{1,6} for now to avoid making a network request.
if scm.GIT.IsGitSvn(cwd=self.checkout_path):
local_head = scm.GIT.GetGitSvnHeadRev(cwd=self.checkout_path)
if not local_head or local_head < int(rev):
try:
logging.debug('Looking for git-svn configuration optimizations.')
if scm.GIT.Capture(['config', '--get', 'svn-remote.svn.fetch'],
cwd=self.checkout_path):
self._Fetch(options)
except subprocess2.CalledProcessError:
logging.debug('git config --get svn-remote.svn.fetch failed, '
'ignoring possible optimization.')
if options.verbose:
self.Print('Running git svn fetch. This might take a while.\n')
scm.GIT.Capture(['svn', 'fetch'], cwd=self.checkout_path)
try:
sha1 = scm.GIT.GetBlessedSha1ForSvnRev(
cwd=self.checkout_path, rev=rev)
except gclient_utils.Error as e:
sha1 = e.message
self.Print('Warning: Could not find a git revision with accurate\n'
'.DEPS.git that maps to SVN revision %s. Sync-ing to\n'
'the closest sane git revision, which is:\n'
' %s\n' % (rev, e.message))
if not sha1:
raise NoUsableRevError(
( 'It appears that either your git-svn remote is incorrectly\n'
'configured or the revision in your safesync_url is\n'
'higher than git-svn remote\'s HEAD as we couldn\'t find a\n'
'corresponding git hash for SVN rev %s.' ) % rev)
else:
if scm.GIT.IsValidRevision(cwd=self.checkout_path, rev=rev):
sha1 = rev
else:
# May exist in origin, but we don't have it yet, so fetch and look
# again.
self._Fetch(options)
if scm.GIT.IsValidRevision(cwd=self.checkout_path, rev=rev):
sha1 = rev
if not sha1:
raise NoUsableRevError(
( 'We could not find a valid hash for safesync_url response "%s".\n'
'Safesync URLs with a git checkout currently require a git-svn\n'
'remote or a safesync_url that provides git sha1s. Please add a\n'
'git-svn remote or change your safesync_url. For more info, see:\n'
'http://code.google.com/p/chromium/wiki/UsingNewGit'
'#Initial_checkout' ) % rev)
return sha1
def FullUrlForRelativeUrl(self, url):
# Strip from last '/'
# Equivalent to unix basename
base_url = self.url
return base_url[:base_url.rfind('/')] + url
def GetGitBackupDirPath(self):
"""Returns the path where the .git folder for the current project can be
staged/restored. Use case: subproject moved from DEPS <-> outer project."""
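# Illustrative example (hypothetical paths, POSIX separators): with _root_dir
# '/b/build/src' and relpath 'third_party/blink', this returns
# '/b/build/src/old_third_party_blink.git'.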
return os.path.join(self._root_dir,
'old_' + self.relpath.replace(os.sep, '_')) + '.git'
def _GetMirror(self, url, options):
"""Get a git_cache.Mirror object for the argument url."""
if not git_cache.Mirror.GetCachePath():
return None
mirror_kwargs = {
'print_func': self.filter,
'refs': []
}
if hasattr(options, 'with_branch_heads') and options.with_branch_heads:
mirror_kwargs['refs'].append('refs/branch-heads/*')
if hasattr(options, 'with_tags') and options.with_tags:
mirror_kwargs['refs'].append('refs/tags/*')
return git_cache.Mirror(url, **mirror_kwargs)
@staticmethod
def _UpdateMirror(mirror, options):
"""Update a git mirror by fetching the latest commits from the remote."""
if getattr(options, 'shallow', False):
# HACK(hinoka): These repositories should be super shallow.
if 'flash' in mirror.url:
depth = 10
else:
depth = 10000
else:
depth = None
mirror.populate(verbose=options.verbose,
bootstrap=not getattr(options, 'no_bootstrap', False),
depth=depth,
ignore_lock=getattr(options, 'ignore_locks', False),
lock_timeout=getattr(options, 'lock_timeout', 0))
mirror.unlock()
def _Clone(self, revision, url, options):
"""Clone a git repository from the given URL.
Once we've cloned the repo, we checkout a working branch if the specified
revision is a branch head. If it is a tag or a specific commit, then we
leave HEAD detached as it makes future updates simpler -- in this case the
user should first create a new branch or switch to an existing branch before
making changes in the repo."""
if not options.verbose:
# git clone doesn't seem to insert a newline properly before printing
# to stdout
self.Print('')
cfg = gclient_utils.DefaultIndexPackConfig(url)
clone_cmd = cfg + ['clone', '--no-checkout', '--progress']
if self.cache_dir:
clone_cmd.append('--shared')
if options.verbose:
clone_cmd.append('--verbose')
clone_cmd.append(url)
# If the parent directory does not exist, Git clone on Windows will not
# create it, so we need to do it manually.
parent_dir = os.path.dirname(self.checkout_path)
gclient_utils.safe_makedirs(parent_dir)
template_dir = None
if hasattr(options, 'no_history') and options.no_history:
if gclient_utils.IsGitSha(revision):
# In the case of a subproject, the pinned sha is not necessarily the
# head of the remote branch (so we can't just use --depth=N). Instead,
# we tell git to fetch all the remote objects from SHA..HEAD by means of
# a template git dir which has a 'shallow' file pointing to the sha.
template_dir = tempfile.mkdtemp(
prefix='_gclient_gittmp_%s' % os.path.basename(self.checkout_path),
dir=parent_dir)
self._Run(['init', '--bare', template_dir], options, cwd=self._root_dir)
with open(os.path.join(template_dir, 'shallow'), 'w') as template_file:
template_file.write(revision)
clone_cmd.append('--template=' + template_dir)
else:
# Otherwise, we're just interested in the HEAD. Just use --depth.
clone_cmd.append('--depth=1')
tmp_dir = tempfile.mkdtemp(
prefix='_gclient_%s_' % os.path.basename(self.checkout_path),
dir=parent_dir)
try:
clone_cmd.append(tmp_dir)
self._Run(clone_cmd, options, cwd=self._root_dir, retry=True)
gclient_utils.safe_makedirs(self.checkout_path)
gclient_utils.safe_rename(os.path.join(tmp_dir, '.git'),
os.path.join(self.checkout_path, '.git'))
except:
traceback.print_exc(file=self.out_fh)
raise
finally:
if os.listdir(tmp_dir):
self.Print('_____ removing non-empty tmp dir %s' % tmp_dir)
gclient_utils.rmtree(tmp_dir)
if template_dir:
gclient_utils.rmtree(template_dir)
self._UpdateBranchHeads(options, fetch=True)
remote_ref = scm.GIT.RefToRemoteRef(revision, self.remote)
self._Checkout(options, ''.join(remote_ref or revision), quiet=True)
if self._GetCurrentBranch() is None:
# Squelch git's very verbose detached HEAD warning and use our own
self.Print(
('Checked out %s to a detached HEAD. Before making any commits\n'
'in this repo, you should use \'git checkout <branch>\' to switch to\n'
'an existing branch or use \'git checkout %s -b <branch>\' to\n'
'create a new branch for your work.') % (revision, self.remote))
def _AskForData(self, prompt, options):
if options.jobs > 1:
self.Print(prompt)
raise gclient_utils.Error("Background task requires input. Rerun "
"gclient with --jobs=1 so that\n"
"interaction is possible.")
try:
return raw_input(prompt)
except KeyboardInterrupt:
# Hide the exception.
sys.exit(1)
def _AttemptRebase(self, upstream, files, options, newbase=None,
branch=None, printed_path=False, merge=False):
"""Attempt to rebase onto either upstream or, if specified, newbase."""
if files is not None:
files.extend(self._Capture(['diff', upstream, '--name-only']).split())
revision = upstream
if newbase:
revision = newbase
action = 'merge' if merge else 'rebase'
if not printed_path:
self.Print('_____ %s : Attempting %s onto %s...' % (
self.relpath, action, revision))
printed_path = True
else:
self.Print('Attempting %s onto %s...' % (action, revision))
if merge:
merge_output = self._Capture(['merge', revision])
if options.verbose:
self.Print(merge_output)
return
# Build the rebase command here using the args
# git rebase [options] [--onto <newbase>] <upstream> [<branch>]
rebase_cmd = ['rebase']
if options.verbose:
rebase_cmd.append('--verbose')
if newbase:
rebase_cmd.extend(['--onto', newbase])
rebase_cmd.append(upstream)
if branch:
rebase_cmd.append(branch)
try:
rebase_output = scm.GIT.Capture(rebase_cmd, cwd=self.checkout_path)
except subprocess2.CalledProcessError as e:
if (re.match(r'cannot rebase: you have unstaged changes', e.stderr) or
re.match(r'cannot rebase: your index contains uncommitted changes',
e.stderr)):
while True:
rebase_action = self._AskForData(
'Cannot rebase because of unstaged changes.\n'
'\'git reset --hard HEAD\' ?\n'
'WARNING: destroys any uncommitted work in your current branch!'
' (y)es / (q)uit / (s)how : ', options)
if re.match(r'yes|y', rebase_action, re.I):
self._Run(['reset', '--hard', 'HEAD'], options)
# Should this be recursive?
rebase_output = scm.GIT.Capture(rebase_cmd, cwd=self.checkout_path)
break
elif re.match(r'quit|q', rebase_action, re.I):
raise gclient_utils.Error("Please merge or rebase manually\n"
"cd %s && git " % self.checkout_path
+ "%s" % ' '.join(rebase_cmd))
elif re.match(r'show|s', rebase_action, re.I):
self.Print('%s' % e.stderr.strip())
continue
else:
gclient_utils.Error("Input not recognized")
continue
elif re.search(r'^CONFLICT', e.stdout, re.M):
raise gclient_utils.Error("Conflict while rebasing this branch.\n"
"Fix the conflict and run gclient again.\n"
"See 'man git-rebase' for details.\n")
else:
self.Print(e.stdout.strip())
self.Print('Rebase produced error output:\n%s' % e.stderr.strip())
raise gclient_utils.Error("Unrecognized error, please merge or rebase "
"manually.\ncd %s && git " %
self.checkout_path
+ "%s" % ' '.join(rebase_cmd))
self.Print(rebase_output.strip())
if not options.verbose:
# Make the output a little prettier. It's nice to have some
# whitespace between projects when syncing.
self.Print('')
@staticmethod
def _CheckMinVersion(min_version):
(ok, current_version) = scm.GIT.AssertVersion(min_version)
if not ok:
raise gclient_utils.Error('git version %s < minimum required %s' %
(current_version, min_version))
def _EnsureValidHeadObjectOrCheckout(self, revision, options, url):
# Special case handling if all 3 conditions are met:
# * the mirrors have recently changed, but the deps destination remains the same,
# * the git histories of the mirrors are conflicting,
# * the git cache is used.
# This manifests itself as the current checkout having an invalid HEAD commit on
# most git operations. Since the git cache is used, just delete the .git
# folder and re-create it by cloning.
try:
self._Capture(['rev-list', '-n', '1', 'HEAD'])
except subprocess2.CalledProcessError as e:
if ('fatal: bad object HEAD' in e.stderr
and self.cache_dir and self.cache_dir in url):
self.Print((
'Likely due to DEPS change with git cache_dir, '
'the current commit points to a no longer existing object.\n'
'%s' % e)
)
self._DeleteOrMove(options.force)
self._Clone(revision, url, options)
else:
raise
def _IsRebasing(self):
# Check for any of REBASE-i/REBASE-m/REBASE/AM. Unfortunately git doesn't
# have a plumbing command to determine whether a rebase is in progress, so
# for now emulate (more or less) git-rebase.sh / git-completion.bash.
g = os.path.join(self.checkout_path, '.git')
return (
os.path.isdir(os.path.join(g, "rebase-merge")) or
os.path.isdir(os.path.join(g, "rebase-apply")))
def _CheckClean(self, rev_str):
lockfile = os.path.join(self.checkout_path, ".git", "index.lock")
if os.path.exists(lockfile):
raise gclient_utils.Error(
'\n____ %s%s\n'
'\tYour repo is locked, possibly due to a concurrent git process.\n'
'\tIf no git executable is running, then clean up %r and try again.\n'
% (self.relpath, rev_str, lockfile))
# Make sure the tree is clean; see git-rebase.sh for reference
try:
scm.GIT.Capture(['update-index', '--ignore-submodules', '--refresh'],
cwd=self.checkout_path)
except subprocess2.CalledProcessError:
raise gclient_utils.Error('\n____ %s%s\n'
'\tYou have unstaged changes.\n'
'\tPlease commit, stash, or reset.\n'
% (self.relpath, rev_str))
try:
scm.GIT.Capture(['diff-index', '--cached', '--name-status', '-r',
'--ignore-submodules', 'HEAD', '--'],
cwd=self.checkout_path)
except subprocess2.CalledProcessError:
raise gclient_utils.Error('\n____ %s%s\n'
'\tYour index contains uncommitted changes\n'
'\tPlease commit, stash, or reset.\n'
% (self.relpath, rev_str))
def _CheckDetachedHead(self, rev_str, _options):
# HEAD is detached. Make sure it is safe to move away from (i.e., it is
# referenced by a commit). If not, error out -- most likely a rebase is
# in progress, try to detect so we can give a better error.
try:
scm.GIT.Capture(['name-rev', '--no-undefined', 'HEAD'],
cwd=self.checkout_path)
except subprocess2.CalledProcessError:
# Commit is not contained by any rev. See if the user is rebasing:
if self._IsRebasing():
# Punt to the user
raise gclient_utils.Error('\n____ %s%s\n'
'\tAlready in a conflict, i.e. (no branch).\n'
'\tFix the conflict and run gclient again.\n'
'\tOr to abort run:\n\t\tgit-rebase --abort\n'
'\tSee man git-rebase for details.\n'
% (self.relpath, rev_str))
# Let's just save off the commit so we can proceed.
name = ('saved-by-gclient-' +
self._Capture(['rev-parse', '--short', 'HEAD']))
self._Capture(['branch', '-f', name])
self.Print('_____ found an unreferenced commit and saved it as \'%s\'' %
name)
def _GetCurrentBranch(self):
# Returns name of current branch or None for detached HEAD
branch = self._Capture(['rev-parse', '--abbrev-ref=strict', 'HEAD'])
if branch == 'HEAD':
return None
return branch
def _Capture(self, args, **kwargs):
kwargs.setdefault('cwd', self.checkout_path)
kwargs.setdefault('stderr', subprocess2.PIPE)
env = scm.GIT.ApplyEnvVars(kwargs)
return subprocess2.check_output(['git'] + args, env=env, **kwargs).strip()
def _Checkout(self, options, ref, force=False, quiet=None):
"""Performs a 'git-checkout' operation.
Args:
options: The configured option set
ref: (str) The branch/commit to checkout
quiet: (bool/None) Whether or not the checkout should pass '--quiet'; if
'None', the behavior is inferred from 'options.verbose'.
Returns: (str) The output of the checkout operation
"""
if quiet is None:
quiet = (not options.verbose)
checkout_args = ['checkout']
if force:
checkout_args.append('--force')
if quiet:
checkout_args.append('--quiet')
checkout_args.append(ref)
return self._Capture(checkout_args)
def _Fetch(self, options, remote=None, prune=False, quiet=False):
cfg = gclient_utils.DefaultIndexPackConfig(self.url)
fetch_cmd = cfg + [
'fetch',
remote or self.remote,
]
if prune:
fetch_cmd.append('--prune')
if options.verbose:
fetch_cmd.append('--verbose')
elif quiet:
fetch_cmd.append('--quiet')
self._Run(fetch_cmd, options, show_header=options.verbose, retry=True)
# Return the revision that was fetched; this will be stored in 'FETCH_HEAD'
return self._Capture(['rev-parse', '--verify', 'FETCH_HEAD'])
def _UpdateBranchHeads(self, options, fetch=False):
"""Adds, and optionally fetches, "branch-heads" and "tags" refspecs
if requested."""
need_fetch = fetch
if hasattr(options, 'with_branch_heads') and options.with_branch_heads:
config_cmd = ['config', 'remote.%s.fetch' % self.remote,
'+refs/branch-heads/*:refs/remotes/branch-heads/*',
'^\\+refs/branch-heads/\\*:.*$']
self._Run(config_cmd, options)
need_fetch = True
if hasattr(options, 'with_tags') and options.with_tags:
config_cmd = ['config', 'remote.%s.fetch' % self.remote,
'+refs/tags/*:refs/tags/*',
'^\\+refs/tags/\\*:.*$']
self._Run(config_cmd, options)
need_fetch = True
if fetch and need_fetch:
self._Fetch(options)
def _Run(self, args, options, show_header=True, **kwargs):
# Disable 'unused options' warning | pylint: disable=W0613
kwargs.setdefault('cwd', self.checkout_path)
kwargs.setdefault('stdout', self.out_fh)
kwargs['filter_fn'] = self.filter
kwargs.setdefault('print_stdout', False)
env = scm.GIT.ApplyEnvVars(kwargs)
cmd = ['git'] + args
if show_header:
gclient_utils.CheckCallAndFilterAndHeader(cmd, env=env, **kwargs)
else:
gclient_utils.CheckCallAndFilter(cmd, env=env, **kwargs)
class SVNWrapper(SCMWrapper):
""" Wrapper for SVN """
name = 'svn'
_PRINTED_DEPRECATION = False
_MESSAGE = (
'Oh hai! You are using subversion. Chrome infra is eager to get rid of',
'svn support so please switch to git.',
'Tracking bug: http://crbug.com/475320',
'If you are a project owner, you may request git migration assistance at: ',
' https://code.google.com/p/chromium/issues/entry?template=Infra-Git')
def __init__(self, *args, **kwargs):
super(SVNWrapper, self).__init__(*args, **kwargs)
suppress_deprecated_notice = os.environ.get(
'SUPPRESS_DEPRECATED_SVN_NOTICE', False)
if not SVNWrapper._PRINTED_DEPRECATION and not suppress_deprecated_notice:
SVNWrapper._PRINTED_DEPRECATION = True
sys.stderr.write('\n'.join(self._MESSAGE) + '\n')
@staticmethod
def BinaryExists():
"""Returns true if the command exists."""
try:
result, version = scm.SVN.AssertVersion('1.4')
if not result:
raise gclient_utils.Error('SVN version is older than 1.4: %s' % version)
return result
except OSError:
return False
def GetCheckoutRoot(self):
return scm.SVN.GetCheckoutRoot(self.checkout_path)
def GetRevisionDate(self, revision):
"""Returns the given revision's date in ISO-8601 format (which contains the
time zone)."""
date = scm.SVN.Capture(
['propget', '--revprop', 'svn:date', '-r', revision],
os.path.join(self.checkout_path, '.'))
return date.strip()
def cleanup(self, options, args, _file_list):
"""Cleanup working copy."""
self._Run(['cleanup'] + args, options)
def diff(self, options, args, _file_list):
# NOTE: This function does not currently modify file_list.
if not os.path.isdir(self.checkout_path):
raise gclient_utils.Error('Directory %s is not present.' %
self.checkout_path)
self._Run(['diff'] + args, options)
def pack(self, _options, args, _file_list):
"""Generates a patch file which can be applied to the root of the
repository."""
if not os.path.isdir(self.checkout_path):
raise gclient_utils.Error('Directory %s is not present.' %
self.checkout_path)
gclient_utils.CheckCallAndFilter(
['svn', 'diff', '-x', '--ignore-eol-style'] + args,
cwd=self.checkout_path,
print_stdout=False,
filter_fn=SvnDiffFilterer(self.relpath, print_func=self.Print).Filter)
def update(self, options, args, file_list):
"""Runs svn to update or transparently checkout the working copy.
All updated files will be appended to file_list.
Raises:
Error: if can't get URL for relative path.
"""
# Only update if hg is not controlling the directory.
hg_path = os.path.join(self.checkout_path, '.hg')
if os.path.exists(hg_path):
self.Print('________ found .hg directory; skipping %s' % self.relpath)
return
if args:
raise gclient_utils.Error("Unsupported argument(s): %s" % ",".join(args))
# revision is the revision to match. It is None if no revision is specified,
# i.e. the 'deps ain't pinned'.
url, revision = gclient_utils.SplitUrlRevision(self.url)
# Keep the original unpinned url for reference in case the repo is switched.
base_url = url
managed = True
if options.revision:
# Override the revision number.
revision = str(options.revision)
if revision:
if revision != 'unmanaged':
forced_revision = True
# Reconstruct the url.
url = '%s@%s' % (url, revision)
rev_str = ' at %s' % revision
else:
managed = False
revision = None
else:
forced_revision = False
rev_str = ''
exists = os.path.exists(self.checkout_path)
if exists and managed:
# Git is only okay if it's a git-svn checkout of the right repo.
if scm.GIT.IsGitSvn(self.checkout_path):
remote_url = scm.GIT.Capture(['config', '--local', '--get',
'svn-remote.svn.url'],
cwd=self.checkout_path).rstrip()
if remote_url.rstrip('/') == base_url.rstrip('/'):
self.Print('\n_____ %s looks like a git-svn checkout. Skipping.'
% self.relpath)
return # TODO(borenet): Get the svn revision number?
# Get the existing scm url and the revision number of the current checkout.
if exists and managed:
try:
from_info = scm.SVN.CaptureLocalInfo(
[], os.path.join(self.checkout_path, '.'))
except (gclient_utils.Error, subprocess2.CalledProcessError):
self._DeleteOrMove(options.force)
exists = False
BASE_URLS = {
'/chrome/trunk/src': 'gs://chromium-svn-checkout/chrome/',
'/blink/trunk': 'gs://chromium-svn-checkout/blink/',
}
WHITELISTED_ROOTS = [
'svn://svn.chromium.org',
'svn://svn-mirror.golo.chromium.org',
]
if not exists:
try:
# Split out the revision number since it's not useful for us.
base_path = urlparse.urlparse(url).path.split('@')[0]
# Check to see if we're on a whitelisted root. We do this because
# only some svn servers have matching UUIDs.
local_parsed = urlparse.urlparse(url)
local_root = '%s://%s' % (local_parsed.scheme, local_parsed.netloc)
if ('CHROME_HEADLESS' in os.environ
and sys.platform == 'linux2' # TODO(hinoka): Enable for win/mac.
and base_path in BASE_URLS
and local_root in WHITELISTED_ROOTS):
# Use a tarball for initial sync if we are on a bot.
# Get an unauthenticated gsutil instance.
gsutil = download_from_google_storage.Gsutil(
GSUTIL_DEFAULT_PATH, boto_path=os.devnull)
gs_path = BASE_URLS[base_path]
_, out, _ = gsutil.check_call('ls', gs_path)
# So that we can get the most recent revision.
sorted_items = sorted(out.splitlines())
latest_checkout = sorted_items[-1]
tempdir = tempfile.mkdtemp()
self.Print('Downloading %s...' % latest_checkout)
code, out, err = gsutil.check_call('cp', latest_checkout, tempdir)
if code:
self.Print('%s\n%s' % (out, err))
raise Exception()
filename = latest_checkout.split('/')[-1]
tarball = os.path.join(tempdir, filename)
self.Print('Unpacking into %s...' % self.checkout_path)
gclient_utils.safe_makedirs(self.checkout_path)
# TODO(hinoka): Use 7z for windows.
cmd = ['tar', '--extract', '--ungzip',
'--directory', self.checkout_path,
'--file', tarball]
gclient_utils.CheckCallAndFilter(
cmd, stdout=sys.stdout, print_stdout=True)
self.Print('Deleting temp file')
gclient_utils.rmtree(tempdir)
# Rewrite the repository root to match.
tarball_url = scm.SVN.CaptureLocalInfo(
['.'], self.checkout_path)['Repository Root']
tarball_parsed = urlparse.urlparse(tarball_url)
tarball_root = '%s://%s' % (tarball_parsed.scheme,
tarball_parsed.netloc)
if tarball_root != local_root:
self.Print('Switching repository root to %s' % local_root)
self._Run(['switch', '--relocate', tarball_root,
local_root, self.checkout_path],
options)
except Exception as e:
self.Print('We tried to get a source tarball but failed.')
self.Print('Resuming normal operations.')
self.Print(str(e))
gclient_utils.safe_makedirs(os.path.dirname(self.checkout_path))
# We need to checkout.
command = ['checkout', url, self.checkout_path]
command = self._AddAdditionalUpdateFlags(command, options, revision)
self._RunAndGetFileList(command, options, file_list, self._root_dir)
return self.Svnversion()
if not managed:
self.Print(('________ unmanaged solution; skipping %s' % self.relpath))
if os.path.exists(os.path.join(self.checkout_path, '.svn')):
return self.Svnversion()
return
if 'URL' not in from_info:
raise gclient_utils.Error(
('gclient is confused. Couldn\'t get the url for %s.\n'
'Try using @unmanaged.\n%s') % (
self.checkout_path, from_info))
# Look for locked directories.
dir_info = scm.SVN.CaptureStatus(
None, os.path.join(self.checkout_path, '.'))
if any(d[0][2] == 'L' for d in dir_info):
try:
self._Run(['cleanup', self.checkout_path], options)
except subprocess2.CalledProcessError as e:
# Get the status again, svn cleanup may have cleaned up at least
# something.
dir_info = scm.SVN.CaptureStatus(
None, os.path.join(self.checkout_path, '.'))
# Try to fix the failures by removing troublesome files.
for d in dir_info:
if d[0][2] == 'L':
if d[0][0] == '!' and options.force:
# We don't pass any files/directories to CaptureStatus and set
# cwd=self.checkout_path, so we should get relative paths here.
assert not os.path.isabs(d[1])
path_to_remove = os.path.normpath(
os.path.join(self.checkout_path, d[1]))
self.Print('Removing troublesome path %s' % path_to_remove)
gclient_utils.rmtree(path_to_remove)
else:
self.Print(
'Not removing troublesome path %s automatically.' % d[1])
if d[0][0] == '!':
self.Print('You can pass --force to enable automatic removal.')
raise e
# Retrieve the current HEAD version because svn is slow at null updates.
if options.manually_grab_svn_rev and not revision:
from_info_live = scm.SVN.CaptureRemoteInfo(from_info['URL'])
revision = str(from_info_live['Revision'])
rev_str = ' at %s' % revision
if from_info['URL'].rstrip('/') != base_url.rstrip('/'):
# The repository url changed, need to switch.
try:
to_info = scm.SVN.CaptureRemoteInfo(url)
except (gclient_utils.Error, subprocess2.CalledProcessError):
# The url is invalid or the server is not accessible; it's safer to bail
# out right now.
raise gclient_utils.Error('This url is unreachable: %s' % url)
can_switch = ((from_info['Repository Root'] != to_info['Repository Root'])
and (from_info['UUID'] == to_info['UUID']))
if can_switch:
self.Print('_____ relocating %s to a new checkout' % self.relpath)
# We have different roots, so check if we can switch --relocate.
# Subversion only permits this if the repository UUIDs match.
# Perform the switch --relocate, then rewrite the from_url
# to reflect where we "are now." (This is the same way that
# Subversion itself handles the metadata when switch --relocate
# is used.) This makes the checks below for whether we
# can update to a revision or have to switch to a different
# branch work as expected.
# TODO(maruel): TEST ME !
command = ['switch', '--relocate',
from_info['Repository Root'],
to_info['Repository Root'],
self.relpath]
self._Run(command, options, cwd=self._root_dir)
from_info['URL'] = from_info['URL'].replace(
from_info['Repository Root'],
to_info['Repository Root'])
else:
if not options.force and not options.reset:
# Look for local modifications but ignore unversioned files.
for status in scm.SVN.CaptureStatus(None, self.checkout_path):
if status[0][0] != '?':
raise gclient_utils.Error(
('Can\'t switch the checkout to %s; UUIDs don\'t match and '
'there are local changes in %s. Delete the directory and '
'try again.') % (url, self.checkout_path))
# Ok delete it.
self.Print('_____ switching %s to a new checkout' % self.relpath)
gclient_utils.rmtree(self.checkout_path)
# We need to checkout.
command = ['checkout', url, self.checkout_path]
command = self._AddAdditionalUpdateFlags(command, options, revision)
self._RunAndGetFileList(command, options, file_list, self._root_dir)
return self.Svnversion()
# If the provided url has a revision number that matches the revision
# number of the existing directory, then we don't need to bother updating.
if not options.force and str(from_info['Revision']) == revision:
if options.verbose or not forced_revision:
self.Print('_____ %s%s' % (self.relpath, rev_str), timestamp=False)
else:
command = ['update', self.checkout_path]
command = self._AddAdditionalUpdateFlags(command, options, revision)
self._RunAndGetFileList(command, options, file_list, self._root_dir)
# If --reset and --delete_unversioned_trees are specified, remove any
# untracked files and directories.
if options.reset and options.delete_unversioned_trees:
for status in scm.SVN.CaptureStatus(None, self.checkout_path):
full_path = os.path.join(self.checkout_path, status[1])
if (status[0][0] == '?'
and os.path.isdir(full_path)
and not os.path.islink(full_path)):
self.Print('_____ removing unversioned directory %s' % status[1])
gclient_utils.rmtree(full_path)
return self.Svnversion()
def updatesingle(self, options, args, file_list):
filename = args.pop()
if scm.SVN.AssertVersion("1.5")[0]:
if not os.path.exists(os.path.join(self.checkout_path, '.svn')):
# Create an empty checkout and then update the one file we want. Future
# operations will only apply to the one file we checked out.
command = ["checkout", "--depth", "empty", self.url, self.checkout_path]
self._Run(command, options, cwd=self._root_dir)
if os.path.exists(os.path.join(self.checkout_path, filename)):
os.remove(os.path.join(self.checkout_path, filename))
command = ["update", filename]
self._RunAndGetFileList(command, options, file_list)
# After the initial checkout, we can use update as if it were any other
# dep.
self.update(options, args, file_list)
else:
# If the installed version of SVN doesn't support --depth, fall back to
# just exporting the file. This has the downside that revision
# information is not stored next to the file, so we will have to
# re-export the file every time we sync.
if not os.path.exists(self.checkout_path):
gclient_utils.safe_makedirs(self.checkout_path)
command = ["export", os.path.join(self.url, filename),
os.path.join(self.checkout_path, filename)]
command = self._AddAdditionalUpdateFlags(command, options,
options.revision)
self._Run(command, options, cwd=self._root_dir)
def revert(self, options, _args, file_list):
"""Reverts local modifications. Subversion specific.
All reverted files will be appended to file_list, even if Subversion
doesn't know about them.
"""
if not os.path.isdir(self.checkout_path):
if os.path.exists(self.checkout_path):
gclient_utils.rmtree(self.checkout_path)
# svn revert won't work if the directory doesn't exist. It needs to
# checkout instead.
self.Print('_____ %s is missing, synching instead' % self.relpath)
# Don't reuse the args.
return self.update(options, [], file_list)
if not os.path.isdir(os.path.join(self.checkout_path, '.svn')):
if os.path.isdir(os.path.join(self.checkout_path, '.git')):
self.Print('________ found .git directory; skipping %s' % self.relpath)
return
if os.path.isdir(os.path.join(self.checkout_path, '.hg')):
self.Print('________ found .hg directory; skipping %s' % self.relpath)
return
if not options.force:
raise gclient_utils.Error('Invalid checkout path, aborting')
self.Print(
'\n_____ %s is not a valid svn checkout, synching instead' %
self.relpath)
gclient_utils.rmtree(self.checkout_path)
# Don't reuse the args.
return self.update(options, [], file_list)
def printcb(file_status):
if file_list is not None:
file_list.append(file_status[1])
if logging.getLogger().isEnabledFor(logging.INFO):
logging.info('%s%s' % (file_status[0], file_status[1]))
else:
self.Print(os.path.join(self.checkout_path, file_status[1]))
scm.SVN.Revert(self.checkout_path, callback=printcb)
# Revert() may delete the directory altogether.
if not os.path.isdir(self.checkout_path):
# Don't reuse the args.
return self.update(options, [], file_list)
try:
# svn revert is so broken we don't even use it. Using
# "svn up --revision BASE" achieve the same effect.
# file_list will contain duplicates.
self._RunAndGetFileList(['update', '--revision', 'BASE'], options,
file_list)
except OSError as e:
# Maybe the directory disappeared in the meantime. Do not throw an exception.
logging.error('Failed to update:\n%s' % str(e))
def revinfo(self, _options, _args, _file_list):
"""Display revision"""
try:
return scm.SVN.CaptureRevision(self.checkout_path)
except (gclient_utils.Error, subprocess2.CalledProcessError):
return None
def runhooks(self, options, args, file_list):
self.status(options, args, file_list)
def status(self, options, args, file_list):
"""Display status information."""
command = ['status'] + args
if not os.path.isdir(self.checkout_path):
# svn status won't work if the directory doesn't exist.
self.Print(('\n________ couldn\'t run \'%s\' in \'%s\':\n'
'The directory does not exist.') %
(' '.join(command), self.checkout_path))
# There's no file list to retrieve.
else:
self._RunAndGetFileList(command, options, file_list)
def GetUsableRev(self, rev, _options):
"""Verifies the validity of the revision for this repository."""
if not scm.SVN.IsValidRevision(url='%s@%s' % (self.url, rev)):
raise NoUsableRevError(
( '%s isn\'t a valid revision. Please check that your safesync_url is\n'
'correct.') % rev)
return rev
def FullUrlForRelativeUrl(self, url):
# Find the fourth '/' and strip from there. A bit hackish.
return '/'.join(self.url.split('/')[:4]) + url
def _Run(self, args, options, **kwargs):
"""Runs a commands that goes to stdout."""
kwargs.setdefault('cwd', self.checkout_path)
gclient_utils.CheckCallAndFilterAndHeader(['svn'] + args,
always=options.verbose, **kwargs)
def Svnversion(self):
"""Runs the lowest checked out revision in the current project."""
info = scm.SVN.CaptureLocalInfo([], os.path.join(self.checkout_path, '.'))
return info['Revision']
def _RunAndGetFileList(self, args, options, file_list, cwd=None):
"""Runs a commands that goes to stdout and grabs the file listed."""
cwd = cwd or self.checkout_path
scm.SVN.RunAndGetFileList(
options.verbose,
args + ['--ignore-externals'],
cwd=cwd,
file_list=file_list)
@staticmethod
def _AddAdditionalUpdateFlags(command, options, revision):
"""Add additional flags to command depending on what options are set.
command should be a list of strings that represents an svn command.
This method returns a new list to be used as a command."""
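# Illustrative example (hypothetical values, assuming svn >= 1.6): with
# command=['update', 'src'], revision=1234, options.jobs=4 and options.force
# set, this returns ['update', 'src', '--revision', '1234',
# '--non-interactive', '--force', '--accept', 'theirs-conflict'].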
new_command = command[:]
if revision:
new_command.extend(['--revision', str(revision).strip()])
# We don't want interaction when jobs are used.
if options.jobs > 1:
new_command.append('--non-interactive')
# --force was added to 'svn update' in svn 1.5.
# --accept was added to 'svn update' in svn 1.6.
if not scm.SVN.AssertVersion('1.5')[0]:
return new_command
# It's annoying to have it block in the middle of a sync, so just pick
# sensible defaults.
if options.force:
new_command.append('--force')
if command[0] != 'checkout' and scm.SVN.AssertVersion('1.6')[0]:
new_command.extend(('--accept', 'theirs-conflict'))
elif options.manually_grab_svn_rev:
new_command.append('--force')
if command[0] != 'checkout' and scm.SVN.AssertVersion('1.6')[0]:
new_command.extend(('--accept', 'postpone'))
elif command[0] != 'checkout' and scm.SVN.AssertVersion('1.6')[0]:
new_command.extend(('--accept', 'postpone'))
return new_command
| [] | [] | ["CHROME_HEADLESS", "SUPPRESS_DEPRECATED_SVN_NOTICE"] | [] | ["CHROME_HEADLESS", "SUPPRESS_DEPRECATED_SVN_NOTICE"] | python | 2 | 0 | |
libraries/botframework-streaming/setup.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
from setuptools import setup
VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.14.0"
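# Illustrative: exporting packageVersion=4.15.0 in the build environment makes
# VERSION resolve to "4.15.0"; without it, VERSION falls back to "4.14.0".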
REQUIRES = [
"botbuilder-schema>=4.12.0",
"botframework-connector>=4.12.0",
]
root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(root, "botframework", "streaming", "about.py")) as f:
package_info = {}
info = f.read()
exec(info, package_info)
with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name=package_info["__title__"],
version=package_info["__version__"],
url=package_info["__uri__"],
author=package_info["__author__"],
description=package_info["__description__"],
keywords=["BotFrameworkStreaming", "bots", "ai", "botframework", "botframework",],
long_description=long_description,
long_description_content_type="text/x-rst",
license=package_info["__license__"],
packages=[
"botframework.streaming",
"botframework.streaming.payloads",
"botframework.streaming.payloads.assemblers",
"botframework.streaming.payloads.disassemblers",
"botframework.streaming.payloads.models",
"botframework.streaming.payload_transport",
"botframework.streaming.transport",
"botframework.streaming.transport.web_socket",
],
install_requires=REQUIRES,
classifiers=[
"Programming Language :: Python :: 3.7",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
| [] | [] | ["packageVersion"] | [] | ["packageVersion"] | python | 1 | 0 | |
cluster-autoscaler/cloudprovider/baiducloud/baiducloud-sdk-go/util/util.go
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bytes"
"crypto/aes"
"crypto/hmac"
"crypto/md5"
srand "crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/url"
"os"
"os/exec"
"path"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"k8s.io/klog"
)
// GetURL gets the full URL for a http request.
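// Illustrative example (hypothetical host): GetURL("https", "bcc.example.com",
// "/v2/instance", map[string]string{"a": "1"}) returns
// "https://bcc.example.com/v2/instance?a=1".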
func GetURL(protocol, host, uriPath string, params map[string]string) string {
if strings.Index(uriPath, "/") == 0 {
uriPath = uriPath[1:]
}
query := strings.Trim(ToCanonicalQueryString(params), " ")
if query == "" {
return fmt.Sprintf("%s/%s", HostToURL(host, protocol), uriPath)
}
return fmt.Sprintf("%s/%s?%s", HostToURL(host, protocol), uriPath, query)
}
// GetURIPath returns the path part of URI.
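// Illustrative example: GetURIPath("https://example.com/v2/zones") returns "/v2/zones".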
func GetURIPath(uri string) string {
uri = strings.Replace(uri, "://", "", 1)
index := strings.Index(uri, "/")
return uri[index:]
}
// URIEncodeExceptSlash encodes all characters of a string except the slash character.
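// Illustrative example: URIEncodeExceptSlash("/path/to/my file") returns "/path/to/my%20file".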
func URIEncodeExceptSlash(uri string) string {
var result string
for _, char := range uri {
str := fmt.Sprintf("%c", char)
if str == "/" {
result += str
} else {
result += URLEncode(str)
}
}
return result
}
// HmacSha256Hex returns the hex-encoded HMAC-SHA256 of message, keyed with key.
func HmacSha256Hex(key, message string) string {
mac := hmac.New(sha256.New, []byte(key))
mac.Write([]byte(message))
return hex.EncodeToString(mac.Sum(nil))
}
// PKCS7Padding pads ciphertext to a multiple of blockSize using PKCS#7 padding.
func PKCS7Padding(ciphertext []byte, blockSize int) []byte {
padding := blockSize - len(ciphertext)%blockSize
padtext := bytes.Repeat([]byte{byte(padding)}, padding)
return append(ciphertext, padtext...)
}
// AesECBEncryptHex returns an AES encrypted string
func AesECBEncryptHex(key, message string) (string, error) {
// ECB is left out intentionally because it's insecure, check https://github.com/golang/go/issues/5597
if len(key) < 16 {
return "", fmt.Errorf("Invalid SecretKey")
}
keyBytes := []byte(key[:16])
msgBytes := []byte(message)
block, err := aes.NewCipher(keyBytes)
if err != nil {
return "", err
}
blockSize := block.BlockSize()
msgBytes = PKCS7Padding(msgBytes, blockSize)
blockMode := NewECBEncrypter(block)
crypted := make([]byte, len(msgBytes))
blockMode.CryptBlocks(crypted, msgBytes)
return hex.EncodeToString(crypted), nil
}
// GetMD5 gets the MD5 value from data.
// Param base64Encode determines whether the result is base64-encoded instead of hex-encoded.
func GetMD5(data interface{}, base64Encode bool) string {
hash := md5.New()
if str, ok := data.(string); ok {
io.Copy(hash, strings.NewReader(str))
} else if byteArray, ok := data.([]byte); ok {
io.Copy(hash, bytes.NewReader(byteArray))
} else if reader, ok := data.(io.Reader); ok {
if f, ok := data.(io.Seeker); ok {
f.Seek(0, 0)
io.Copy(hash, reader)
f.Seek(0, 0)
} else {
io.Copy(hash, reader)
}
} else {
panic("data type should be string or []byte or io.Reader.")
}
if base64Encode {
return Base64Encode(hash.Sum(nil))
}
return hex.EncodeToString(hash.Sum(nil))
}
// GetSha256 gets SHA256 value from data.
func GetSha256(data interface{}) string {
hash := sha256.New()
if str, ok := data.(string); ok {
io.Copy(hash, strings.NewReader(str))
} else if byteArray, ok := data.([]byte); ok {
io.Copy(hash, bytes.NewReader(byteArray))
} else if reader, ok := data.(io.Reader); ok {
if f, ok := data.(io.Seeker); ok {
f.Seek(0, 0)
io.Copy(hash, reader)
f.Seek(0, 0)
} else {
io.Copy(hash, reader)
}
} else {
panic("data type should be string or []byte or io.Reader.")
}
return hex.EncodeToString(hash.Sum(nil))
}
// Base64Encode returns the base64-encoded string of data.
func Base64Encode(data []byte) string {
return base64.StdEncoding.EncodeToString(data)
}
// Contains determines whether a string slice contains a certain value.
// Ignore case when comparing if case insensitive.
func Contains(slice []string, value string, caseInsensitive bool) bool {
if caseInsensitive {
value = strings.ToLower(value)
}
for _, v := range slice {
if caseInsensitive {
v = strings.ToLower(v)
}
if value == v {
return true
}
}
return false
}
// MapContains determines whether the string map contains an entry for which
// compareFunc(key, value) returns true.
func MapContains(m map[string]string, compareFunc func(string, string) bool) bool {
for key, value := range m {
if compareFunc(key, value) {
return true
}
}
return false
}
// GetMapKey returns the key of the map for a certain value.
// Ignore case when comparing if case insensitive.
func GetMapKey(m map[string]string, key string, caseInsensitive bool) string {
if caseInsensitive {
key = strings.ToLower(key)
}
var tempKey string
for k := range m {
tempKey = k
if caseInsensitive {
tempKey = strings.ToLower(k)
}
if tempKey == key {
return k
}
}
return ""
}
// GetMapValue returns the value of the map for a certain key.
// Ignore case when comparing if case insensitive.
func GetMapValue(m map[string]string, key string, caseInsensitive bool) string {
if caseInsensitive {
for k, v := range m {
if strings.ToLower(k) == strings.ToLower(key) {
return v
}
}
}
return m[key]
}
// TimeToUTCString returns a utc string of a time instance.
func TimeToUTCString(t time.Time) string {
format := time.RFC3339 // 2006-01-02T15:04:05Z07:00
return t.UTC().Format(format)
}
// TimeStringToRFC1123 returns a formatted string of `time.RFC1123` format.
func TimeStringToRFC1123(str string) string {
t, err := time.Parse(time.RFC3339, str)
if err != nil {
t, err = time.Parse(time.RFC1123, str)
if err != nil {
panic("Time format invalid. The time format must be time.RFC3339 or time.RFC1123")
}
}
return t.Format(time.RFC1123)
}
// HostToURL returns the whole URL string.
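// Illustrative examples: HostToURL("example.com", "") returns "http://example.com",
// while HostToURL("https://example.com", "") returns the host unchanged.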
func HostToURL(host, protocol string) string {
if matched, _ := regexp.MatchString("^[[:alpha:]]+:", host); matched {
return host
}
if protocol == "" {
protocol = "http"
}
return fmt.Sprintf("%s://%s", protocol, host)
}
// ToCanonicalQueryString returns the canonicalized query string.
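// For illustration (assumed inputs, not from the original source), a params map
// such as {"b": "2", "a": "1 x"} canonicalizes to "a=1%20x&b=2": keys and values
// are URL-encoded, empty keys are skipped, and the encoded pairs are sorted.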
func ToCanonicalQueryString(params map[string]string) string {
if params == nil {
return ""
}
encodedQueryStrings := make([]string, 0, 10)
var query string
for key, value := range params {
if key != "" {
query = URLEncode(key) + "="
if value != "" {
query += URLEncode(value)
}
encodedQueryStrings = append(encodedQueryStrings, query)
}
}
sort.Strings(encodedQueryStrings)
return strings.Join(encodedQueryStrings, "&")
}
// ToCanonicalHeaderString returns the canonicalized string.
func ToCanonicalHeaderString(headerMap map[string]string) string {
headers := make([]string, 0, len(headerMap))
for key, value := range headerMap {
headers = append(headers,
fmt.Sprintf("%s:%s", URLEncode(strings.ToLower(key)),
URLEncode(strings.TrimSpace(value))))
}
sort.Strings(headers)
return strings.Join(headers, "\n")
}
// URLEncode encodes a string like Javascript's encodeURIComponent()
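// For example (illustrative input), URLEncode("a b+c") returns "a%20b%2Bc":
// url.QueryEscape yields "a+b%2Bc" and the "+" is then rewritten to "%20".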
func URLEncode(str string) string {
// BUG(go): see https://github.com/golang/go/issues/4013
// use %20 instead of the + character for encoding a space
return strings.Replace(url.QueryEscape(str), "+", "%20", -1)
}
// SliceToLower transforms each item of a slice to lowercase.
func SliceToLower(slice []string) {
for index, value := range slice {
slice[index] = strings.ToLower(value)
}
}
// MapKeyToLower transforms each key of a map to lowercase.
func MapKeyToLower(m map[string]string) {
temp := make(map[string]string, len(m))
for key, value := range m {
temp[strings.ToLower(key)] = value
delete(m, key)
}
for key, value := range temp {
m[key] = value
}
}
// ToMap converts anything to a map instance.
func ToMap(i interface{}, keys ...string) (map[string]interface{}, error) {
var m map[string]interface{}
var byteArray []byte
if str, ok := i.(string); ok {
byteArray = []byte(str)
} else if b, ok := i.([]byte); ok {
byteArray = b
} else {
b, err := json.Marshal(i)
if err != nil {
return nil, err
}
byteArray = b
}
if err := json.Unmarshal(byteArray, &m); err != nil {
return nil, err
}
if keys != nil && len(keys) > 0 {
result := make(map[string]interface{}, len(keys))
for _, k := range keys {
if v, ok := m[k]; ok {
result[k] = v
}
}
return result, nil
}
return m, nil
}
// ToJson converts anything to JSON.
func ToJson(i interface{}, keys ...string) ([]byte, error) {
byteArray, err := json.Marshal(i)
if keys == nil || len(keys) == 0 {
return byteArray, err
}
if err == nil {
m, err := ToMap(byteArray, keys...)
if err != nil {
return nil, err
}
byteArray, _ = json.Marshal(m)
}
return byteArray, err
}
// CheckFileExists checks if specified file exists.
func CheckFileExists(filename string) bool {
exist := true
if _, err := os.Stat(filename); os.IsNotExist(err) {
exist = false
}
return exist
}
// TempFileWithSize generates a temp file with specified size.
func TempFileWithSize(fileSize int64) (*os.File, error) {
f, err := TempFile(nil, "", "")
if err != nil {
return nil, err
}
if err = f.Truncate(fileSize); err != nil {
return nil, err
}
return f, nil
}
// TempFile generates a temp file with the specified content.
func TempFile(content []byte, dir, prefix string) (*os.File, error) {
if dir == "" {
home, err := HomeDir()
if err != nil {
return nil, err
}
dir = path.Join(home, "tmp")
}
if prefix == "" {
prefix = "temp"
}
if !CheckFileExists(dir) {
err := os.MkdirAll(dir, 0744)
if err != nil {
return nil, err
}
}
tmpfile, err := ioutil.TempFile(dir, prefix)
if err != nil {
return nil, err
}
if content != nil {
if _, err := tmpfile.Write(content); err != nil {
return nil, err
}
}
_, err = tmpfile.Seek(0, 0)
if err != nil {
return nil, err
}
return tmpfile, nil
}
var homeDir string
// HomeDir returns the HOME directory of current login user.
func HomeDir() (string, error) {
if homeDir != "" {
return homeDir, nil
}
var result string
var err error
if runtime.GOOS == "windows" {
result, err = dirWindows()
} else {
result, err = dirUnix()
}
if err != nil {
return "", err
}
homeDir = result
return result, nil
}
func dirUnix() (string, error) {
// First prefer the HOME environmental variable
if home := os.Getenv("HOME"); home != "" {
return home, nil
}
// If that fails, try getent
var stdout bytes.Buffer
cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
// If "getent" is missing, ignore it
		if err != exec.ErrNotFound {
return "", err
}
} else {
if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
// username:password:uid:gid:gecos:home:shell
passwdParts := strings.SplitN(passwd, ":", 7)
if len(passwdParts) > 5 {
return passwdParts[5], nil
}
}
}
// If all else fails, try the shell
stdout.Reset()
cmd = exec.Command("sh", "-c", "cd && pwd")
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return "", err
}
result := strings.TrimSpace(stdout.String())
if result == "" {
return "", errors.New("blank output when reading home directory")
}
return result, nil
}
func dirWindows() (string, error) {
if home := os.Getenv("HOME"); home != "" {
return home, nil
}
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
home := drive + path
if drive == "" || path == "" {
home = os.Getenv("USERPROFILE")
}
if home == "" {
return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
}
return home, nil
}
// Debug generates debug info for debug mode.
func Debug(title, message string) {
if title != "" {
klog.V(5).Infof("----------------------------DEBUG: start of %s ----------------------------", title)
}
klog.V(5).Infof(message)
if title != "" {
klog.V(5).Infof("----------------------------DEBUG: end of %s------------------------------", title)
}
}
// FormatTest returns a formatted string for unit test.
func FormatTest(funcName, got, expected string) string {
return fmt.Sprintf("%s failed. Got %s, expected %s", funcName, got, expected)
}
const dictionary = "_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
// CreateRandomString creates a random 32-character string.
func CreateRandomString() string {
b := make([]byte, 32)
l := len(dictionary)
_, err := srand.Read(b)
if err != nil {
		// fall back to insecure math/rand
rand.Seed(time.Now().UnixNano())
for i := range b {
b[i] = dictionary[rand.Int()%l]
}
} else {
for i, v := range b {
b[i] = dictionary[v%byte(l)]
}
}
return string(b)
}
|
[
"\"HOME\"",
"\"HOME\"",
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\""
] |
[] |
[
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
] |
[]
|
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
|
go
| 4 | 0 | |
allure-behave/features/steps/behave_steps.py
|
import os
from tempfile import mkdtemp
import allure_commons
from allure_commons_test.report import AllureReport
from behave.parser import Parser
from behave.runner import ModelRunner
from behave.configuration import Configuration
from behave.formatter._registry import make_formatters
from behave.formatter.base import StreamOpener
import tempfile
from contextlib import contextmanager
@given(u'feature definition')
@given(u'feature definition {lang}')
def feature_definition(context, **kwargs):
parser = Parser(language=kwargs.get('lang', None))
feature = parser.parse(context.text)
if hasattr(context, "feature_definition"):
context.feature_definition.append(feature)
else:
context.feature_definition = [feature]
@given(u'hooks implementation')
def hooks_implementations(context):
context.globals = {}
exec(context.text, context.globals)
@given(u'test plan')
def test_plan_helper(context):
tmp_dir = os.environ.get("TEST_TMP")
file, filename = tempfile.mkstemp(suffix=".json", dir=tmp_dir)
os.environ["ALLURE_TESTPLAN_PATH"] = filename
with os.fdopen(file, 'w') as tmp:
tmp.write(context.text)
context.test_plan = filename
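# Illustrative test plan payload; the exact JSON shape below is an assumption
# based on Allure's test plan feature, not something taken from this module:
#   {"version": "1.0", "tests": [{"selector": "Feature name: Scenario name"}]}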
@when(u'I run behave with allure formatter')
@when(u'I run behave with allure formatter with options "{args}"')
def run_behave_with_allure(context, **kwargs):
with test_context():
cmd_args = '-f allure_behave.formatter:AllureFormatter'
cmd = '{options} {cmd}'.format(cmd=cmd_args, options=kwargs.get('args', ''))
config = Configuration(command_args=cmd)
result_tmp_dir = mkdtemp(dir=os.environ.get('TEST_TMP', None))
stream_opener = StreamOpener(filename=result_tmp_dir)
model_runner = ModelRunner(config, context.feature_definition)
model_runner.formatters = make_formatters(config, [stream_opener])
model_runner.formatters[0].listener.fixture_context.enter()
model_runner.hooks = getattr(context, 'globals', dict())
model_runner.run()
model_runner.formatters[0].listener.__del__()
context.allure_report = AllureReport(result_tmp_dir)
os.environ.pop("ALLURE_TESTPLAN_PATH", None)
@contextmanager
def test_context():
def _unregister_plugins():
plugins = []
for name, plugin in allure_commons.plugin_manager.list_name_plugin():
allure_commons.plugin_manager.unregister(plugin=plugin, name=name)
plugins.append(plugin)
return plugins
plugins = _unregister_plugins()
yield
_unregister_plugins()
for plugin in plugins:
allure_commons.plugin_manager.register(plugin)
|
[] |
[] |
[
"ALLURE_TESTPLAN_PATH",
"TEST_TMP"
] |
[]
|
["ALLURE_TESTPLAN_PATH", "TEST_TMP"]
|
python
| 2 | 0 | |
core_test/search_test.go
|
package core_test
import (
"fmt"
std "github.com/gzg1984/golucene/analysis/standard"
_ "github.com/gzg1984/golucene/core/codec/lucene410"
docu "github.com/gzg1984/golucene/core/document"
"github.com/gzg1984/golucene/core/index"
"github.com/gzg1984/golucene/core/search"
"github.com/gzg1984/golucene/core/store"
"github.com/gzg1984/golucene/core/util"
// . "github.com/gzg1984/golucene/test_framework"
// "github.com/gzg1984/golucene/test_framework/analysis"
// . "github.com/gzg1984/golucene/test_framework/util"
. "github.com/gzg1984/gounit"
"os"
"testing"
)
// Hook up custom test logic into Go's test runner.
func TestBefore(t *testing.T) {
fmt.Printf("tests_codec: %v\n", os.Getenv("tests_codec"))
// util.SetDefaultInfoStream(util.NewPrintStreamInfoStream(os.Stdout))
index.DefaultSimilarity = func() index.Similarity {
return search.NewDefaultSimilarity()
}
// This controls how suite-level rules are nested. It is important
// that _all_ rules declared in testcase are executed in proper
// order if they depend on each other.
// ClassRuleChain(ClassEnvRule)
// BeforeSuite(t)
}
func TestBasicIndexAndSearch(t *testing.T) {
q := search.NewTermQuery(index.NewTerm("foo", "bar"))
q.SetBoost(-42)
os.RemoveAll(".gltest")
directory, err := store.OpenFSDirectory(".gltest")
It(t).Should("has no error: %v", err).Assert(err == nil)
It(t).Should("has valid directory").Assert(directory != nil)
fmt.Println("Directory", directory)
defer directory.Close()
analyzer := std.NewStandardAnalyzer()
conf := index.NewIndexWriterConfig(util.VERSION_LATEST, analyzer)
writer, err := index.NewIndexWriter(directory, conf)
It(t).Should("has no error: %v", err).Assert(err == nil)
d := docu.NewDocument()
d.Add(docu.NewTextFieldFromString("foo", "bar", docu.STORE_YES))
err = writer.AddDocument(d.Fields())
It(t).Should("has no error: %v", err).Assert(err == nil)
err = writer.Close() // ensure index is written
It(t).Should("has no error: %v", err).Assert(err == nil)
reader, err := index.OpenDirectoryReader(directory)
It(t).Should("has no error: %v", err).Assert(err == nil)
defer reader.Close()
searcher := search.NewIndexSearcher(reader)
res, err := searcher.Search(q, nil, 1000)
It(t).Should("has no error: %v", err).Assert(err == nil)
hits := res.ScoreDocs
It(t).Should("expect 1 hits, but %v only.", len(hits)).Assert(len(hits) == 1)
It(t).Should("expect score to be negative (got %v)", hits[0].Score).Verify(hits[0].Score < 0)
explain, err := searcher.Explain(q, hits[0].Doc)
It(t).Should("has no error: %v", err).Assert(err == nil)
It(t).Should("score doesn't match explanation (%v vs %v)", hits[0].Score, explain.Value()).Verify(isSimilar(hits[0].Score, explain.Value(), 0.001))
It(t).Should("explain doesn't think doc is a match").Verify(explain.IsMatch())
}
// func TestNegativeQueryBoost(t *testing.T) {
// Test(t, func(t *T) {
// q := search.NewTermQuery(index.NewTerm("foo", "bar"))
// q.SetBoost(-42)
// t.Assert(-42 == q.Boost())
// directory := NewDirectory()
// defer directory.Close()
// analyzer := analysis.NewMockAnalyzerWithRandom(Random())
// conf := NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)
// writer, err := index.NewIndexWriter(directory, conf)
// if err != nil {
// t.Error(err)
// }
// defer writer.Close()
// d := docu.NewDocument()
// d.Add(NewTextField("foo", "bar", true))
// writer.AddDocument(d.Fields())
// writer.Close() // ensure index is written
// reader, err := index.OpenDirectoryReader(directory)
// if err != nil {
// t.Error(err)
// }
// defer reader.Close()
// searcher := NewSearcher(reader)
// res, err := searcher.Search(q, nil, 1000)
// if err != nil {
// t.Error(err)
// }
// hits := res.ScoreDocs
// t.Assert(1 == len(hits))
// t.Assert2(hits[0].Score < 0, fmt.Sprintf("score is not negative: %v", hits[0].Score))
// explain, err := searcher.Explain(q, hits[0].Doc)
// if err != nil {
// t.Error(err)
// }
// t.Assert2(isSimilar(hits[0].Score, explain.Value(), 0.001), "score doesn't match explanation")
// t.Assert2(explain.IsMatch(), "explain doesn't think doc is a match")
// })
// }
func isSimilar(f1, f2, delta float32) bool {
diff := f1 - f2
return diff >= 0 && diff < delta || diff < 0 && -diff < delta
}
func TestAfter(t *testing.T) {
// AfterSuite(t)
}
|
[
"\"tests_codec\""
] |
[] |
[
"tests_codec"
] |
[]
|
["tests_codec"]
|
go
| 1 | 0 | |
08_regular _expressions/10_happiness_index.py
|
import re
input_data = input()
happy_pattern = r'[:;\(\*c\[][\)D\*\]\}:;]'
sad_pattern = r'[:;\)D\]][\(c\[\{:;]'
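# For reference (examples assumed, not from the task statement): happy_pattern
# matches two-character emoticons such as ':)', ';)' and ':D', while sad_pattern
# matches ones such as ':(' and ';['.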
happy_regex = re.compile(happy_pattern)
sad_regex = re.compile(sad_pattern)
happy_count_raw = happy_regex.findall(input_data)
sad_count_raw = sad_regex.findall(input_data)
happy_emoticons = [item for item in happy_count_raw if len(list(set(item))) > 1]
sad_emoticons = [item for item in sad_count_raw if len(list(set(item))) > 1]
happy_index = len(happy_emoticons) / len(sad_emoticons)
if happy_index >= 2:
print(f'Happiness index: {happy_index:.2f} :D')
elif happy_index > 1:
print(f'Happiness index: {happy_index:.2f} :)')
elif happy_index == 1:
print(f'Happiness index: {happy_index:.2f} :|')
else:
print(f'Happiness index: {happy_index:.2f} :(')
print(f'[Happy count: {len(happy_emoticons)}, Sad count: {len(sad_emoticons)}]')
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
tests/edalize_common.py
|
from collections import OrderedDict
import os.path
import shutil
import pytest
from edalize import get_edatool
tests_dir = os.path.dirname(__file__)
class TestFixture:
'''A fixture that makes an edalize backend with work_root directory
Create this object using the make_edalize_test factory fixture. This passes
through its `tool_name` and sets up a temporary directory for `work_root`,
then passes its keyword arguments through to the TestFixture initializer.
Args:
tool_name: The name of the tool
work_root: The directory to treat as a work root
test_name: The name to call the backend. Defaults to
`'test_<tool_name>_0'`
param_types: A list of parameter types. Defaults to `['plusarg',
'vlogdefine', 'vlogparam']` (the parameter types supported
by most simulators).
files: A list of files to use. Defaults to `None`, which means to use
:py:data:`FILES`.
tool_options: Dictionary passed to _setup_backend. Defaults to `{}`.
ref_dir: A reference directory relative to `test_<tool_name>`. Defaults
to `'.'`
use_vpi: If true, set up backend with definitions from :attr:`VPI`.
Defaults to `False`.
'''
def __init__(self,
tool_name,
work_root,
test_name=None,
param_types=['plusarg', 'vlogdefine', 'vlogparam'],
files=None,
tool_options={},
ref_dir='.',
use_vpi=False):
raw_ref_dir = os.path.join(tests_dir, 'test_' + tool_name, ref_dir)
self.test_name = ('test_{}_0'.format(tool_name)
if test_name is None else test_name)
self.ref_dir = os.path.normpath(raw_ref_dir)
self.work_root = work_root
self.backend = _setup_backend(self.test_name, tool_name, param_types,
files, tool_options, work_root, use_vpi)
def compare_files(self, files, ref_subdir='.'):
'''Check some files in the work root match those in the ref directory
The files argument gives the list of files to check. These are
interpreted as paths relative to the work directory and relative to
self.ref_dir / ref_subdir.
This is a wrapper around edalize_common.compare_files: see its
documentation for how to use the :envvar:`GOLDEN_RUN` environment
variable to copy across a golden reference.
'''
ref_dir = os.path.normpath(os.path.join(self.ref_dir, ref_subdir))
return compare_files(ref_dir, self.work_root, files)
def copy_to_work_root(self, path):
shutil.copy(os.path.join(self.ref_dir, path),
os.path.join(self.work_root, path))
@pytest.fixture
def make_edalize_test(monkeypatch, tmpdir):
'''A factory fixture to make an edalize backend with work_root directory
The returned factory method takes a `tool_name` (the name of the tool) and
the keyword arguments supported by :class:`TestFixture`. It returns a
:class:`TestFixture` object, whose `work_root` is a temporary directory.
'''
# Prepend directory `mock_commands` to PATH environment variable
monkeypatch.setenv('PATH', os.path.join(tests_dir, 'mock_commands'), ':')
created = []
def _fun(tool_name, **kwargs):
work_root = tmpdir / str(len(created))
work_root.mkdir()
fixture = TestFixture(tool_name, str(work_root), **kwargs)
created.append(fixture)
return fixture
return _fun
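# A minimal usage sketch of the factory fixture above; the tool name and the
# checked file are illustrative assumptions, not taken from this module:
#
#   def test_some_tool(make_edalize_test):
#       tf = make_edalize_test('icarus')
#       tf.backend.configure()
#       tf.compare_files(['Makefile'])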
def compare_files(ref_dir, work_root, files):
"""Check that all *files* in *work_root* match those in *ref_dir*.
If the environment variable :envvar:`GOLDEN_RUN` is set, the *files* in
*work_root* are copied to *ref_dir* to become the new reference.
"""
for f in files:
reference_file = os.path.join(ref_dir, f)
generated_file = os.path.join(work_root, f)
assert os.path.exists(generated_file)
if 'GOLDEN_RUN' in os.environ:
shutil.copy(generated_file, reference_file)
with open(reference_file) as fref, open(generated_file) as fgen:
assert fref.read() == fgen.read(), f
def param_gen(paramtypes):
"""Generate dictionary of definitions in *paramtypes* list."""
defs = OrderedDict()
for paramtype in paramtypes:
for datatype in ['bool', 'int', 'str']:
if datatype == 'int':
default = 42
elif datatype == 'str':
default = 'hello'
else:
default = True
defs[paramtype+'_'+datatype] = {
'datatype' : datatype,
'default' : default,
'description' : '',
'paramtype' : paramtype}
return defs
def _setup_backend(name, tool, paramtypes, files,
tool_options, work_root, use_vpi):
"""Set up a backend.
The backend is called *name*, is set up for *tool* with *tool_options*,
*paramtypes*, and, if *use_vpi* is ``True``, definitions from :attr:`VPI`.
If *files* is None, files are taken from :attr:`FILES`.
"""
parameters = param_gen(paramtypes)
_vpi = []
if use_vpi:
_vpi = VPI
for v in VPI:
for f in v['src_files']:
_f = os.path.join(work_root, f)
if not os.path.exists(os.path.dirname(_f)):
os.makedirs(os.path.dirname(_f))
with open(_f, 'a'):
os.utime(_f, None)
edam = {'name' : name,
'files' : FILES if files is None else files,
'parameters' : parameters,
'tool_options' : {tool : tool_options},
'toplevel' : 'top_module',
'vpi' : _vpi}
return get_edatool(tool)(edam=edam, work_root=work_root)
FILES = [
{"name": "qip_file.qip", "file_type": "QIP"},
{"name": "qsys_file", "file_type": "QSYS"},
{"name": "sdc_file", "file_type": "SDC"},
{"name": "bmm_file", "file_type": "BMM"},
{"name": "sv_file.sv", "file_type": "systemVerilogSource"},
{"name": "pcf_file.pcf", "file_type": "PCF"},
{"name": "ucf_file.ucf", "file_type": "UCF"},
{"name": "user_file", "file_type": "user"},
{"name": "tcl_file.tcl", "file_type": "tclSource"},
{"name": "waiver_file.waiver", "file_type": "waiver"},
{"name": "vlog_file.v", "file_type": "verilogSource"},
{"name": "vlog05_file.v", "file_type": "verilogSource-2005"},
{"name": "vlog_incfile", "file_type": "verilogSource", "is_include_file": True},
{"name": "vhdl_file.vhd", "file_type": "vhdlSource"},
{"name": "vhdl_lfile", "file_type": "vhdlSource", "logical_name": "libx"},
{"name": "vhdl2008_file", "file_type": "vhdlSource-2008"},
{"name": "xci_file.xci", "file_type": "xci"},
{"name": "xdc_file.xdc", "file_type": "xdc"},
{"name": "bootrom.mem", "file_type": "mem"},
{"name": "c_file.c", "file_type": "cSource"},
{"name": "cpp_file.cpp", "file_type": "cppSource"},
{"name": "c_header.h", "file_type": "cSource", "is_include_file": True},
{"name": "c_header.h", "file_type": "cppSource", "is_include_file": True},
{"name": "config.vbl", "file_type": "veribleLintRules"},
{"name": "verible_waiver.vbw", "file_type": "veribleLintWaiver"},
{"name": "verible_waiver2.vbw", "file_type": "veribleLintWaiver"},
{'name': 'config.sby.j2', 'file_type': 'sbyConfigTemplate'},
{"name": "another_sv_file.sv", "file_type": "systemVerilogSource"},
]
"""Files of all supported file types."""
VPI = [
{'src_files': ['src/vpi_1/f1',
'src/vpi_1/f3'],
'include_dirs': ['src/vpi_1/'],
'libs': ['some_lib'],
'name': 'vpi1'},
{'src_files': ['src/vpi_2/f4'],
'include_dirs': [],
'libs': [],
'name': 'vpi2'}]
"""Predefined VPI modules to build."""
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
google/appengine/_internal/django/core/management/color.py
|
"""
Sets up the terminal color scheme.
"""
from builtins import object
import os
import sys
from google.appengine._internal.django.utils import termcolors
def supports_color():
"""
Returns True if the running system's terminal supports color, and False
otherwise.
"""
unsupported_platform = (sys.platform in ('win32', 'Pocket PC'))
# isatty is not always implemented, #6223.
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if unsupported_platform or not is_a_tty:
return False
return True
def color_style():
"""Returns a Style object with the Django color scheme."""
if not supports_color():
style = no_style()
else:
DJANGO_COLORS = os.environ.get('DJANGO_COLORS', '')
color_settings = termcolors.parse_color_setting(DJANGO_COLORS)
if color_settings:
class dummy(object): pass
style = dummy()
# The nocolor palette has all available roles.
            # Use that palette as the basis for populating
# the palette as defined in the environment.
for role in termcolors.PALETTES[termcolors.NOCOLOR_PALETTE]:
format = color_settings.get(role,{})
setattr(style, role, termcolors.make_style(**format))
# For backwards compatibility,
# set style for ERROR_OUTPUT == ERROR
style.ERROR_OUTPUT = style.ERROR
else:
style = no_style()
return style
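# For illustration (values assumed from Django's documented DJANGO_COLORS
# syntax, not from this module), the environment variable parsed above can
# either name a palette or override individual roles, e.g.:
#   DJANGO_COLORS="light"
#   DJANGO_COLORS="error=yellow/blue,blink;notice=magenta"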
def no_style():
"""Returns a Style object that has no colors."""
class dummy(object):
def __getattr__(self, attr):
return lambda x: x
return dummy()
|
[] |
[] |
[
"DJANGO_COLORS"
] |
[]
|
["DJANGO_COLORS"]
|
python
| 1 | 0 | |
tensorflow/python/keras/layers/normalization.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalization layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
import os
from tensorflow.python.ops import quantemu_ops
class BatchNormalizationBase(Layer):
"""Base class of Batch normalization layer (Ioffe and Szegedy, 2014).
Normalize the activations of the previous layer at each batch,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.
Arguments:
axis: Integer, the axis that should be normalized
(typically the features axis).
For instance, after a `Conv2D` layer with
`data_format="channels_first"`,
set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: If True, multiply by `gamma`.
If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
fused: if `True`, use a faster, fused implementation, or raise a ValueError
if the fused implementation cannot be used. If `None`, use the faster
      implementation if possible. If False, do not use the fused
implementation.
trainable: Boolean, if `True` the variables will be marked as trainable.
virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
which means batch normalization is performed across the whole batch. When
`virtual_batch_size` is not `None`, instead perform "Ghost Batch
Normalization", which creates virtual sub-batches which are each
normalized separately (with shared gamma, beta, and moving statistics).
Must divide the actual batch size during execution.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random.uniform(shape[-1:], 0.93, 1.07),
tf.random.uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied. Cannot be specified if
virtual_batch_size is specified.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode.
- `training=True`: The layer will normalize its inputs using the
mean and variance of the current batch of inputs.
- `training=False`: The layer will normalize its inputs using the
mean and variance of its moving statistics, learned during training.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
References:
- [Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
{{TRAINABLE_ATTRIBUTE_NOTE}}
"""
# By default, the base class uses V2 behavior. The BatchNormalization V1
# subclass sets this to False to use the V1 behavior.
_USE_V2_BEHAVIOR = True
def __init__(self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
moving_mean_initializer='zeros',
moving_variance_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=None,
trainable=True,
virtual_batch_size=None,
adjustment=None,
name=None,
**kwargs):
super(BatchNormalizationBase, self).__init__(
name=name, **kwargs)
if isinstance(axis, list):
self.axis = axis[:]
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError('axis must be int or list, type given: %s'
% type(axis))
self.momentum = momentum
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.moving_mean_initializer = initializers.get(moving_mean_initializer)
self.moving_variance_initializer = initializers.get(
moving_variance_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
self.renorm = renorm
self.virtual_batch_size = virtual_batch_size
self.adjustment = adjustment
if self._USE_V2_BEHAVIOR:
if fused:
self._raise_if_fused_cannot_be_used()
# We leave fused as None if self._fused_can_be_used()==True, since we
# still may set it to False in self.build() if the input rank is not 4.
elif fused is None and not self._fused_can_be_used():
fused = False
elif fused is None:
fused = True
self.supports_masking = True
self.fused = fused
self._bessels_correction_test_only = True
self._trainable_var = None
self.trainable = trainable
if renorm:
renorm_clipping = renorm_clipping or {}
keys = ['rmax', 'rmin', 'dmax']
if set(renorm_clipping) - set(keys):
raise ValueError('renorm_clipping %s contains keys not in %s' %
(renorm_clipping, keys))
self.renorm_clipping = renorm_clipping
self.renorm_momentum = renorm_momentum
def _raise_if_fused_cannot_be_used(self):
"""Raises a ValueError if fused implementation cannot be used.
In addition to the checks done in this function, the input tensors rank must
be 4. The input rank check can only be done once the input shape is known.
"""
# Currently fused batch norm doesn't support renorm. It also only supports a
# channel dimension on axis 1 or 3, when no virtual batch size or adjustment
# is used.
if self.renorm:
raise ValueError('Passing both fused=True and renorm=True is '
'unsupported')
axis = [self.axis] if isinstance(self.axis, int) else self.axis
# Axis -3 is equivalent to 1, and axis -1 is equivalent to 3, because the
# input rank is required to be 4 (which is checked later).
if len(axis) > 1 or axis[0] not in (-3, -1, 1, 3):
raise ValueError('Passing fused=True is only supported when axis is 1 '
'or 3')
if self.virtual_batch_size is not None:
raise ValueError('Passing fused=True is unsupported when '
'virtual_batch_size is specified.')
if self.adjustment is not None:
raise ValueError('Passing fused=True is unsupported when '
'adjustment is specified.')
def _fused_can_be_used(self):
try:
self._raise_if_fused_cannot_be_used()
return True
except ValueError:
return False
@property
def trainable(self):
return self._trainable
@trainable.setter
def trainable(self, value):
self._trainable = value
if self._trainable_var is not None:
self._trainable_var.update_value(value)
def _get_trainable_var(self):
if self._trainable_var is None:
self._trainable_var = K.freezable_variable(
self._trainable, name=self.name + '_trainable')
return self._trainable_var
@property
def _param_dtype(self):
# Raise parameters of fp16 batch norm to fp32
if self.dtype == dtypes.float16 or self.dtype == dtypes.bfloat16:
return dtypes.float32
else:
return self.dtype or dtypes.float32
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if not input_shape.ndims:
raise ValueError('Input has undefined rank:', input_shape)
ndims = len(input_shape)
# Convert axis to list and resolve negatives
if isinstance(self.axis, int):
self.axis = [self.axis]
for idx, x in enumerate(self.axis):
if x < 0:
self.axis[idx] = ndims + x
# Validate axes
for x in self.axis:
if x < 0 or x >= ndims:
raise ValueError('Invalid axis: %d' % x)
if len(self.axis) != len(set(self.axis)):
raise ValueError('Duplicate axis: %s' % self.axis)
if self.virtual_batch_size is not None:
if self.virtual_batch_size <= 0:
raise ValueError('virtual_batch_size must be a positive integer that '
'divides the true batch size of the input Tensor')
# If using virtual batches, the first dimension must be the batch
# dimension and cannot be the batch norm axis
if 0 in self.axis:
raise ValueError('When using virtual_batch_size, the batch dimension '
'must be 0 and thus axis cannot include 0')
if self.adjustment is not None:
raise ValueError('When using virtual_batch_size, adjustment cannot '
'be specified')
if self.fused in (None, True):
# TODO(yaozhang): if input is not 4D, reshape it to 4D and reshape the
# output back to its original shape accordingly.
if self._USE_V2_BEHAVIOR:
if self.fused is None:
self.fused = (ndims == 4)
elif self.fused and ndims != 4:
raise ValueError('Batch normalization layers with fused=True only '
'support 4D input tensors.')
else:
assert self.fused is not None
self.fused = (ndims == 4 and self._fused_can_be_used())
# TODO(chrisying): fused batch norm is currently not supported for
# multi-axis batch norm and by extension virtual batches. In some cases,
# it might be possible to use fused batch norm but would require reshaping
# the Tensor to 4D with the axis in 1 or 3 (preferred 1) which is
# particularly tricky. A compromise might be to just support the most
# common use case (turning 5D w/ virtual batch to NCHW)
if self.fused:
if self.axis == [1]:
self._data_format = 'NCHW'
elif self.axis == [3]:
self._data_format = 'NHWC'
else:
raise ValueError('Unsupported axis, fused batch norm only supports '
'axis == [1] or axis == [3]')
axis_to_dim = {x: input_shape.dims[x].value for x in self.axis}
for x in axis_to_dim:
if axis_to_dim[x] is None:
raise ValueError('Input has undefined `axis` dimension. Input shape: ',
input_shape)
self.input_spec = InputSpec(ndim=ndims, axes=axis_to_dim)
if len(axis_to_dim) == 1 and self.virtual_batch_size is None:
# Single axis batch norm (most common/default use-case)
param_shape = (list(axis_to_dim.values())[0],)
else:
# Parameter shape is the original shape but with 1 in all non-axis dims
param_shape = [axis_to_dim[i] if i in axis_to_dim
else 1 for i in range(ndims)]
if self.virtual_batch_size is not None:
# When using virtual batches, add an extra dim at index 1
param_shape.insert(1, 1)
for idx, x in enumerate(self.axis):
self.axis[idx] = x + 1 # Account for added dimension
if self.scale:
self.gamma = self.add_weight(
name='gamma',
shape=param_shape,
dtype=self._param_dtype,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
trainable=True,
experimental_autocast=False)
else:
self.gamma = None
if self.fused:
self._gamma_const = K.constant(
1.0, dtype=self._param_dtype, shape=param_shape)
if self.center:
self.beta = self.add_weight(
name='beta',
shape=param_shape,
dtype=self._param_dtype,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
trainable=True,
experimental_autocast=False)
else:
self.beta = None
if self.fused:
self._beta_const = K.constant(
0.0, dtype=self._param_dtype, shape=param_shape)
try:
# Disable variable partitioning when creating the moving mean and variance
if hasattr(self, '_scope') and self._scope:
partitioner = self._scope.partitioner
self._scope.set_partitioner(None)
else:
partitioner = None
self.moving_mean = self.add_weight(
name='moving_mean',
shape=param_shape,
dtype=self._param_dtype,
initializer=self.moving_mean_initializer,
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.MEAN,
experimental_autocast=False)
self.moving_variance = self.add_weight(
name='moving_variance',
shape=param_shape,
dtype=self._param_dtype,
initializer=self.moving_variance_initializer,
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.MEAN,
experimental_autocast=False)
if self.renorm:
# Create variables to maintain the moving mean and standard deviation.
# These are used in training and thus are different from the moving
# averages above. The renorm variables are colocated with moving_mean
# and moving_variance.
# NOTE: below, the outer `with device` block causes the current device
# stack to be cleared. The nested ones use a `lambda` to set the desired
# device and ignore any devices that may be set by the custom getter.
def _renorm_variable(name, shape):
"""Create a renorm variable."""
var = self.add_weight(
name=name,
shape=shape,
dtype=self._param_dtype,
initializer=init_ops.zeros_initializer(),
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.MEAN,
experimental_autocast=False)
return var
with distribution_strategy_context.get_strategy(
).extended.colocate_vars_with(self.moving_mean):
self.renorm_mean = _renorm_variable('renorm_mean', param_shape)
self.renorm_mean_weight = _renorm_variable('renorm_mean_weight', ())
# We initialize renorm_stddev to 0, and maintain the (0-initialized)
# renorm_stddev_weight. This allows us to (1) mix the average
# stddev with the minibatch stddev early in training, and (2) compute
# the unbiased average stddev by dividing renorm_stddev by the weight.
with distribution_strategy_context.get_strategy(
).extended.colocate_vars_with(self.moving_variance):
self.renorm_stddev = _renorm_variable('renorm_stddev', param_shape)
self.renorm_stddev_weight = _renorm_variable('renorm_stddev_weight',
())
finally:
if partitioner:
self._scope.set_partitioner(partitioner)
self.built = True
def _assign_moving_average(self, variable, value, momentum, inputs_size):
with K.name_scope('AssignMovingAvg') as scope:
with ops.colocate_with(variable):
decay = ops.convert_to_tensor(1.0 - momentum, name='decay')
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
update_delta = (
variable - math_ops.cast(value, variable.dtype)) * decay
if inputs_size is not None:
update_delta = array_ops.where(inputs_size > 0, update_delta,
K.zeros_like(update_delta))
return state_ops.assign_sub(variable, update_delta, name=scope)
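  # The update above is the usual exponential moving average written as a single
  # assign_sub: variable <- momentum * variable + (1 - momentum) * value, since
  # variable - (variable - value) * (1 - momentum) expands to exactly that.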
def _fused_batch_norm(self, inputs, training):
"""Returns the output of fused batch norm."""
beta = self.beta if self.center else self._beta_const
gamma = self.gamma if self.scale else self._gamma_const
# TODO(b/129279393): Support zero batch input in non DistributionStrategy
# code as well.
# TODO(b/130185866): Support zero batch input in graph mode.
if ops.executing_eagerly_outside_functions(
) and distribution_strategy_context.has_strategy():
inputs_size = array_ops.size(inputs)
else:
inputs_size = None
def _fused_batch_norm_training():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
epsilon=self.epsilon,
data_format=self._data_format)
def _fused_batch_norm_inference():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
mean=self.moving_mean,
variance=self.moving_variance,
epsilon=self.epsilon,
is_training=False,
data_format=self._data_format)
output, mean, variance = tf_utils.smart_cond(
training, _fused_batch_norm_training, _fused_batch_norm_inference)
if not self._bessels_correction_test_only:
# Remove Bessel's correction to be consistent with non-fused batch norm.
# Note that the variance computed by fused batch norm is
# with Bessel's correction.
sample_size = math_ops.cast(
array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
variance *= factor
training_value = tf_utils.constant_value(training)
if training_value is None:
momentum = tf_utils.smart_cond(training,
lambda: self.momentum,
lambda: 1.0)
else:
momentum = ops.convert_to_tensor(self.momentum)
if training_value or training_value is None:
if distribution_strategy_context.in_cross_replica_context():
strategy = distribution_strategy_context.get_strategy()
def mean_update():
return strategy.extended.update(self.moving_mean,
self._assign_moving_average,
(mean, self.momentum, inputs_size))
def variance_update():
return strategy.extended.update(
self.moving_variance, self._assign_moving_average,
(variance, self.momentum, inputs_size))
else:
def mean_update():
return self._assign_moving_average(self.moving_mean, mean, momentum,
inputs_size)
def variance_update():
return self._assign_moving_average(self.moving_variance, variance,
momentum, inputs_size)
self.add_update(mean_update, inputs=True)
self.add_update(variance_update, inputs=True)
return output
def _renorm_correction_and_moments(self, mean, variance, training,
inputs_size):
"""Returns the correction and update values for renorm."""
stddev = math_ops.sqrt(variance + self.epsilon)
# Compute the average mean and standard deviation, as if they were
# initialized with this batch's moments.
mixed_renorm_mean = (self.renorm_mean +
(1. - self.renorm_mean_weight) * mean)
mixed_renorm_stddev = (self.renorm_stddev +
(1. - self.renorm_stddev_weight) * stddev)
# Compute the corrections for batch renorm.
r = stddev / mixed_renorm_stddev
d = (mean - mixed_renorm_mean) / mixed_renorm_stddev
# Ensure the corrections use pre-update moving averages.
with ops.control_dependencies([r, d]):
mean = array_ops.identity(mean)
stddev = array_ops.identity(stddev)
rmin, rmax, dmax = [self.renorm_clipping.get(key)
for key in ['rmin', 'rmax', 'dmax']]
if rmin is not None:
r = math_ops.maximum(r, rmin)
if rmax is not None:
r = math_ops.minimum(r, rmax)
if dmax is not None:
d = math_ops.maximum(d, -dmax)
d = math_ops.minimum(d, dmax)
# When not training, use r=1, d=0.
r = tf_utils.smart_cond(training, lambda: r, lambda: array_ops.ones_like(r))
d = tf_utils.smart_cond(training,
lambda: d,
lambda: array_ops.zeros_like(d))
def _update_renorm_variable(var, weight, value, inputs_size):
"""Updates a moving average and weight, returns the unbiased value."""
value = array_ops.identity(value)
def _do_update():
"""Updates the var and weight, returns their updated ratio."""
# Update the variables without zero debiasing. The debiasing will be
# accomplished by dividing the exponential moving average by the weight.
# For example, after a single update, the moving average would be
        # (1-decay) * value, and the weight will be 1-decay, with their ratio
# giving the value.
# Make sure the weight is not updated until before r and d computation.
with ops.control_dependencies([value]):
weight_value = array_ops.constant(1., dtype=weight.dtype)
new_var = self._assign_moving_average(var, value, self.renorm_momentum,
inputs_size)
new_weight = self._assign_moving_average(weight, weight_value,
self.renorm_momentum,
inputs_size)
# TODO(yuefengz): the updates to var and weighted can not be batched
# together if we fetch their updated values here. Consider calculating
# new values and delaying the updates.
return new_var / new_weight
def _fake_update():
return array_ops.identity(var)
return tf_utils.smart_cond(training, _do_update, _fake_update)
# TODO(yuefengz): colocate the operations
new_mean = _update_renorm_variable(self.renorm_mean,
self.renorm_mean_weight, mean,
inputs_size)
new_stddev = _update_renorm_variable(self.renorm_stddev,
self.renorm_stddev_weight, stddev,
inputs_size)
# Make sqrt(moving_variance + epsilon) = new_stddev.
new_variance = math_ops.square(new_stddev) - self.epsilon
return (r, d, new_mean, new_variance)
def _moments(self, inputs, reduction_axes, keep_dims):
mean, variance = nn.moments(inputs, reduction_axes, keep_dims=keep_dims)
# TODO(b/129279393): Support zero batch input in non DistributionStrategy
# code as well.
# TODO(b/130185866): Support zero batch input in graph mode.
if (ops.executing_eagerly_outside_functions() and
distribution_strategy_context.has_strategy()):
inputs_size = array_ops.size(inputs)
mean = array_ops.where(inputs_size > 0, mean, K.zeros_like(mean))
variance = array_ops.where(inputs_size > 0, variance,
K.zeros_like(variance))
return mean, variance
def _get_training_value(self, training=None):
if training is None:
training = K.learning_phase()
if self._USE_V2_BEHAVIOR:
if isinstance(training, int):
training = bool(training)
if base_layer_utils.is_in_keras_graph():
training = math_ops.logical_and(training, self._get_trainable_var())
else:
training = math_ops.logical_and(training, self.trainable)
return training
def call(self, inputs, training=None):
training = self._get_training_value(training)
enable_quantop_bnorm = int(os.getenv('ENABLE_QUANTOP_BNORM', 0))
dformat = 'unknown'
if enable_quantop_bnorm == 1 :
inputs = quantemu_ops.quantize_emu(inputs,
data_format=dformat,
allocate_copy=int(0),
data_type=int(os.getenv('QUANTEMU_BNORM_DATA_TYPE', 0)),
precision=int(os.getenv('QUANTEMU_PRECISION_BNORM_INPUTS', 23)),
exponent_bits=int(os.getenv('QUANTEMU_EXPBITS', 5)),
channel_blocking_type=int(os.getenv('QUANTEMU_CBLOCK_TYPE_BNORM_INPUTS', 0)),
channels_per_block=int(os.getenv('QUANTEMU_CBLOCK_SIZE_INPUTS', 0)),
round_mode=int(os.getenv('QUANTEMU_BNORM_RMODE_INPUTS', 0)))
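    # The quantization path above is driven entirely by environment variables;
    # an illustrative (assumed) shell configuration would be:
    #   export ENABLE_QUANTOP_BNORM=1
    #   export QUANTEMU_PRECISION_BNORM_INPUTS=8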
if self.virtual_batch_size is not None:
# Virtual batches (aka ghost batches) can be simulated by reshaping the
# Tensor and reusing the existing batch norm implementation
original_shape = [-1] + inputs.shape.as_list()[1:]
expanded_shape = [self.virtual_batch_size, -1] + original_shape[1:]
# Will cause errors if virtual_batch_size does not divide the batch size
inputs = array_ops.reshape(inputs, expanded_shape)
def undo_virtual_batching(outputs):
outputs = array_ops.reshape(outputs, original_shape)
return outputs
if self.fused:
outputs = self._fused_batch_norm(inputs, training=training)
if self.virtual_batch_size is not None:
# Currently never reaches here since fused_batch_norm does not support
# virtual batching
outputs = undo_virtual_batching(outputs)
return outputs
# Compute the axes along which to reduce the mean / variance
input_shape = inputs.shape
ndims = len(input_shape)
reduction_axes = [i for i in range(ndims) if i not in self.axis]
if self.virtual_batch_size is not None:
del reduction_axes[1] # Do not reduce along virtual batch dim
# Broadcasting only necessary for single-axis batch norm where the axis is
# not the last dimension
broadcast_shape = [1] * ndims
broadcast_shape[self.axis[0]] = input_shape.dims[self.axis[0]].value
def _broadcast(v):
if (v is not None and len(v.shape) != ndims and
reduction_axes != list(range(ndims - 1))):
return array_ops.reshape(v, broadcast_shape)
return v
scale, offset = _broadcast(self.gamma), _broadcast(self.beta)
def _compose_transforms(scale, offset, then_scale, then_offset):
if then_scale is not None:
scale *= then_scale
offset *= then_scale
if then_offset is not None:
offset += then_offset
return (scale, offset)
# Determine a boolean value for `training`: could be True, False, or None.
training_value = tf_utils.constant_value(training)
if training_value is not False:
if self.adjustment:
adj_scale, adj_bias = self.adjustment(array_ops.shape(inputs))
# Adjust only during training.
adj_scale = tf_utils.smart_cond(training,
lambda: adj_scale,
lambda: array_ops.ones_like(adj_scale))
adj_bias = tf_utils.smart_cond(training,
lambda: adj_bias,
lambda: array_ops.zeros_like(adj_bias))
scale, offset = _compose_transforms(adj_scale, adj_bias, scale, offset)
# Some of the computations here are not necessary when training==False
# but not a constant. However, this makes the code simpler.
keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1
mean, variance = self._moments(
math_ops.cast(inputs, self._param_dtype),
reduction_axes,
keep_dims=keep_dims)
moving_mean = self.moving_mean
moving_variance = self.moving_variance
mean = tf_utils.smart_cond(training,
lambda: mean,
lambda: ops.convert_to_tensor(moving_mean))
variance = tf_utils.smart_cond(
training,
lambda: variance,
lambda: ops.convert_to_tensor(moving_variance))
if self.virtual_batch_size is not None:
# This isn't strictly correct since in ghost batch norm, you are
# supposed to sequentially update the moving_mean and moving_variance
# with each sub-batch. However, since the moving statistics are only
# used during evaluation, it is more efficient to just update in one
# step and should not make a significant difference in the result.
new_mean = math_ops.reduce_mean(mean, axis=1, keepdims=True)
new_variance = math_ops.reduce_mean(variance, axis=1, keepdims=True)
else:
new_mean, new_variance = mean, variance
if ops.executing_eagerly_outside_functions(
) and distribution_strategy_context.has_strategy():
inputs_size = array_ops.size(inputs)
else:
inputs_size = None
if self.renorm:
r, d, new_mean, new_variance = self._renorm_correction_and_moments(
new_mean, new_variance, training, inputs_size)
# When training, the normalized values (say, x) will be transformed as
# x * gamma + beta without renorm, and (x * r + d) * gamma + beta
# = x * (r * gamma) + (d * gamma + beta) with renorm.
r = _broadcast(array_ops.stop_gradient(r, name='renorm_r'))
d = _broadcast(array_ops.stop_gradient(d, name='renorm_d'))
scale, offset = _compose_transforms(r, d, scale, offset)
if distribution_strategy_context.in_cross_replica_context():
strategy = distribution_strategy_context.get_strategy()
def _do_update(var, value):
"""Compute the updates for mean and variance."""
return strategy.extended.update(
var,
self._assign_moving_average, (value, self.momentum, inputs_size),
group=False)
# We need to unwrap the moving_mean or moving_variance in the case of
# training being false to match the output of true_fn and false_fn
# in the smart cond.
def mean_update():
true_branch = lambda: _do_update(self.moving_mean, new_mean)
false_branch = lambda: strategy.unwrap(self.moving_mean)
return tf_utils.smart_cond(training, true_branch, false_branch)
def variance_update():
return tf_utils.smart_cond(
training, lambda: _do_update(self.moving_variance, new_variance),
lambda: strategy.unwrap(self.moving_variance))
else:
def _do_update(var, value):
"""Compute the updates for mean and variance."""
return self._assign_moving_average(var, value, self.momentum,
inputs_size)
def mean_update():
true_branch = lambda: _do_update(self.moving_mean, new_mean)
false_branch = lambda: self.moving_mean
return tf_utils.smart_cond(training, true_branch, false_branch)
def variance_update():
true_branch = lambda: _do_update(self.moving_variance, new_variance)
false_branch = lambda: self.moving_variance
return tf_utils.smart_cond(training, true_branch, false_branch)
self.add_update(mean_update, inputs=True)
self.add_update(variance_update, inputs=True)
else:
mean, variance = self.moving_mean, self.moving_variance
mean = math_ops.cast(mean, inputs.dtype)
variance = math_ops.cast(variance, inputs.dtype)
if offset is not None:
offset = math_ops.cast(offset, inputs.dtype)
if scale is not None:
scale = math_ops.cast(scale, inputs.dtype)
# TODO(reedwm): Maybe do math in float32 if given float16 inputs, if doing
# math in float16 hurts validation accuracy of popular models like resnet.
outputs = nn.batch_normalization(inputs,
_broadcast(mean),
_broadcast(variance),
offset,
scale,
self.epsilon)
# If some components of the shape got lost due to adjustments, fix that.
outputs.set_shape(input_shape)
if self.virtual_batch_size is not None:
outputs = undo_virtual_batching(outputs)
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'axis': self.axis,
'momentum': self.momentum,
'epsilon': self.epsilon,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_initializer': initializers.serialize(self.gamma_initializer),
'moving_mean_initializer':
initializers.serialize(self.moving_mean_initializer),
'moving_variance_initializer':
initializers.serialize(self.moving_variance_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_constraint': constraints.serialize(self.gamma_constraint)
}
# Only add TensorFlow-specific parameters if they are set, so as to preserve
# model compatibility with external Keras.
if self.renorm:
config['renorm'] = True
config['renorm_clipping'] = self.renorm_clipping
config['renorm_momentum'] = self.renorm_momentum
if self.virtual_batch_size is not None:
config['virtual_batch_size'] = self.virtual_batch_size
# Note: adjustment is not serializable.
if self.adjustment is not None:
logging.warning('The `adjustment` function of this `BatchNormalization` '
'layer cannot be serialized and has been omitted from '
'the layer config. It will not be included when '
're-creating the layer from the saved config.')
base_config = super(BatchNormalizationBase, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def replace_in_base_docstring(replacements):
string = BatchNormalizationBase.__doc__
for old, new in replacements:
assert old in string
    string = string.replace(old, new)
return string
@keras_export(v1=['keras.layers.BatchNormalization']) # pylint: disable=missing-docstring
class BatchNormalization(BatchNormalizationBase):
__doc__ = replace_in_base_docstring(
[('''
fused: if `True`, use a faster, fused implementation, or raise a ValueError
if the fused implementation cannot be used. If `None`, use the faster
      implementation if possible. If False, do not use the fused
implementation.''',
'''
fused: if `None` or `True`, use a faster, fused implementation if possible.
If `False`, use the system recommended implementation.'''),
('{{TRAINABLE_ATTRIBUTE_NOTE}}', '')])
_USE_V2_BEHAVIOR = False
@keras_export('keras.layers.LayerNormalization')
class LayerNormalization(Layer):
"""Layer normalization layer (Ba et al., 2016).
Normalize the activations of the previous layer for each given example in a
batch independently, rather than across a batch like Batch Normalization.
i.e. applies a transformation that maintains the mean activation within each
example close to 0 and the activation standard deviation close to 1.
Arguments:
axis: Integer or List/Tuple. The axis that should be normalized
(typically the features axis).
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: If True, multiply by `gamma`.
If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
trainable: Boolean, if `True` the variables will be marked as trainable.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
References:
- [Layer Normalization](https://arxiv.org/abs/1607.06450)
"""
def __init__(self,
axis=-1,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
trainable=True,
name=None,
**kwargs):
super(LayerNormalization, self).__init__(
name=name, trainable=trainable, **kwargs)
if isinstance(axis, (list, tuple)):
self.axis = axis[:]
elif isinstance(axis, int):
self.axis = axis
else:
raise ValueError('Expected an int or a list/tuple of ints for the '
'argument \'axis\', but received instead: %s' % axis)
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
self.supports_masking = True
def build(self, input_shape):
ndims = len(input_shape)
if ndims is None:
raise ValueError('Input shape %s has undefined rank.' % input_shape)
# Convert axis to list and resolve negatives
if isinstance(self.axis, int):
self.axis = [self.axis]
for idx, x in enumerate(self.axis):
if x < 0:
self.axis[idx] = ndims + x
# Validate axes
for x in self.axis:
if x < 0 or x >= ndims:
raise ValueError('Invalid axis: %d' % x)
if len(self.axis) != len(set(self.axis)):
raise ValueError('Duplicate axis: {}'.format(tuple(self.axis)))
param_shape = [input_shape[dim] for dim in self.axis]
if self.scale:
self.gamma = self.add_weight(
name='gamma',
shape=param_shape,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
trainable=True,
experimental_autocast=False)
else:
self.gamma = None
if self.center:
self.beta = self.add_weight(
name='beta',
shape=param_shape,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
trainable=True,
experimental_autocast=False)
else:
self.beta = None
def call(self, inputs):
# Compute the axes along which to reduce the mean / variance
input_shape = inputs.shape
ndims = len(input_shape)
# Calculate the moments on the last axis (layer activations).
mean, variance = nn.moments(inputs, self.axis, keep_dims=True)
# Broadcasting only necessary for norm where the axis is not just
# the last dimension
broadcast_shape = [1] * ndims
for dim in self.axis:
broadcast_shape[dim] = input_shape.dims[dim].value
def _broadcast(v):
if (v is not None and len(v.shape) != ndims and
self.axis != [ndims - 1]):
return array_ops.reshape(v, broadcast_shape)
return v
scale, offset = _broadcast(self.gamma), _broadcast(self.beta)
# Compute layer normalization using the batch_normalization function.
outputs = nn.batch_normalization(
inputs,
mean,
variance,
offset=offset,
scale=scale,
variance_epsilon=self.epsilon)
# If some components of the shape got lost due to adjustments, fix that.
outputs.set_shape(input_shape)
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'axis': self.axis,
'epsilon': self.epsilon,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_initializer': initializers.serialize(self.gamma_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_constraint': constraints.serialize(self.gamma_constraint)
}
base_config = super(LayerNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
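# A minimal usage sketch of the layer defined above, assuming it is
# re-exported by the public API as `tf.keras.layers.LayerNormalization`:
#
#   import tensorflow as tf
#   x = tf.random.normal([2, 3, 4])                    # (batch, time, features)
#   layer_norm = tf.keras.layers.LayerNormalization(axis=-1)
#   y = layer_norm(x)
#   # Each example is normalized over the feature axis, so the per-example
#   # mean is close to 0 and the standard deviation is close to 1.
#   print(tf.reduce_mean(y, axis=-1))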
|
[] |
[] |
[
"QUANTEMU_CBLOCK_TYPE_BNORM_INPUTS",
"ENABLE_QUANTOP_BNORM",
"QUANTEMU_BNORM_DATA_TYPE",
"QUANTEMU_BNORM_RMODE_INPUTS",
"QUANTEMU_PRECISION_BNORM_INPUTS",
"QUANTEMU_CBLOCK_SIZE_INPUTS",
"QUANTEMU_EXPBITS"
] |
[]
|
["QUANTEMU_CBLOCK_TYPE_BNORM_INPUTS", "ENABLE_QUANTOP_BNORM", "QUANTEMU_BNORM_DATA_TYPE", "QUANTEMU_BNORM_RMODE_INPUTS", "QUANTEMU_PRECISION_BNORM_INPUTS", "QUANTEMU_CBLOCK_SIZE_INPUTS", "QUANTEMU_EXPBITS"]
|
python
| 7 | 0 | |
stz/client_test.go
|
package stz
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"os"
"testing"
"github.com/kylelemons/godebug/diff"
)
func TestGet(t *testing.T) {
const wID = "42"
var wkspc = workspace(t)
server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
if req.URL.String() != fmt.Sprintf("/workspace/%s", wID) {
t.Errorf("got path %s, expected %s", req.URL.String(), fmt.Sprintf("/workspace/%s", wID))
}
validateHeaders(t, req)
rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(http.StatusOK)
json.NewEncoder(rw).Encode(wkspc)
}))
defer server.Close()
// Substitute structurizr service host and scheme for tests.
host := Host
defer func() { Host = host }()
u, err := url.Parse(server.URL)
if err != nil {
t.Fatalf("failed to parse test server URL %q: %s", server.URL, err)
}
Host = u.Host
scheme := Scheme
defer func() { Scheme = scheme }()
Scheme = "http"
c := NewClient("key", "secret")
wk, err := c.Get(wID)
if err != nil {
t.Errorf("Get failed with %s", err)
}
js, err := json.MarshalIndent(wk, "", " ")
if err != nil {
t.Fatalf("failed to marshal response for comparison: %s", err)
}
js2, _ := json.MarshalIndent(wkspc, "", " ")
dif := diff.Diff(string(js), string(js2))
if dif != "" {
t.Errorf("invalid response content, got vs. expected:\n%s", dif)
}
}
func TestAuth(t *testing.T) {
wid, key, secret := config(t)
c := NewClient(key, secret)
_, err := c.Get(wid)
if err != nil {
t.Errorf("failed to retrieve workspace: %s", err)
}
}
func validateHeaders(t *testing.T, req *http.Request) {
if req.Header.Get("nonce") == "" {
t.Errorf("missing nonce header")
}
if req.Header.Get("X-Authorization") == "" {
t.Errorf("missing X-Authorization header")
}
}
func workspace(t *testing.T) (workspace *Workspace) {
err := json.Unmarshal([]byte(bigBankPLC), &workspace)
if err != nil {
t.Fatalf("unable to load test workspace: %s", err)
}
return
}
func config(t *testing.T) (workspaceID, key, secret string) {
workspaceID = os.Getenv("STRUCTURIZR_WORKSPACE_ID")
if workspaceID == "" {
t.Skip("STRUCTURIZR_WORKSPACE_ID not set")
}
key = os.Getenv("STRUCTURIZR_KEY")
if key == "" {
t.Skip("STRUCTURIZR_KEY not set")
}
secret = os.Getenv("STRUCTURIZR_SECRET")
if secret == "" {
t.Skip("STRUCTURIZR_SECRET not set")
}
return
}
// Serialized workspace taken from
// https://raw.githubusercontent.com/structurizr/json/master/examples/big-bank-plc.json
var bigBankPLC = `{
"name": "Big Bank plc",
"description": "This is an example workspace to illustrate the key features of Structurizr, based around a fictional online banking system.",
"model": {
"enterprise": {
"name": "Big Bank plc"
},
"people": [
{
"id": "15",
"tags": "Element,Person,Bank Staff",
"name": "Back Office Staff",
"description": "Administration and support staff within the bank.",
"relationships": [
{
"id": "16",
"tags": "Relationship,Synchronous",
"sourceId": "15",
"destinationId": "4",
"description": "Uses",
"interactionStyle": "Synchronous"
}
],
"location": "Internal"
},
{
"id": "12",
"tags": "Element,Person,Bank Staff",
"name": "Customer Service Staff",
"description": "Customer service staff within the bank.",
"relationships": [
{
"id": "13",
"tags": "Relationship,Synchronous",
"sourceId": "12",
"destinationId": "4",
"description": "Uses",
"interactionStyle": "Synchronous"
}
],
"location": "Internal"
},
{
"id": "1",
"tags": "Element,Person",
"name": "Personal Banking Customer",
"description": "A customer of the bank, with personal bank accounts.",
"relationships": [
{
"id": "23",
"tags": "Relationship,Synchronous",
"sourceId": "1",
"destinationId": "17",
"description": "Views account balances, and makes payments using",
"interactionStyle": "Synchronous"
},
{
"id": "11",
"tags": "Relationship,Synchronous",
"sourceId": "1",
"destinationId": "9",
"description": "Withdraws cash using",
"interactionStyle": "Synchronous"
},
{
"id": "14",
"tags": "Relationship,Synchronous",
"sourceId": "1",
"destinationId": "12",
"description": "Asks questions to",
"technology": "Telephone",
"interactionStyle": "Synchronous"
},
{
"id": "3",
"tags": "Relationship,Synchronous",
"sourceId": "1",
"destinationId": "2",
"description": "Views account balances, and makes payments using",
"interactionStyle": "Synchronous"
},
{
"id": "24",
"tags": "Relationship,Synchronous",
"sourceId": "1",
"destinationId": "18",
"description": "Views account balances, and makes payments using",
"interactionStyle": "Synchronous"
},
{
"id": "22",
"tags": "Relationship,Synchronous",
"sourceId": "1",
"destinationId": "19",
"description": "Visits bigbank.com/ib using",
"technology": "HTTPS",
"interactionStyle": "Synchronous"
}
],
"location": "External"
}
],
"softwareSystems": [
{
"id": "9",
"tags": "Element,Software System,Existing System",
"name": "ATM",
"description": "Allows customers to withdraw cash.",
"relationships": [
{
"id": "10",
"tags": "Relationship,Synchronous",
"sourceId": "9",
"destinationId": "4",
"description": "Uses",
"interactionStyle": "Synchronous"
}
],
"location": "Internal"
},
{
"id": "6",
"tags": "Element,Software System,Existing System",
"name": "E-mail System",
"description": "The internal Microsoft Exchange e-mail system.",
"relationships": [
{
"id": "8",
"tags": "Relationship,Synchronous",
"sourceId": "6",
"destinationId": "1",
"description": "Sends e-mails to",
"interactionStyle": "Synchronous"
}
],
"location": "Internal"
},
{
"id": "2",
"tags": "Element,Software System",
"name": "Internet Banking System",
"description": "Allows customers to view information about their bank accounts, and make payments.",
"relationships": [
{
"id": "7",
"tags": "Relationship,Synchronous",
"sourceId": "2",
"destinationId": "6",
"description": "Sends e-mail using",
"interactionStyle": "Synchronous"
},
{
"id": "5",
"tags": "Relationship,Synchronous",
"sourceId": "2",
"destinationId": "4",
"description": "Gets account information from, and makes payments using",
"interactionStyle": "Synchronous"
}
],
"location": "Internal",
"containers": [
{
"id": "20",
"tags": "Element,Container",
"name": "API Application",
"description": "Provides Internet banking functionality via a JSON/HTTPS API.",
"relationships": [
{
"id": "27",
"tags": "Relationship,Synchronous",
"sourceId": "20",
"destinationId": "4",
"description": "Makes API calls to",
"technology": "XML/HTTPS",
"interactionStyle": "Synchronous"
},
{
"id": "26",
"tags": "Relationship,Synchronous",
"sourceId": "20",
"destinationId": "21",
"description": "Reads from and writes to",
"technology": "JDBC",
"interactionStyle": "Synchronous"
},
{
"id": "28",
"tags": "Relationship,Synchronous",
"sourceId": "20",
"destinationId": "6",
"description": "Sends e-mail using",
"technology": "SMTP",
"interactionStyle": "Synchronous"
}
],
"technology": "Java and Spring MVC",
"components": [
{
"id": "30",
"tags": "Element,Component",
"name": "Accounts Summary Controller",
"description": "Provides customers with a summary of their bank accounts.",
"relationships": [
{
"id": "42",
"tags": "Relationship,Synchronous",
"sourceId": "30",
"destinationId": "33",
"description": "Uses",
"interactionStyle": "Synchronous"
}
],
"technology": "Spring MVC Rest Controller",
"size": 0
},
{
"id": "34",
"tags": "Element,Component",
"name": "E-mail Component",
"description": "Sends e-mails to users.",
"relationships": [
{
"id": "47",
"tags": "Relationship,Synchronous",
"sourceId": "34",
"destinationId": "6",
"description": "Sends e-mail using",
"interactionStyle": "Synchronous"
}
],
"technology": "Spring Bean",
"size": 0
},
{
"id": "33",
"tags": "Element,Component",
"name": "Mainframe Banking System Facade",
"description": "A facade onto the mainframe banking system.",
"relationships": [
{
"id": "46",
"tags": "Relationship,Synchronous",
"sourceId": "33",
"destinationId": "4",
"description": "Uses",
"technology": "XML/HTTPS",
"interactionStyle": "Synchronous"
}
],
"technology": "Spring Bean",
"size": 0
},
{
"id": "31",
"tags": "Element,Component",
"name": "Reset Password Controller",
"description": "Allows users to reset their passwords with a single use URL.",
"relationships": [
{
"id": "44",
"tags": "Relationship,Synchronous",
"sourceId": "31",
"destinationId": "34",
"description": "Uses",
"interactionStyle": "Synchronous"
},
{
"id": "43",
"tags": "Relationship,Synchronous",
"sourceId": "31",
"destinationId": "32",
"description": "Uses",
"interactionStyle": "Synchronous"
}
],
"technology": "Spring MVC Rest Controller",
"size": 0
},
{
"id": "32",
"tags": "Element,Component",
"name": "Security Component",
"description": "Provides functionality related to signing in, changing passwords, etc.",
"relationships": [
{
"id": "45",
"tags": "Relationship,Synchronous",
"sourceId": "32",
"destinationId": "21",
"description": "Reads from and writes to",
"technology": "JDBC",
"interactionStyle": "Synchronous"
}
],
"technology": "Spring Bean",
"size": 0
},
{
"id": "29",
"tags": "Element,Component",
"name": "Sign In Controller",
"description": "Allows users to sign in to the Internet Banking System.",
"relationships": [
{
"id": "41",
"tags": "Relationship,Synchronous",
"sourceId": "29",
"destinationId": "32",
"description": "Uses",
"interactionStyle": "Synchronous"
}
],
"technology": "Spring MVC Rest Controller",
"size": 0
}
]
},
{
"id": "21",
"tags": "Element,Container,Database",
"name": "Database",
"description": "Stores user registration information, hashed authentication credentials, access logs, etc.",
"technology": "Oracle Database Schema"
},
{
"id": "18",
"tags": "Element,Container,Mobile App",
"name": "Mobile App",
"description": "Provides a limited subset of the Internet banking functionality to customers via their mobile device.",
"relationships": [
{
"id": "39",
"tags": "Relationship,Synchronous",
"sourceId": "18",
"destinationId": "31",
"description": "Makes API calls to",
"technology": "JSON/HTTPS",
"interactionStyle": "Synchronous"
},
{
"id": "49",
"tags": "Relationship,Synchronous",
"sourceId": "18",
"destinationId": "20",
"description": "Makes API calls to",
"technology": "JSON/HTTPS",
"interactionStyle": "Synchronous"
},
{
"id": "38",
"tags": "Relationship,Synchronous",
"sourceId": "18",
"destinationId": "29",
"description": "Makes API calls to",
"technology": "JSON/HTTPS",
"interactionStyle": "Synchronous"
},
{
"id": "40",
"tags": "Relationship,Synchronous",
"sourceId": "18",
"destinationId": "30",
"description": "Makes API calls to",
"technology": "JSON/HTTPS",
"interactionStyle": "Synchronous"
}
],
"technology": "Xamarin"
},
{
"id": "17",
"tags": "Element,Container,Web Browser",
"name": "Single-Page Application",
"description": "Provides all of the Internet banking functionality to customers via their web browser.",
"relationships": [
{
"id": "37",
"tags": "Relationship,Synchronous",
"sourceId": "17",
"destinationId": "30",
"description": "Makes API calls to",
"technology": "JSON/HTTPS",
"interactionStyle": "Synchronous"
},
{
"id": "35",
"tags": "Relationship,Synchronous",
"sourceId": "17",
"destinationId": "29",
"description": "Makes API calls to",
"technology": "JSON/HTTPS",
"interactionStyle": "Synchronous"
},
{
"id": "48",
"tags": "Relationship,Synchronous",
"sourceId": "17",
"destinationId": "20",
"description": "Makes API calls to",
"technology": "JSON/HTTPS",
"interactionStyle": "Synchronous"
},
{
"id": "36",
"tags": "Relationship,Synchronous",
"sourceId": "17",
"destinationId": "31",
"description": "Makes API calls to",
"technology": "JSON/HTTPS",
"interactionStyle": "Synchronous"
}
],
"technology": "JavaScript and Angular"
},
{
"id": "19",
"tags": "Element,Container",
"name": "Web Application",
"description": "Delivers the static content and the Internet banking single page application.",
"relationships": [
{
"id": "25",
"tags": "Relationship,Synchronous",
"sourceId": "19",
"destinationId": "17",
"description": "Delivers to the customer's web browser",
"interactionStyle": "Synchronous"
}
],
"technology": "Java and Spring MVC"
}
]
},
{
"id": "4",
"tags": "Element,Software System,Existing System",
"name": "Mainframe Banking System",
"description": "Stores all of the core banking information about customers, accounts, transactions, etc.",
"location": "Internal"
}
],
"deploymentNodes": [
{
"id": "50",
"tags": "Element,Deployment Node",
"name": "Developer Laptop",
"description": "A developer laptop.",
"environment": "Development",
"technology": "Microsoft Windows 10 or Apple macOS",
"instances": 1,
"children": [
{
"id": "55",
"tags": "Element,Deployment Node",
"name": "Docker Container - Database Server",
"description": "A Docker container.",
"environment": "Development",
"technology": "Docker",
"instances": 1,
"children": [
{
"id": "56",
"tags": "Element,Deployment Node",
"name": "Database Server",
"description": "A development database.",
"environment": "Development",
"technology": "Oracle 12c",
"instances": 1,
"containerInstances": [
{
"id": "57",
"tags": "Container Instance",
"environment": "Development",
"containerId": "21",
"instanceId": 1,
"properties": {}
}
],
"children": [],
"infrastructureNodes": []
}
],
"containerInstances": [],
"infrastructureNodes": []
},
{
"id": "51",
"tags": "Element,Deployment Node",
"name": "Docker Container - Web Server",
"description": "A Docker container.",
"environment": "Development",
"technology": "Docker",
"instances": 1,
"children": [
{
"id": "52",
"tags": "Element,Deployment Node",
"properties": {
"Java Version": "8",
"Xms": "1024M",
"Xmx": "512M"
},
"name": "Apache Tomcat",
"description": "An open source Java EE web server.",
"environment": "Development",
"technology": "Apache Tomcat 8.x",
"instances": 1,
"containerInstances": [
{
"id": "54",
"tags": "Container Instance",
"relationships": [
{
"id": "58",
"sourceId": "54",
"destinationId": "57",
"description": "Reads from and writes to",
"technology": "JDBC",
"interactionStyle": "Synchronous",
"linkedRelationshipId": "26"
}
],
"environment": "Development",
"containerId": "20",
"instanceId": 1,
"properties": {}
},
{
"id": "53",
"tags": "Container Instance",
"relationships": [
{
"id": "62",
"sourceId": "53",
"destinationId": "60",
"description": "Delivers to the customer's web browser",
"interactionStyle": "Synchronous",
"linkedRelationshipId": "25"
}
],
"environment": "Development",
"containerId": "19",
"instanceId": 1,
"properties": {}
}
],
"children": [],
"infrastructureNodes": []
}
],
"containerInstances": [],
"infrastructureNodes": []
},
{
"id": "59",
"tags": "Element,Deployment Node",
"name": "Web Browser",
"environment": "Development",
"technology": "Chrome, Firefox, Safari, or Edge",
"instances": 1,
"containerInstances": [
{
"id": "60",
"tags": "Container Instance",
"relationships": [
{
"id": "61",
"sourceId": "60",
"destinationId": "54",
"description": "Makes API calls to",
"technology": "JSON/HTTPS",
"interactionStyle": "Synchronous",
"linkedRelationshipId": "48"
}
],
"environment": "Development",
"containerId": "17",
"instanceId": 1,
"properties": {}
}
],
"children": [],
"infrastructureNodes": []
}
],
"containerInstances": [],
"infrastructureNodes": []
},
{
"id": "68",
"tags": "Element,Deployment Node",
"name": "Big Bank plc",
"environment": "Live",
"technology": "Big Bank plc data center",
"instances": 1,
"children": [
{
"id": "73",
"tags": "Element,Deployment Node",
"properties": {
"Location": "London and Reading"
},
"name": "bigbank-api***",
"description": "A web server residing in the web server farm, accessed via F5 BIG-IP LTMs.",
"environment": "Live",
"technology": "Ubuntu 16.04 LTS",
"instances": 8,
"children": [
{
"id": "74",
"tags": "Element,Deployment Node",
"properties": {
"Java Version": "8",
"Xms": "1024M",
"Xmx": "512M"
},
"name": "Apache Tomcat",
"description": "An open source Java EE web server.",
"environment": "Live",
"technology": "Apache Tomcat 8.x",
"instances": 1,
"containerInstances": [
{
"id": "75",
"tags": "Container Instance",
"relationships": [
{
"id": "81",
"sourceId": "75",
"destinationId": "80",
"description": "Reads from and writes to",
"technology": "JDBC",
"interactionStyle": "Synchronous",
"linkedRelationshipId": "26"
},
{
"id": "85",
"tags": "Failover",
"sourceId": "75",
"destinationId": "84",
"description": "Reads from and writes to",
"technology": "JDBC",
"interactionStyle": "Synchronous",
"linkedRelationshipId": "26"
}
],
"environment": "Live",
"containerId": "20",
"instanceId": 2,
"properties": {}
}
],
"children": [],
"infrastructureNodes": []
}
],
"containerInstances": [],
"infrastructureNodes": []
},
{
"id": "78",
"tags": "Element,Deployment Node",
"properties": {
"Location": "London"
},
"name": "bigbank-db01",
"description": "The primary database server.",
"environment": "Live",
"technology": "Ubuntu 16.04 LTS",
"instances": 1,
"children": [
{
"id": "79",
"tags": "Element,Deployment Node",
"name": "Oracle - Primary",
"description": "The primary, live database server.",
"relationships": [
{
"id": "86",
"tags": "Relationship,Synchronous",
"sourceId": "79",
"destinationId": "83",
"description": "Replicates data to",
"interactionStyle": "Synchronous"
}
],
"environment": "Live",
"technology": "Oracle 12c",
"instances": 1,
"containerInstances": [
{
"id": "80",
"tags": "Container Instance",
"environment": "Live",
"containerId": "21",
"instanceId": 2,
"properties": {}
}
],
"children": [],
"infrastructureNodes": []
}
],
"containerInstances": [],
"infrastructureNodes": []
},
{
"id": "82",
"tags": "Element,Deployment Node,Failover",
"properties": {
"Location": "Reading"
},
"name": "bigbank-db02",
"description": "The secondary database server.",
"environment": "Live",
"technology": "Ubuntu 16.04 LTS",
"instances": 1,
"children": [
{
"id": "83",
"tags": "Element,Deployment Node,Failover",
"name": "Oracle - Secondary",
"description": "A secondary, standby database server, used for failover purposes only.",
"environment": "Live",
"technology": "Oracle 12c",
"instances": 1,
"containerInstances": [
{
"id": "84",
"tags": "Container Instance,Failover",
"environment": "Live",
"containerId": "21",
"instanceId": 3,
"properties": {}
}
],
"children": [],
"infrastructureNodes": []
}
],
"containerInstances": [],
"infrastructureNodes": []
},
{
"id": "69",
"tags": "Element,Deployment Node",
"properties": {
"Location": "London and Reading"
},
"name": "bigbank-web***",
"description": "A web server residing in the web server farm, accessed via F5 BIG-IP LTMs.",
"environment": "Live",
"technology": "Ubuntu 16.04 LTS",
"instances": 4,
"children": [
{
"id": "70",
"tags": "Element,Deployment Node",
"properties": {
"Java Version": "8",
"Xms": "1024M",
"Xmx": "512M"
},
"name": "Apache Tomcat",
"description": "An open source Java EE web server.",
"environment": "Live",
"technology": "Apache Tomcat 8.x",
"instances": 1,
"containerInstances": [
{
"id": "71",
"tags": "Container Instance",
"relationships": [
{
"id": "72",
"sourceId": "71",
"destinationId": "67",
"description": "Delivers to the customer's web browser",
"interactionStyle": "Synchronous",
"linkedRelationshipId": "25"
}
],
"environment": "Live",
"containerId": "19",
"instanceId": 2,
"properties": {}
}
],
"children": [],
"infrastructureNodes": []
}
],
"containerInstances": [],
"infrastructureNodes": []
}
],
"containerInstances": [],
"infrastructureNodes": []
},
{
"id": "65",
"tags": "Element,Deployment Node",
"name": "Customer's computer",
"environment": "Live",
"technology": "Microsoft Windows or Apple macOS",
"instances": 1,
"children": [
{
"id": "66",
"tags": "Element,Deployment Node",
"name": "Web Browser",
"environment": "Live",
"technology": "Chrome, Firefox, Safari, or Edge",
"instances": 1,
"containerInstances": [
{
"id": "67",
"tags": "Container Instance",
"relationships": [
{
"id": "77",
"sourceId": "67",
"destinationId": "75",
"description": "Makes API calls to",
"technology": "JSON/HTTPS",
"interactionStyle": "Synchronous",
"linkedRelationshipId": "48"
}
],
"environment": "Live",
"containerId": "17",
"instanceId": 2,
"properties": {}
}
],
"children": [],
"infrastructureNodes": []
}
],
"containerInstances": [],
"infrastructureNodes": []
},
{
"id": "63",
"tags": "Element,Deployment Node",
"name": "Customer's mobile device",
"environment": "Live",
"technology": "Apple iOS or Android",
"instances": 1,
"containerInstances": [
{
"id": "64",
"tags": "Container Instance",
"relationships": [
{
"id": "76",
"sourceId": "64",
"destinationId": "75",
"description": "Makes API calls to",
"technology": "JSON/HTTPS",
"interactionStyle": "Synchronous",
"linkedRelationshipId": "49"
}
],
"environment": "Live",
"containerId": "18",
"instanceId": 1,
"properties": {}
}
],
"children": [],
"infrastructureNodes": []
}
]
},
"documentation": {
"sections": [
{
"elementId": "2",
"title": "Context",
"order": 1,
"format": "Markdown",
"content": "Here is some context about the Internet Banking System...\n\n\n### Internet Banking System\n...\n### Mainframe Banking System\n...\n"
},
{
"elementId": "19",
"title": "Components",
"order": 3,
"format": "Markdown",
"content": "Here is some information about the API Application...\n\n### Sign in process\nHere is some information about the Sign In Controller, including how the sign in process works...\n"
},
{
"elementId": "2",
"title": "Development Environment",
"order": 4,
"format": "AsciiDoc",
"content": "Here is some information about how to set up a development environment for the Internet Banking System...\nimage::embed:DevelopmentDeployment[]"
},
{
"elementId": "2",
"title": "Containers",
"order": 2,
"format": "Markdown",
"content": "Here is some information about the containers within the Internet Banking System...\n\n### Web Application\n...\n### Database\n...\n"
},
{
"elementId": "2",
"title": "Deployment",
"order": 5,
"format": "AsciiDoc",
"content": "Here is some information about the live deployment environment for the Internet Banking System...\nimage::embed:LiveDeployment[]"
}
],
"template": {
"name": "Software Guidebook",
"author": "Simon Brown",
"url": "https://leanpub.com/visualising-software-architecture"
},
"decisions": [],
"images": []
},
"views": {
"systemLandscapeViews": [
{
"description": "The system landscape diagram for Big Bank plc.",
"key": "SystemLandscape",
"paperSize": "A5_Landscape",
"animations": [
{
"order": 1,
"elements": [
"1",
"2",
"4",
"6"
],
"relationships": [
"3",
"5",
"7",
"8"
]
},
{
"order": 2,
"elements": [
"9"
],
"relationships": [
"11",
"10"
]
},
{
"order": 3,
"elements": [
"12",
"15"
],
"relationships": [
"13",
"14",
"16"
]
}
],
"enterpriseBoundaryVisible": true,
"elements": [
{
"id": "1",
"x": 87,
"y": 643
},
{
"id": "12",
"x": 1947,
"y": 36
},
{
"id": "2",
"x": 1012,
"y": 813
},
{
"id": "4",
"x": 1922,
"y": 693
},
{
"id": "15",
"x": 1947,
"y": 1241
},
{
"id": "6",
"x": 1012,
"y": 1326
},
{
"id": "9",
"x": 1012,
"y": 301
}
],
"relationships": [
{
"id": "16"
},
{
"id": "3"
},
{
"id": "14",
"vertices": [
{
"x": 285,
"y": 240
}
]
},
{
"id": "5"
},
{
"id": "13"
},
{
"id": "11"
},
{
"id": "7"
},
{
"id": "8"
},
{
"id": "10"
}
]
}
],
"systemContextViews": [
{
"softwareSystemId": "2",
"description": "The system context diagram for the Internet Banking System.",
"key": "SystemContext",
"paperSize": "A5_Landscape",
"animations": [
{
"order": 1,
"elements": [
"2"
],
"relationships": []
},
{
"order": 2,
"elements": [
"1"
],
"relationships": [
"3"
]
},
{
"order": 3,
"elements": [
"4"
],
"relationships": [
"5"
]
},
{
"order": 4,
"elements": [
"6"
],
"relationships": [
"7",
"8"
]
}
],
"enterpriseBoundaryVisible": false,
"elements": [
{
"id": "1",
"x": 632,
"y": 69
},
{
"id": "2",
"x": 607,
"y": 714
},
{
"id": "4",
"x": 607,
"y": 1259
},
{
"id": "6",
"x": 1422,
"y": 714
}
],
"relationships": [
{
"id": "3"
},
{
"id": "5"
},
{
"id": "7"
},
{
"id": "8"
}
]
}
],
"containerViews": [
{
"softwareSystemId": "2",
"description": "The container diagram for the Internet Banking System.",
"key": "Containers",
"paperSize": "A5_Landscape",
"animations": [
{
"order": 1,
"elements": [
"1",
"4",
"6"
],
"relationships": [
"8"
]
},
{
"order": 2,
"elements": [
"19"
],
"relationships": [
"22"
]
},
{
"order": 3,
"elements": [
"17"
],
"relationships": [
"23",
"25"
]
},
{
"order": 4,
"elements": [
"18"
],
"relationships": [
"24"
]
},
{
"order": 5,
"elements": [
"20"
],
"relationships": [
"48",
"27",
"49",
"28"
]
},
{
"order": 6,
"elements": [
"21"
],
"relationships": [
"26"
]
}
],
"externalSoftwareSystemBoundariesVisible": false,
"elements": [
{
"id": "1",
"x": 1056,
"y": 24
},
{
"id": "4",
"x": 2012,
"y": 1214
},
{
"id": "17",
"x": 780,
"y": 664
},
{
"id": "6",
"x": 2012,
"y": 664
},
{
"id": "18",
"x": 1283,
"y": 664
},
{
"id": "19",
"x": 37,
"y": 664
},
{
"id": "20",
"x": 1031,
"y": 1214
},
{
"id": "21",
"x": 37,
"y": 1214
}
],
"relationships": [
{
"id": "28"
},
{
"id": "27"
},
{
"id": "26"
},
{
"id": "25"
},
{
"id": "24"
},
{
"id": "23"
},
{
"id": "22"
},
{
"id": "8"
},
{
"id": "48"
},
{
"id": "49"
}
]
}
],
"componentViews": [
{
"description": "The component diagram for the API Application.",
"key": "Components",
"paperSize": "A5_Landscape",
"animations": [
{
"order": 1,
"elements": [
"4",
"17",
"6",
"18",
"21"
],
"relationships": []
},
{
"order": 2,
"elements": [
"29",
"32"
],
"relationships": [
"45",
"35",
"38",
"41"
]
},
{
"order": 3,
"elements": [
"33",
"30"
],
"relationships": [
"46",
"37",
"40",
"42"
]
},
{
"order": 4,
"elements": [
"34",
"31"
],
"relationships": [
"44",
"36",
"47",
"39",
"43"
]
}
],
"containerId": "20",
"elements": [
{
"id": "33",
"x": 1925,
"y": 817
},
{
"id": "34",
"x": 1015,
"y": 817
},
{
"id": "4",
"x": 1925,
"y": 1307
},
{
"id": "17",
"x": 560,
"y": 10
},
{
"id": "6",
"x": 1015,
"y": 1307
},
{
"id": "18",
"x": 1470,
"y": 11
},
{
"id": "29",
"x": 105,
"y": 436
},
{
"id": "30",
"x": 1925,
"y": 436
},
{
"id": "31",
"x": 1015,
"y": 436
},
{
"id": "21",
"x": 105,
"y": 1307
},
{
"id": "32",
"x": 105,
"y": 817
}
],
"relationships": [
{
"id": "40",
"position": 40
},
{
"id": "41",
"position": 55
},
{
"id": "42",
"position": 50
},
{
"id": "43"
},
{
"id": "37",
"position": 85
},
{
"id": "36",
"position": 45
},
{
"id": "35",
"position": 35
},
{
"id": "44"
},
{
"id": "45",
"position": 60
},
{
"id": "46"
},
{
"id": "47"
},
{
"id": "38",
"position": 85
},
{
"id": "39",
"position": 40
}
]
}
],
"dynamicViews": [
{
"description": "Summarises how the sign in feature works in the single-page application.",
"key": "SignIn",
"paperSize": "A5_Landscape",
"elementId": "20",
"relationships": [
{
"id": "35",
"description": "Submits credentials to",
"order": "1"
},
{
"id": "41",
"description": "Calls isAuthenticated() on",
"order": "2"
},
{
"id": "45",
"description": "select * from users where username = ?",
"order": "3"
}
],
"elements": [
{
"id": "17",
"x": 552,
"y": 211
},
{
"id": "29",
"x": 1477,
"y": 211
},
{
"id": "32",
"x": 1477,
"y": 1116
},
{
"id": "21",
"x": 552,
"y": 1116
}
]
}
],
"deploymentViews": [
{
"softwareSystemId": "2",
"description": "An example live deployment scenario for the Internet Banking System.",
"key": "LiveDeployment",
"paperSize": "A5_Landscape",
"environment": "Live",
"animations": [
{
"order": 1,
"elements": [
"66",
"67",
"65"
]
},
{
"order": 2,
"elements": [
"63",
"64"
]
},
{
"order": 3,
"elements": [
"68",
"69",
"70",
"71",
"73",
"74",
"75"
],
"relationships": [
"77",
"72",
"76"
]
},
{
"order": 4,
"elements": [
"78",
"79",
"80"
],
"relationships": [
"81"
]
},
{
"order": 5,
"elements": [
"82",
"83",
"84"
],
"relationships": [
"85",
"86"
]
}
],
"elements": [
{
"id": "66",
"x": 0,
"y": 0
},
{
"id": "78",
"x": 0,
"y": 0
},
{
"id": "67",
"x": 150,
"y": 1026
},
{
"id": "79",
"x": 0,
"y": 0
},
{
"id": "68",
"x": 0,
"y": 0
},
{
"id": "69",
"x": 0,
"y": 0
},
{
"id": "80",
"x": 1820,
"y": 176
},
{
"id": "70",
"x": 0,
"y": 0
},
{
"id": "71",
"x": 985,
"y": 1026
},
{
"id": "82",
"x": 0,
"y": 0
},
{
"id": "83",
"x": 0,
"y": 0
},
{
"id": "84",
"x": 1820,
"y": 1026
},
{
"id": "73",
"x": 0,
"y": 0
},
{
"id": "74",
"x": 0,
"y": 0
},
{
"id": "63",
"x": 0,
"y": 0
},
{
"id": "75",
"x": 985,
"y": 176
},
{
"id": "64",
"x": 150,
"y": 176
},
{
"id": "65",
"x": 0,
"y": 0
}
],
"relationships": [
{
"id": "72"
},
{
"id": "81"
},
{
"id": "86"
},
{
"id": "76"
},
{
"id": "85"
},
{
"id": "77"
}
]
},
{
"softwareSystemId": "2",
"description": "An example development deployment scenario for the Internet Banking System.",
"key": "DevelopmentDeployment",
"paperSize": "A5_Landscape",
"environment": "Development",
"animations": [
{
"order": 1,
"elements": [
"59",
"60",
"50"
]
},
{
"order": 2,
"elements": [
"51",
"52",
"53",
"54"
],
"relationships": [
"61",
"62"
]
},
{
"order": 3,
"elements": [
"55",
"56",
"57"
],
"relationships": [
"58"
]
}
],
"elements": [
{
"id": "55",
"x": 0,
"y": 0
},
{
"id": "56",
"x": 0,
"y": 0
},
{
"id": "57",
"x": 1840,
"y": 834
},
{
"id": "59",
"x": 0,
"y": 0
},
{
"id": "60",
"x": 140,
"y": 664
},
{
"id": "50",
"x": 0,
"y": 0
},
{
"id": "51",
"x": 0,
"y": 0
},
{
"id": "52",
"x": 0,
"y": 0
},
{
"id": "53",
"x": 990,
"y": 494
},
{
"id": "54",
"x": 990,
"y": 834
}
],
"relationships": [
{
"id": "61"
},
{
"id": "62"
},
{
"id": "58",
"position": 50
}
]
}
],
"configuration": {
"branding": {},
"styles": {
"elements": [
{
"tag": "Element"
},
{
"tag": "Software System",
"background": "#1168bd",
"color": "#ffffff"
},
{
"tag": "Container",
"background": "#438dd5",
"color": "#ffffff"
},
{
"tag": "Component",
"background": "#85bbf0",
"color": "#000000"
},
{
"tag": "Person",
"background": "#08427b",
"color": "#ffffff",
"fontSize": 22,
"shape": "Person"
},
{
"tag": "Existing System",
"background": "#999999",
"color": "#ffffff"
},
{
"tag": "Bank Staff",
"background": "#999999",
"color": "#ffffff"
},
{
"tag": "Web Browser",
"shape": "WebBrowser"
},
{
"tag": "Mobile App",
"shape": "MobileDeviceLandscape"
},
{
"tag": "Database",
"shape": "Cylinder"
},
{
"tag": "Failover",
"opacity": 25
}
],
"relationships": [
{
"tag": "Failover",
"position": 70,
"opacity": 25
}
]
},
"terminology": {},
"lastSavedView": "Components",
"themes": []
},
"filteredViews": []
}
}
`
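// TestAuth above runs against the real Structurizr service and is skipped by
// config() unless credentials are provided. A minimal invocation sketch,
// assuming a real workspace (the ID and keys below are placeholders):
//
//   export STRUCTURIZR_WORKSPACE_ID=12345
//   export STRUCTURIZR_KEY=<api-key>
//   export STRUCTURIZR_SECRET=<api-secret>
//   go test -run TestAuth ./stz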
|
[
"\"STRUCTURIZR_WORKSPACE_ID\"",
"\"STRUCTURIZR_KEY\"",
"\"STRUCTURIZR_SECRET\""
] |
[] |
[
"STRUCTURIZR_KEY",
"STRUCTURIZR_WORKSPACE_ID",
"STRUCTURIZR_SECRET"
] |
[]
|
["STRUCTURIZR_KEY", "STRUCTURIZR_WORKSPACE_ID", "STRUCTURIZR_SECRET"]
|
go
| 3 | 0 | |
adeft/locations.py
|
"""
Contains paths to locations on user's system where models and resources are
to be stored. These all live in adeft's home folder which defaults to the
hidden directory ".adeft" in the user's home directory but which can be
specified by setting the environment variable ADEFT_HOME in the user's profile.
"""
import os
from adeft import __version__
ADEFT_HOME = os.environ.get('ADEFT_HOME')
if ADEFT_HOME is None:
ADEFT_HOME = os.path.join(os.path.expanduser('~'), '.adeft')
ADEFT_PATH = os.path.join(ADEFT_HOME, __version__)
ADEFT_MODELS_PATH = os.path.join(ADEFT_PATH, 'models')
RESOURCES_PATH = os.path.join(ADEFT_PATH, 'resources')
GROUNDINGS_FILE_PATH = os.path.join(RESOURCES_PATH, 'groundings.csv')
TEST_RESOURCES_PATH = os.path.join(ADEFT_PATH, 'test_resources')
S3_BUCKET_URL = os.path.join('http://adeft.s3.amazonaws.com', __version__)
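# A minimal usage sketch: because the paths above are computed at import time,
# ADEFT_HOME must be set before this module is imported. In a hypothetical
# user script (the cache directory below is a placeholder):
#
#   import os
#   os.environ['ADEFT_HOME'] = '/data/adeft_cache'
#   from adeft import locations
#   print(locations.ADEFT_MODELS_PATH)   # /data/adeft_cache/<version>/models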
|
[] |
[] |
[
"ADEFT_HOME"
] |
[]
|
["ADEFT_HOME"]
|
python
| 1 | 0 | |
src/main/java/com/oracle/truffle/polyglot/PolyglotContextConfig.java
|
/*
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* The Universal Permissive License (UPL), Version 1.0
*
* Subject to the condition set forth below, permission is hereby granted to any
* person obtaining a copy of this software, associated documentation and/or
* data (collectively the "Software"), free of charge and under any and all
* copyright rights in the Software, and any and all patent rights owned or
* freely licensable by each licensor hereunder covering either (i) the
* unmodified Software as contributed to or provided by such licensor, or (ii)
* the Larger Works (as defined below), to deal in both
*
* (a) the Software, and
*
* (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
* one is included with the Software each a "Larger Work" to which the Software
* is contributed by such licensors),
*
* without restriction, including without limitation the rights to copy, create
* derivative works of, display, perform, and distribute the Software and make,
* use, sell, offer for sale, import, export, have made, and have sold the
* Software and the Larger Work(s), and to sublicense the foregoing rights on
* either these or other terms.
*
* This license is subject to the following condition:
*
* The above copyright notice and either this complete permission notice or at a
* minimum a reference to the UPL must be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.oracle.truffle.polyglot;
import java.io.InputStream;
import java.io.OutputStream;
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
import java.util.logging.Handler;
import java.util.logging.Level;
import org.graalvm.collections.EconomicSet;
import org.graalvm.collections.UnmodifiableEconomicSet;
import org.graalvm.polyglot.EnvironmentAccess;
import org.graalvm.polyglot.HostAccess;
import org.graalvm.polyglot.PolyglotAccess;
import org.graalvm.polyglot.io.FileSystem;
import org.graalvm.polyglot.io.ProcessHandler;
import com.oracle.truffle.api.CompilerDirectives.CompilationFinal;
import com.oracle.truffle.polyglot.PolyglotImpl.VMObject;
final class PolyglotContextConfig {
private static final String[] EMPTY_STRING_ARRAY = new String[0];
final OutputStream out;
final OutputStream err;
final InputStream in;
final boolean hostLookupAllowed;
final boolean nativeAccessAllowed;
final boolean createThreadAllowed;
final boolean hostClassLoadingAllowed;
final boolean createProcessAllowed;
final Predicate<String> classFilter;
private final Map<String, String[]> applicationArguments;
final EconomicSet<String> allowedPublicLanguages;
private final Map<String, OptionValuesImpl> optionsById;
@CompilationFinal FileSystem fileSystem;
@CompilationFinal FileSystem internalFileSystem;
final Map<String, Level> logLevels; // effectively final
final Handler logHandler;
final PolyglotAccess polyglotAccess;
final ProcessHandler processHandler;
private final EnvironmentAccess environmentAccess;
private final Map<String, String> environment;
private volatile Map<String, String> configuredEnvironement;
private volatile ZoneId timeZone;
final PolyglotLimits limits;
final ClassLoader hostClassLoader;
private final List<PolyglotInstrument> configuredInstruments;
final HostAccess hostAccess;
final boolean allowValueSharing;
PolyglotContextConfig(PolyglotEngineImpl engine, OutputStream out, OutputStream err, InputStream in,
boolean hostLookupAllowed, PolyglotAccess polyglotAccess, boolean nativeAccessAllowed, boolean createThreadAllowed,
boolean hostClassLoadingAllowed, boolean allowExperimentalOptions,
Predicate<String> classFilter, Map<String, String[]> applicationArguments,
EconomicSet<String> allowedPublicLanguages, Map<String, String> options, FileSystem publicFileSystem, FileSystem internalFileSystem, Handler logHandler,
boolean createProcessAllowed, ProcessHandler processHandler, EnvironmentAccess environmentAccess, Map<String, String> environment,
ZoneId timeZone, PolyglotLimits limits, ClassLoader hostClassLoader, HostAccess hostAccess, boolean allowValueSharing) {
assert out != null;
assert err != null;
assert in != null;
assert environmentAccess != null;
this.out = out;
this.err = err;
this.in = in;
this.hostLookupAllowed = hostLookupAllowed;
this.polyglotAccess = polyglotAccess;
this.nativeAccessAllowed = nativeAccessAllowed;
this.createThreadAllowed = createThreadAllowed;
this.hostClassLoadingAllowed = hostClassLoadingAllowed;
this.createProcessAllowed = createProcessAllowed;
this.classFilter = classFilter;
this.applicationArguments = applicationArguments;
this.allowedPublicLanguages = allowedPublicLanguages;
this.fileSystem = publicFileSystem;
this.internalFileSystem = internalFileSystem;
this.optionsById = new HashMap<>();
this.logHandler = logHandler;
this.timeZone = timeZone;
this.limits = limits;
this.logLevels = new HashMap<>(engine.logLevels);
this.allowValueSharing = allowValueSharing;
List<PolyglotInstrument> instruments = null;
for (String optionKey : options.keySet()) {
final String group = PolyglotEngineImpl.parseOptionGroup(optionKey);
if (group.equals(PolyglotEngineImpl.OPTION_GROUP_LOG)) {
logLevels.put(PolyglotEngineImpl.parseLoggerName(optionKey), Level.parse(options.get(optionKey)));
continue;
}
VMObject object = findObjectForContextOption(engine, optionKey, group);
String id;
OptionValuesImpl engineOptionValues;
if (object instanceof PolyglotLanguage) {
PolyglotLanguage language = (PolyglotLanguage) object;
id = language.getId();
engineOptionValues = language.getOptionValues();
} else if (object instanceof PolyglotInstrument) {
PolyglotInstrument instrument = (PolyglotInstrument) object;
id = instrument.getId();
engineOptionValues = instrument.getEngineOptionValues();
if (instruments == null) {
instruments = new ArrayList<>();
}
instruments.add(instrument);
} else {
throw new AssertionError("invalid vm object");
}
OptionValuesImpl targetOptions = optionsById.get(id);
if (targetOptions == null) {
targetOptions = engineOptionValues.copy();
optionsById.put(id, targetOptions);
}
targetOptions.put(optionKey, options.get(optionKey), allowExperimentalOptions);
}
this.configuredInstruments = instruments == null ? Collections.emptyList() : instruments;
this.processHandler = processHandler;
this.environmentAccess = environmentAccess;
this.environment = environment == null ? Collections.emptyMap() : environment;
this.hostAccess = hostAccess;
this.hostClassLoader = hostClassLoader;
}
public ZoneId getTimeZone() {
ZoneId zone = this.timeZone;
if (zone == null) {
zone = timeZone = ZoneId.systemDefault();
}
return zone;
}
boolean isAccessPermitted(PolyglotLanguage from, PolyglotLanguage to) {
if (to.isHost() || to.cache.isInternal()) {
// everyone has access to host or internal languages
return true;
}
if (from == to) {
return true;
}
if (from == null) {
// embedder access
if (allowedPublicLanguages.contains(to.info.getId())) {
return true;
}
} else {
// language access
if (polyglotAccess == PolyglotAccess.ALL) {
if (allowedPublicLanguages.contains(to.info.getId())) {
return true;
}
} else {
if (from == to) {
return true;
}
UnmodifiableEconomicSet<String> configuredAccess = from.engine.getAPIAccess().getEvalAccess(polyglotAccess, from.getId());
if (configuredAccess != null && configuredAccess.contains(to.getId())) {
return true;
}
}
if (from.dependsOn(to)) {
return true;
}
}
return false;
}
String[] getApplicationArguments(PolyglotLanguage lang) {
String[] args = applicationArguments.get(lang.getId());
if (args == null) {
args = EMPTY_STRING_ARRAY;
}
return args;
}
OptionValuesImpl getLanguageOptionValues(PolyglotLanguage lang) {
OptionValuesImpl values = optionsById.get(lang.getId());
if (values == null) {
values = lang.getOptionValues();
}
return values.copy();
}
OptionValuesImpl getInstrumentOptionValues(PolyglotInstrument instrument) {
OptionValuesImpl values = optionsById.get(instrument.getId());
if (values == null) {
values = instrument.getEngineOptionValues();
}
return values.copy();
}
/**
* Returns a list of instruments with options for this context. Does not include instruments
* only configured for the engine.
*/
Collection<? extends PolyglotInstrument> getConfiguredInstruments() {
return configuredInstruments;
}
Map<String, String> getEnvironment() {
Map<String, String> result = configuredEnvironement;
if (result == null) {
synchronized (this) {
result = configuredEnvironement;
if (result == null) {
if (environmentAccess == EnvironmentAccess.NONE) {
result = Collections.unmodifiableMap(environment);
} else if (PolyglotEngineImpl.ALLOW_ENVIRONMENT_ACCESS && environmentAccess == EnvironmentAccess.INHERIT) {
result = System.getenv(); // System.getenv returns unmodifiable map.
if (!environment.isEmpty()) {
result = new HashMap<>(result);
result.putAll(environment);
result = Collections.unmodifiableMap(result);
}
} else {
throw PolyglotEngineException.unsupported(String.format("Unsupported EnvironmentAccess: %s", environmentAccess));
}
configuredEnvironement = result;
}
}
}
return result;
}
private static VMObject findObjectForContextOption(PolyglotEngineImpl engine, final String optionKey, String group) {
PolyglotLanguage language = engine.idToLanguage.get(group);
if (language == null) {
PolyglotInstrument instrument = engine.idToInstrument.get(group);
if (instrument != null) {
if (instrument.getEngineOptionsInternal().get(optionKey) != null) {
throw PolyglotEngineException.illegalArgument(
"Option " + optionKey +
" is an engine level instrument option. Engine level instrument options can only be configured for contexts without an explicit engine set." +
" To resolve this, configure the option when creating the Engine or create a context without a shared engine.");
}
return instrument;
}
if (group.equals(PolyglotEngineImpl.OPTION_GROUP_ENGINE)) {
// Test that "engine options" are not present among the options designated for
// this context
if (engine.getAllOptions().get(optionKey) != null) {
throw PolyglotEngineException.illegalArgument(
"Option " + optionKey + " is an engine option. Engine level options can only be configured for contexts without a shared engine set." +
" To resolve this, configure the option when creating the Engine or create a context without a shared engine.");
}
}
throw OptionValuesImpl.failNotFound(engine.getAllOptions(), optionKey);
} else {
// there should not be any overlaps -> engine creation should already fail
assert !group.equals(PolyglotEngineImpl.OPTION_GROUP_ENGINE);
}
return language;
}
}
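// A rough embedder-side sketch of how getEnvironment() above behaves: with
// EnvironmentAccess.INHERIT, explicitly configured variables are layered over
// the inherited process environment (the variable name below is a placeholder):
//
//   Context ctx = Context.newBuilder()
//       .allowEnvironmentAccess(EnvironmentAccess.INHERIT)
//       .environment("MY_VAR", "override")
//       .build();
//
// The guest then observes System.getenv() with MY_VAR added or overridden;
// with EnvironmentAccess.NONE only the explicitly configured map is visible.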
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
spyder/plugins/editor/widgets/tests/test_editor.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for editor.py
"""
# Standard library imports
import os
from sys import platform
try:
from unittest.mock import Mock, MagicMock
except ImportError:
from mock import Mock, MagicMock # Python 2
# Third party imports
import pytest
from flaky import flaky
from qtpy.QtCore import Qt
from qtpy.QtGui import QTextCursor
# Local imports
from spyder.plugins.editor.widgets.tests.fixtures import setup_editor
from spyder.plugins.editor.widgets.editor import EditorStack, EditorSplitter
from spyder.widgets.findreplace import FindReplace
from spyder.py3compat import PY2
# Qt Test Fixtures
#--------------------------------
@pytest.fixture
def base_editor_bot(qtbot):
editor_stack = EditorStack(None, [])
editor_stack.set_find_widget(Mock())
editor_stack.set_io_actions(Mock(), Mock(), Mock(), Mock())
return editor_stack
@pytest.fixture
def editor_bot(base_editor_bot, qtbot):
"""
Set up EditorStack with CodeEditor containing some Python code.
The cursor is at the empty line below the code.
Returns tuple with EditorStack and CodeEditor.
"""
editor_stack = base_editor_bot
text = ('a = 1\n'
'print(a)\n'
'\n'
'x = 2') # a newline is added at end
finfo = editor_stack.new('foo.py', 'utf-8', text)
qtbot.addWidget(editor_stack)
return editor_stack, finfo.editor
@pytest.fixture
def editor_find_replace_bot(base_editor_bot, qtbot):
editor_stack = base_editor_bot
text = ('spam bacon\n'
'spam sausage\n'
'spam egg')
finfo = editor_stack.new('spam.py', 'utf-8', text)
find_replace = FindReplace(None, enable_replace=True)
editor_stack.set_find_widget(find_replace)
find_replace.set_editor(finfo.editor)
qtbot.addWidget(editor_stack)
qtbot.addWidget(find_replace)
return editor_stack, finfo.editor, find_replace
@pytest.fixture
def editor_cells_bot(base_editor_bot, qtbot):
editor_stack = base_editor_bot
text = ('# %%\n'
'# 1 cell\n'
'# print(1)\n'
'# %%\n'
'# 2 cell\n'
'# print(2)\n'
'# %%\n'
'# 3 cell\n'
'# print(3)\n')
finfo = editor_stack.new('cells.py', 'utf-8', text)
find_replace = FindReplace(None, enable_replace=True)
qtbot.addWidget(editor_stack)
return editor_stack, finfo.editor
@pytest.fixture
def editor_folding_bot(base_editor_bot, qtbot):
"""
    Set up CodeEditor with some text useful for folding-related tests.
"""
editor_stack = base_editor_bot
text = ('# dummy test file\n'
'class a():\n' # fold-block level-0
' self.b = 1\n'
' print(self.b)\n'
' \n'
)
finfo = editor_stack.new('foo.py', 'utf-8', text)
find_replace = FindReplace(None, enable_replace=True)
editor_stack.set_find_widget(find_replace)
find_replace.set_editor(finfo.editor)
qtbot.addWidget(editor_stack)
qtbot.addWidget(find_replace)
return editor_stack, finfo.editor, find_replace
# Tests
#-------------------------------
def test_find_number_matches(setup_editor):
"""Test for number matches in find/replace."""
editor_stack, editor = setup_editor
editor_stack.find_widget.case_button.setChecked(True)
text = ' test \nTEST \nTest \ntesT '
editor.set_text(text)
editor_stack.find_widget.search_text.add_text('test')
editor_stack.find_widget.find(changed=False, forward=True,
rehighlight=False,
multiline_replace_check=False)
editor_text = editor_stack.find_widget.number_matches_text.text()
assert editor_text == '1 of 1'
editor_stack.find_widget.search_text.add_text('fail')
editor_stack.find_widget.find(changed=False, forward=True,
rehighlight=False,
multiline_replace_check=False)
editor_text = editor_stack.find_widget.number_matches_text.text()
assert editor_text == 'no matches'
def test_move_current_line_up(editor_bot):
editor_stack, editor = editor_bot
# Move second line up when nothing is selected.
editor.go_to_line(2)
editor.move_line_up()
expected_new_text = ('print(a)\n'
'a = 1\n'
'\n'
'x = 2\n')
assert editor.toPlainText() == expected_new_text
# Move line up when already at the top.
editor.move_line_up()
assert editor.toPlainText() == expected_new_text
# Move fourth line up when part of the line is selected.
editor.go_to_line(4)
editor.moveCursor(QTextCursor.Right, QTextCursor.MoveAnchor)
for i in range(2):
editor.moveCursor(QTextCursor.Right, QTextCursor.KeepAnchor)
editor.move_line_up()
expected_new_text = ('print(a)\n'
'a = 1\n'
'x = 2\n'
'\n')
assert editor.toPlainText()[:] == expected_new_text
def test_move_current_line_down(editor_bot):
editor_stack, editor = editor_bot
# Move fourth line down when nothing is selected.
editor.go_to_line(4)
editor.move_line_down()
expected_new_text = ('a = 1\n'
'print(a)\n'
'\n'
'\n'
'x = 2')
assert editor.toPlainText() == expected_new_text
# Move line down when already at the bottom.
editor.move_line_down()
assert editor.toPlainText() == expected_new_text
# Move first line down when part of the line is selected.
editor.go_to_line(1)
editor.moveCursor(QTextCursor.Right, QTextCursor.MoveAnchor)
for i in range(2):
editor.moveCursor(QTextCursor.Right, QTextCursor.KeepAnchor)
editor.move_line_down()
expected_new_text = ('print(a)\n'
'a = 1\n'
'\n'
'\n'
'x = 2')
assert editor.toPlainText() == expected_new_text
def test_move_multiple_lines_up(editor_bot):
editor_stack, editor = editor_bot
# Move second and third lines up.
editor.go_to_line(2)
cursor = editor.textCursor()
cursor.movePosition(QTextCursor.Down, QTextCursor.KeepAnchor)
cursor.movePosition(QTextCursor.Right, QTextCursor.KeepAnchor)
editor.setTextCursor(cursor)
editor.move_line_up()
expected_new_text = ('print(a)\n'
'\n'
'a = 1\n'
'x = 2\n')
assert editor.toPlainText() == expected_new_text
# Move first and second lines up (to test already at top condition).
editor.move_line_up()
assert editor.toPlainText() == expected_new_text
def test_move_multiple_lines_down(editor_bot):
editor_stack, editor = editor_bot
# Move third and fourth lines down.
editor.go_to_line(3)
cursor = editor.textCursor()
cursor.movePosition(QTextCursor.Down, QTextCursor.KeepAnchor)
cursor.movePosition(QTextCursor.Right, QTextCursor.KeepAnchor)
editor.setTextCursor(cursor)
editor.move_line_down()
expected_new_text = ('a = 1\n'
'print(a)\n'
'\n'
'\n'
'x = 2')
assert editor.toPlainText() == expected_new_text
    # Move fourth and fifth lines down (to test already at bottom condition).
editor.move_line_down()
assert editor.toPlainText() == expected_new_text
def test_run_top_line(editor_bot, qtbot):
editor_stack, editor = editor_bot
editor.go_to_line(1) # line number is one based
editor.move_cursor(3)
with qtbot.waitSignal(editor_stack.exec_in_extconsole) as blocker:
editor_stack.run_selection()
assert blocker.signal_triggered
assert blocker.args[0] == 'a = 1'
# check cursor moves to start of next line; note line number is zero based
assert editor.get_cursor_line_column() == (1, 0)
def test_run_last_nonempty_line(editor_bot, qtbot):
editor_stack, editor = editor_bot
editor.go_to_line(4)
with qtbot.waitSignal(editor_stack.exec_in_extconsole) as blocker:
editor_stack.run_selection()
assert blocker.signal_triggered
assert blocker.args[0] == 'x = 2'
assert editor.get_cursor_line_column() == (4, 0) # check cursor moves down
def test_run_empty_line_in_middle(editor_bot, qtbot):
editor_stack, editor = editor_bot
editor.go_to_line(3)
with qtbot.assertNotEmitted(editor_stack.exec_in_extconsole):
editor_stack.run_selection()
assert editor.get_cursor_line_column() == (3, 0) # check cursor moves down
def test_run_last_line_when_empty(editor_bot, qtbot):
editor_stack, editor = editor_bot
with qtbot.assertNotEmitted(editor_stack.exec_in_extconsole):
editor_stack.run_selection()
# check cursor doesn't move
assert editor.get_cursor_line_column() == (4, 0)
def test_run_last_line_when_nonempty(editor_bot, qtbot):
editor_stack, editor = editor_bot
editor.stdkey_backspace() # delete empty line at end
old_text = editor.toPlainText()
with qtbot.waitSignal(editor_stack.exec_in_extconsole) as blocker:
editor_stack.run_selection()
assert blocker.signal_triggered
assert blocker.args[0] == 'x = 2'
expected_new_text = old_text + editor.get_line_separator()
# check blank line got added
assert editor.toPlainText() == expected_new_text
assert editor.get_cursor_line_column() == (4, 0) # check cursor moves down
def test_find_replace_case_sensitive(setup_editor):
editor_stack, editor = setup_editor
editor_stack.find_widget.case_button.setChecked(True)
text = ' test \nTEST \nTest \ntesT '
editor.set_text(text)
editor_stack.find_widget.search_text.add_text('test')
editor_stack.find_widget.replace_text.add_text('pass')
editor_stack.find_widget.replace_find()
editor_stack.find_widget.replace_find()
editor_stack.find_widget.replace_find()
editor_stack.find_widget.replace_find()
editor_text = editor.toPlainText()
assert editor_text == ' pass \nTEST \nTest \ntesT '
def test_replace_current_selected_line(editor_find_replace_bot, qtbot):
editor_stack, editor, finder = editor_find_replace_bot
expected_new_text = ('ham bacon\n'
'spam sausage\n'
'spam egg')
old_text = editor.toPlainText()
finder.show()
finder.show_replace()
qtbot.keyClicks(finder.search_text, 'spam')
qtbot.keyClicks(finder.replace_text, 'ham')
qtbot.keyPress(finder.replace_text, Qt.Key_Return)
assert editor.toPlainText()[0:-1] == expected_new_text
def test_replace_enter_press(editor_find_replace_bot, qtbot):
"""Test advance forward pressing Enter, and backwards with Shift+Enter."""
editor_stack, editor, finder = editor_find_replace_bot
text = ' \nspam \nspam \nspam '
editor.set_text(text)
finder.show()
finder.search_text.add_text('spam')
# search forward
qtbot.keyPress(finder.search_text, Qt.Key_Return)
assert editor.get_cursor_line_column() == (1,4)
qtbot.keyPress(finder.search_text, Qt.Key_Return)
assert editor.get_cursor_line_column() == (2,4)
qtbot.keyPress(finder.search_text, Qt.Key_Return)
assert editor.get_cursor_line_column() == (3,4)
# search backwards
qtbot.keyPress(finder.search_text, Qt.Key_Return, modifier=Qt.ShiftModifier)
assert editor.get_cursor_line_column() == (2,4)
qtbot.keyPress(finder.search_text, Qt.Key_Return, modifier=Qt.ShiftModifier)
assert editor.get_cursor_line_column() == (1,4)
qtbot.keyPress(finder.search_text, Qt.Key_Return, modifier=Qt.ShiftModifier)
assert editor.get_cursor_line_column() == (3,4)
def test_replace_plain_regex(editor_find_replace_bot, qtbot):
"""Test that regex reserved characters are displayed as plain text."""
editor_stack, editor, finder = editor_find_replace_bot
expected_new_text = ('.\\[()]*test bacon\n'
'spam sausage\n'
'spam egg')
finder.show()
finder.show_replace()
qtbot.keyClicks(finder.search_text, 'spam')
qtbot.keyClicks(finder.replace_text, r'.\[()]*test')
qtbot.keyPress(finder.replace_text, Qt.Key_Return)
assert editor.toPlainText()[0:-1] == expected_new_text
def test_replace_invalid_regex(editor_find_replace_bot, qtbot):
"""Assert that replacing an invalid regexp does nothing."""
editor_stack, editor, finder = editor_find_replace_bot
old_text = editor.toPlainText()
finder.show()
finder.show_replace()
# Test with invalid search_text and valid replace_text
qtbot.keyClicks(finder.search_text, '\\')
qtbot.keyClicks(finder.replace_text, 'anything')
if not finder.re_button.isChecked():
qtbot.mouseClick(finder.re_button, Qt.LeftButton)
qtbot.mouseClick(finder.replace_button, Qt.LeftButton)
assert editor.toPlainText() == old_text
qtbot.mouseClick(finder.replace_sel_button, Qt.LeftButton)
assert editor.toPlainText() == old_text
qtbot.mouseClick(finder.replace_all_button, Qt.LeftButton)
assert editor.toPlainText() == old_text
# Test with valid search_text and invalid replace_text
qtbot.keyClicks(finder.search_text, 'anything')
qtbot.keyClicks(finder.replace_text, '\\')
qtbot.mouseClick(finder.replace_button, Qt.LeftButton)
assert editor.toPlainText() == old_text
qtbot.mouseClick(finder.replace_sel_button, Qt.LeftButton)
assert editor.toPlainText() == old_text
qtbot.mouseClick(finder.replace_all_button, Qt.LeftButton)
assert editor.toPlainText() == old_text
def test_selection_escape_characters(editor_find_replace_bot, qtbot):
editor_stack, editor, finder = editor_find_replace_bot
expected_new_text = ('spam bacon\n'
'spam sausage\n'
'spam egg\n'
'\\n \\t some escape characters')
qtbot.keyClicks(editor, '\\n \\t escape characters')
finder.show()
finder.show_replace()
qtbot.keyClicks(finder.search_text, 'escape')
qtbot.keyClicks(finder.replace_text, 'some escape')
# Select last line
cursor = editor.textCursor()
cursor.select(QTextCursor.LineUnderCursor)
assert cursor.selection().toPlainText() == "\\n \\t escape characters"
#replace
finder.replace_find_selection()
assert editor.toPlainText() == expected_new_text
def test_advance_cell(editor_cells_bot):
editor_stack, editor = editor_cells_bot
# cursor at the end of the file
assert editor.get_cursor_line_column() == (10, 0)
    # advance backwards to the beginning of the 3rd cell
editor_stack.advance_cell(reverse=True)
assert editor.get_cursor_line_column() == (6, 0)
# advance backwards to 2nd cell
editor_stack.advance_cell(reverse=True)
assert editor.get_cursor_line_column() == (3, 0)
# advance backwards to 1st cell
editor_stack.advance_cell(reverse=True)
assert editor.get_cursor_line_column() == (0, 0)
# advance to 2nd cell
editor_stack.advance_cell()
assert editor.get_cursor_line_column() == (3, 0)
# advance to 3rd cell
editor_stack.advance_cell()
assert editor.get_cursor_line_column() == (6, 0)
def test_unfold_when_searching(editor_folding_bot, qtbot):
editor_stack, editor, finder = editor_folding_bot
folding_panel = editor.panels.get('FoldingPanel')
line_search = editor.document().findBlockByLineNumber(3)
# fold region
block = editor.document().findBlockByLineNumber(1)
folding_panel.toggle_fold_trigger(block)
assert not line_search.isVisible()
# unfolded when searching
finder.show()
qtbot.keyClicks(finder.search_text, 'print')
qtbot.keyPress(finder.search_text, Qt.Key_Return)
assert line_search.isVisible()
def test_unfold_goto(editor_folding_bot):
editor_stack, editor, finder = editor_folding_bot
folding_panel = editor.panels.get('FoldingPanel')
line_goto = editor.document().findBlockByLineNumber(3)
# fold region
block = editor.document().findBlockByLineNumber(1)
folding_panel.toggle_fold_trigger(block)
assert not line_goto.isVisible()
# unfolded when goto
editor.go_to_line(4)
assert line_goto.isVisible()
@pytest.mark.skipif(PY2, reason="Python2 does not support unicode very well")
def test_get_current_word(base_editor_bot, qtbot):
"""Test getting selected valid python word."""
editor_stack = base_editor_bot
text = ('some words with non-ascii characters\n'
'niño\n'
'garçon\n'
'α alpha greek\n'
'123valid_python_word')
finfo = editor_stack.new('foo.py', 'utf-8', text)
qtbot.addWidget(editor_stack)
editor = finfo.editor
editor.go_to_line(1)
# Select some
editor.moveCursor(QTextCursor.EndOfWord, QTextCursor.KeepAnchor)
assert 'some' == editor.textCursor().selectedText()
assert editor.get_current_word() == 'some'
# Select niño
editor.go_to_line(2)
editor.moveCursor(QTextCursor.EndOfWord, QTextCursor.KeepAnchor)
assert 'niño' == editor.textCursor().selectedText()
assert editor.get_current_word() == 'niño'
# Select garçon
editor.go_to_line(3)
editor.moveCursor(QTextCursor.EndOfWord, QTextCursor.KeepAnchor)
assert 'garçon' == editor.textCursor().selectedText()
assert editor.get_current_word() == 'garçon'
# Select α
editor.go_to_line(4)
editor.moveCursor(QTextCursor.EndOfWord, QTextCursor.KeepAnchor)
assert 'α' == editor.textCursor().selectedText()
assert editor.get_current_word() == 'α'
# Select valid_python_word, should search first valid python word
editor.go_to_line(5)
editor.moveCursor(QTextCursor.EndOfWord, QTextCursor.KeepAnchor)
assert '123valid_python_word' == editor.textCursor().selectedText()
assert editor.get_current_word() == 'valid_python_word'
def test_tab_keypress_properly_caught_find_replace(editor_find_replace_bot, qtbot):
"""Test that tab works in find/replace dialog. Regression test for #3674.
Mock test—more isolated but less flimsy."""
editor_stack, editor, finder = editor_find_replace_bot
text = ' \nspam \nspam \nspam '
editor.set_text(text)
finder.show()
finder.show_replace()
finder.focusNextChild = MagicMock(name="focusNextChild")
qtbot.keyPress(finder.search_text, Qt.Key_Tab)
finder.focusNextChild.assert_called_once_with()
@flaky(max_runs=3)
@pytest.mark.skipif(os.environ.get('CI', None) is None and
platform.startswith('linux'),
reason="Fails on some Linux platforms locally.")
def test_tab_moves_focus_from_search_to_replace(editor_find_replace_bot, qtbot):
"""Test that tab works in find/replace dialog. Regression test for #3674.
"Real world" test—more comprehensive but potentially less robust."""
editor_stack, editor, finder = editor_find_replace_bot
text = ' \nspam \nspam \nspam '
editor.set_text(text)
finder.show()
finder.show_replace()
qtbot.wait(100)
finder.search_text.setFocus()
qtbot.wait(100)
assert finder.search_text.hasFocus()
assert not finder.replace_text.hasFocus()
qtbot.keyPress(finder.search_text, Qt.Key_Tab)
qtbot.wait(100)
assert not finder.search_text.hasFocus()
assert finder.replace_text.hasFocus()
@flaky(max_runs=3)
@pytest.mark.skipif(not os.name == 'nt', reason="Fails on Linux and macOS.")
def test_tab_copies_find_to_replace(editor_find_replace_bot, qtbot):
"""Check that text in the find box is copied to the replace box on tab
keypress. Regression test #4482."""
editor_stack, editor, finder = editor_find_replace_bot
finder.show()
finder.show_replace()
finder.search_text.setFocus()
finder.search_text.set_current_text('This is some test text!')
qtbot.keyClick(finder.search_text, Qt.Key_Tab)
qtbot.wait(500)
assert finder.replace_text.currentText() == 'This is some test text!'
if __name__ == "__main__":
pytest.main()
|
[] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
python
| 1 | 0 | |
backend/everpro/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'everpro.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
modules/skaldship/nmap.py
|
# encoding: utf-8
__version__ = "1.0"
"""
##--------------------------------------#
## Kvasir
##
## (c) 2010-2013 Cisco Systems, Inc.
##
## nMap Utilities for Kvasir
##
## Author: Kurt Grutzmacher <[email protected]>
##--------------------------------------#
"""
from gluon import current
from skaldship.log import log
import logging
db = current.globalenv['db']
cache = current.globalenv['cache']
auth = current.globalenv['auth']
settings = current.globalenv['settings']
##-------------------------------------------------------------------------
def script_metadata():
"""
Load nmap script metadata into a dictionary
"""
try:
from zenmapCore_Kvasir.ScriptMetadata import get_script_entries
except ImportError, e:
return dict(error="Cannot load zenmap python library: %s" % (e))
scr_mdata = get_script_entries(settings.nmap_scriptdir, settings.nmap_nselibdir)
scripts = {}
for scr in scr_mdata:
scripts[scr.filename] = {
'usage': scr.usage,
'description': scr.description,
'arguments': scr.arguments,
'categories': scr.categories,
'author': scr.author,
'output': scr.output,
'url': scr.url,
}
return scripts
##-------------------------------------------------------------------------
def process_xml(
filename=None,
addnoports=False,
asset_group=None,
engineer=None,
msf_workspace=False,
ip_ignore_list=None,
ip_include_list=None,
update_hosts=False,
):
# Upload and process nMap XML Scan file
import re
from MetasploitAPI import MetasploitAPI
from skaldship.general import get_host_record, do_host_status
from skaldship.cpe import lookup_cpe
from zenmapCore_Kvasir.NmapParser import NmapParser
# output regexes
RE_NETBIOS_NAME = re.compile('NetBIOS computer name: (?P<d>.*),')
RE_NETBIOS_WORKGROUP = re.compile('Workgroup: (?P<d>.*),')
RE_NETBIOS_MAC = re.compile('NetBIOS MAC: (?P<d>([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2}))')
# build the hosts only/exclude list
ip_exclude = []
if ip_ignore_list:
ip_exclude = ip_ignore_list.split('\r\n')
# TODO: check for ip subnet/range and break it out to individuals
ip_only = []
if ip_include_list:
ip_only = ip_include_list.split('\r\n')
# TODO: check for ip subnet/range and break it out to individuals
log(" [*] Processing nMap scan file %s" % (filename))
nmap_parsed = NmapParser()
nmap_parsed.parse_file(filename)
#existing_vulnids = db(db.t_vulndata()).select(db.t_vulndata.id, db.t_vulndata.f_vulnid).as_dict(key='f_vulnid')
user_id = db.auth_user(engineer) or auth.user.id
# parse the hosts, where all the goodies are
log(" [-] Parsing %d hosts" % (len(nmap_parsed.hosts)))
hoststats = {}
hoststats['added'] = 0
hoststats['skipped'] = 0
hoststats['updated'] = 0
hoststats['errored'] = 0
hosts = [] # array of host_id fields
svc_db = db.t_services
for node in nmap_parsed.hosts:
nodefields = {}
if node.ipv6:
ipaddr = node.ipv6
            nodefields['f_ipv6'] = ipaddr
elif node.ip.get('type') == 'ipv4':
ipaddr = node.ip.get('addr')
nodefields['f_ipv4'] = ipaddr
else:
log(" [!] No IPv4/IPv6 address, skipping")
continue
nodefields['f_macaddr'] = node.mac
status = node.state
log(" [-] Host %s status is: %s" % (ipaddr, status))
if status != "up":
hoststats['skipped'] += 1
continue
if ipaddr in ip_exclude:
log(" [-] Host is in exclude list... skipping")
hoststats['skipped'] += 1
continue
if len(ip_only) > 0 and ipaddr not in ip_only:
log(" [-] Host is not in the only list... skipping")
hoststats['skipped'] += 1
continue
if not node.ports and not addnoports:
log(" [-] No ports open and not asked to add those kind... skipping")
hoststats['skipped'] += 1
continue
        # we'll just take the last hostname in the names list since it'll usually be the full dns name
for name in node.hostnames:
nodefields['f_hostname'] = name
nodefields['f_engineer'] = user_id
nodefields['f_asset_group'] = asset_group
nodefields['f_confirmed'] = False
# check to see if IPv4/IPv6 exists in DB already
if 'f_ipv4' in nodefields:
host_rec = db(db.t_hosts.f_ipv4 == nodefields['f_ipv4']).select().first()
elif 'f_ipv6' in nodefields:
host_rec = db(db.t_hosts.f_ipv6 == nodefields['f_ipv6']).select().first()
else:
log("No IP Address found in record. Skipping", logging.ERROR)
continue
if host_rec is None:
host_id = db.t_hosts.insert(**nodefields)
db.commit()
hoststats['added'] += 1
log(" [-] Adding %s" % (ipaddr))
elif host_rec is not None and update_hosts:
db.commit()
if 'f_ipv4' in nodefields:
host_id = db(db.t_hosts.f_ipv4 == nodefields['f_ipv4']).update(**nodefields)
else:
host_id = db(db.t_hosts.f_ipv6 == nodefields['f_ipv6']).update(**nodefields)
db.commit()
host_id = get_host_record(ipaddr)
host_id = host_id.id
hoststats['updated'] += 1
log(" [-] Updating %s" % (ipaddr))
else:
hoststats['skipped'] += 1
db.commit()
log(" [-] Skipped %s" % (ipaddr))
continue
hosts.append(host_id)
# process non-port <hostscript> entries. Add to info/0:
for hostscripts in node.hostscripts:
query = (svc_db.f_proto == 'info') & (svc_db.f_number == 0) & (svc_db.f_hosts_id == host_id)
svc_id = db.t_services.update_or_insert(query, f_proto='info', f_number=0, f_status='open', f_hosts_id=host_id)
if not svc_id:
svc_rec = db(query).select(cache=(cache.ram, 180)).first()
if svc_rec:
svc_id = svc_rec.id
else:
log(" [!] Service record wasn't created", logging.ERROR)
continue
db.commit()
for script in hostscripts:
script_id = script.id
output = script.output
db.t_service_info.update_or_insert(f_services_id=svc_id, f_name=script_id, f_text=output)
db.commit()
if script_id == 'nbstat':
# pull out NetBIOS info from nbstat output
result = RE_NETBIOS_MAC.search(output)
if 'd' in result.groupdict():
host_rec.update(f_macaddr=result.group('d'))
db.commit()
result = RE_NETBIOS_NAME.search(output)
if 'd' in result.groupdict():
host_rec.update(f_netbios_name=result.group('d'))
db.commit()
result = RE_NETBIOS_WORKGROUP.search(output)
if 'd' in result.groupdict():
db(db.t_netbios.update_or_insert(f_hosts_id=host_id, f_domain=result.group('d')))
db.commit()
# add ports and resulting vulndata
for port in node.ports:
f_proto = port.get('protocol')
f_number = port.get('portid')
f_status = port.get('port_state')
f_name = port.get('service_name')
f_product = port.get('service_product')
log(" [-] Adding port: %s/%s (%s)" % (f_proto, f_number, f_name))
svc_id = db.t_services.update_or_insert(f_proto=f_proto, f_number=f_number, f_status=f_status, f_hosts_id=host_id, f_name=f_name)
if f_product:
version = port.get('service_version')
if version:
f_product += " (%s)" % (version)
db.t_service_info.update_or_insert(f_services_id=svc_id, f_name=f_name, f_text=f_product)
db.commit()
# Process <script> service entries
for script in port.get('scripts'):
db.t_service_info.update_or_insert(f_services_id=svc_id, f_name=script.get('id'), f_text=script.get('output'))
db.commit()
# Process <cpe> service entries
for port_cpe in port.get('service_cpe'):
cpe_id = port_cpe.text.lstrip('cpe:/')
if cpe_id[0] == "a":
# process CPE Applications
#log(" [-] Found Application CPE data: %s" % (cpe_id))
db.t_service_info.update_or_insert(f_services_id=svc_id, f_name='cpe.app', f_text="cpe:/%s" % (cpe_id))
db.commit()
elif cpe_id[0] == "o":
# process CPE Operating System
os_id = lookup_cpe(cpe_id[2:])
if os_id is not None:
db.t_host_os_refs.insert(f_certainty='0.9',
f_family='Unknown',
f_class='Other',
f_hosts_id=host_id,
f_os_id=os_id)
db.commit()
else:
                        # So no CPE or existing OS data; let's split up the CPE data and make our own
log(" [!] No os_id found, this is odd !!!")
if msf_workspace:
msf = MetasploitAPI(host=user_id.f_msf_pro_url, apikey=user_id.f_msf_pro_key)
if msf.login():
try:
res = msf.pro_import_file(
msf_workspace,
filename,
{
'DS_REMOVE_FILE': False,
'tag': asset_group,
},
)
log(" [*] Added file to MSF Pro: %s" % (res))
except MetasploitAPI.MSFAPIError, e:
logging.error("MSFAPI Error: %s" % (e))
pass
else:
log(" [!] Unable to login to Metasploit PRO, check your API key", logging.ERROR)
msf = None
    # any newly added vulns need to be checked against the exploits table and connected
log(" [*] Connecting exploits to vulns and performing do_host_status")
do_host_status(asset_group=asset_group)
log(" [*] Import complete: hosts: %s added, %s skipped, %s errors - vulns: %s added, %s skipped" % (hoststats['added'],
hoststats['skipped'],
hoststats['errored'],
))
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
pkg/acsengine/defaults-kubelet.go
|
package acsengine
import (
"strconv"
"strings"
"github.com/Azure/acs-engine/pkg/api"
"github.com/Azure/acs-engine/pkg/api/common"
"github.com/Azure/acs-engine/pkg/helpers"
)
func setKubeletConfig(cs *api.ContainerService) {
o := cs.Properties.OrchestratorProfile
cloudSpecConfig := getCloudSpecConfig(cs.Location)
staticLinuxKubeletConfig := map[string]string{
"--address": "0.0.0.0",
"--allow-privileged": "true",
"--anonymous-auth": "false",
"--authorization-mode": "Webhook",
"--client-ca-file": "/etc/kubernetes/certs/ca.crt",
"--pod-manifest-path": "/etc/kubernetes/manifests",
"--cluster-dns": o.KubernetesConfig.DNSServiceIP,
"--cgroups-per-qos": "true",
"--enforce-node-allocatable": "pods",
"--kubeconfig": "/var/lib/kubelet/kubeconfig",
"--keep-terminated-pod-volumes": "false",
}
// Start with copy of Linux config
staticWindowsKubeletConfig := make(map[string]string)
for key, val := range staticLinuxKubeletConfig {
staticWindowsKubeletConfig[key] = val
}
// Add Windows-specific overrides
// Eventually paths should not be hardcoded here. They should be relative to $global:KubeDir in the PowerShell script
staticWindowsKubeletConfig["--azure-container-registry-config"] = "c:\\k\\azure.json"
staticWindowsKubeletConfig["--pod-infra-container-image"] = "kubletwin/pause"
staticWindowsKubeletConfig["--kubeconfig"] = "c:\\k\\config"
staticWindowsKubeletConfig["--cloud-config"] = "c:\\k\\azure.json"
staticWindowsKubeletConfig["--cgroups-per-qos"] = "false"
staticWindowsKubeletConfig["--enforce-node-allocatable"] = "\"\"\"\""
staticWindowsKubeletConfig["--client-ca-file"] = "c:\\k\\ca.crt"
staticWindowsKubeletConfig["--hairpin-mode"] = "promiscuous-bridge"
staticWindowsKubeletConfig["--image-pull-progress-deadline"] = "20m"
staticWindowsKubeletConfig["--resolv-conf"] = "\"\"\"\""
// Default Kubelet config
defaultKubeletConfig := map[string]string{
"--cluster-domain": "cluster.local",
"--network-plugin": "cni",
"--pod-infra-container-image": cloudSpecConfig.KubernetesSpecConfig.KubernetesImageBase + KubeConfigs[o.OrchestratorVersion]["pause"],
"--max-pods": strconv.Itoa(DefaultKubernetesMaxPods),
"--eviction-hard": DefaultKubernetesHardEvictionThreshold,
"--node-status-update-frequency": KubeConfigs[o.OrchestratorVersion]["nodestatusfreq"],
"--image-gc-high-threshold": strconv.Itoa(DefaultKubernetesGCHighThreshold),
"--image-gc-low-threshold": strconv.Itoa(DefaultKubernetesGCLowThreshold),
"--non-masquerade-cidr": "0.0.0.0",
"--cloud-provider": "azure",
"--cloud-config": "/etc/kubernetes/azure.json",
"--azure-container-registry-config": "/etc/kubernetes/azure.json",
"--event-qps": DefaultKubeletEventQPS,
"--cadvisor-port": DefaultKubeletCadvisorPort,
"--pod-max-pids": strconv.Itoa(DefaultKubeletPodMaxPIDs),
"--image-pull-progress-deadline": "30m",
}
// Apply Azure CNI-specific --max-pods value
if o.KubernetesConfig.NetworkPlugin == NetworkPluginAzure {
defaultKubeletConfig["--max-pods"] = strconv.Itoa(DefaultKubernetesMaxPodsVNETIntegrated)
}
	// If no user-configurable kubelet config values exist, use the defaults
setMissingKubeletValues(o.KubernetesConfig, defaultKubeletConfig)
addDefaultFeatureGates(o.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, "", "")
addDefaultFeatureGates(o.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, "1.8.0", "PodPriority=true")
// Override default cloud-provider?
if helpers.IsTrueBoolPointer(o.KubernetesConfig.UseCloudControllerManager) {
staticLinuxKubeletConfig["--cloud-provider"] = "external"
}
// Override default --network-plugin?
if o.KubernetesConfig.NetworkPlugin == NetworkPluginKubenet {
if o.KubernetesConfig.NetworkPolicy != NetworkPolicyCalico {
o.KubernetesConfig.KubeletConfig["--network-plugin"] = NetworkPluginKubenet
}
}
// We don't support user-configurable values for the following,
// so any of the value assignments below will override user-provided values
for key, val := range staticLinuxKubeletConfig {
o.KubernetesConfig.KubeletConfig[key] = val
}
// Remove secure kubelet flags, if configured
if !helpers.IsTrueBoolPointer(o.KubernetesConfig.EnableSecureKubelet) {
for _, key := range []string{"--anonymous-auth", "--client-ca-file"} {
delete(o.KubernetesConfig.KubeletConfig, key)
}
}
removeKubeletFlags(o.KubernetesConfig.KubeletConfig, o.OrchestratorVersion)
// Master-specific kubelet config changes go here
if cs.Properties.MasterProfile != nil {
if cs.Properties.MasterProfile.KubernetesConfig == nil {
cs.Properties.MasterProfile.KubernetesConfig = &api.KubernetesConfig{}
cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig = copyMap(cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig)
}
setMissingKubeletValues(cs.Properties.MasterProfile.KubernetesConfig, o.KubernetesConfig.KubeletConfig)
addDefaultFeatureGates(cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, "", "")
removeKubeletFlags(cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig, o.OrchestratorVersion)
}
// Agent-specific kubelet config changes go here
for _, profile := range cs.Properties.AgentPoolProfiles {
if profile.KubernetesConfig == nil {
profile.KubernetesConfig = &api.KubernetesConfig{}
profile.KubernetesConfig.KubeletConfig = copyMap(profile.KubernetesConfig.KubeletConfig)
if profile.OSType == "Windows" {
for key, val := range staticWindowsKubeletConfig {
profile.KubernetesConfig.KubeletConfig[key] = val
}
}
}
setMissingKubeletValues(profile.KubernetesConfig, o.KubernetesConfig.KubeletConfig)
if profile.OSType == "Windows" {
// Remove Linux-specific values
delete(profile.KubernetesConfig.KubeletConfig, "--pod-manifest-path")
}
// For N Series (GPU) VMs
if strings.Contains(profile.VMSize, "Standard_N") {
if !cs.Properties.IsNVIDIADevicePluginEnabled() && !common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.11.0") {
// enabling accelerators for Kubernetes >= 1.6 to <= 1.9
addDefaultFeatureGates(profile.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, "1.6.0", "Accelerators=true")
}
}
removeKubeletFlags(profile.KubernetesConfig.KubeletConfig, o.OrchestratorVersion)
}
}
func removeKubeletFlags(k map[string]string, v string) {
// Get rid of values not supported until v1.10
if !common.IsKubernetesVersionGe(v, "1.10.0") {
for _, key := range []string{"--pod-max-pids"} {
delete(k, key)
}
}
// Get rid of values not supported in v1.12 and up
if common.IsKubernetesVersionGe(v, "1.12.0") {
for _, key := range []string{"--cadvisor-port"} {
delete(k, key)
}
}
}
func setMissingKubeletValues(p *api.KubernetesConfig, d map[string]string) {
if p.KubeletConfig == nil {
p.KubeletConfig = d
} else {
for key, val := range d {
// If we don't have a user-configurable value for each option
if _, ok := p.KubeletConfig[key]; !ok {
// then assign the default value
p.KubeletConfig[key] = val
}
}
}
}
func copyMap(input map[string]string) map[string]string {
copy := map[string]string{}
for key, value := range input {
copy[key] = value
}
return copy
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
academicstoday_project/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "academicstoday_project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
prelink_binary.py
|
#!/usr/bin/env python2
import os
import sys
import shlex
import shutil
import subprocess
import re
from elftools.elf.elffile import ELFFile
from elftools.elf.segments import InterpSegment
def ex(cmd, env_override=None):
"""Execute a given command (string), returning stdout if succesfull and
raising an exception otherwise."""
env = os.environ.copy()
if env_override:
for k, v in env_override.items():
env[k] = v
p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
p.wait()
if p.returncode:
print "Error while executing command '%s': %d" % (cmd, p.returncode)
print "stdout: '%s'" % p.stdout.read()
print "stderr: '%s'" % p.stderr.read()
raise Exception("Error while executing command")
return p.stdout.read()
def get_overlap(mapping, other_mappings):
"""Returns the *last* area out of `other_mappings` that overlaps with
`mapping`, or None. Assumes `other_mappings` is ordered."""
start, size = mapping
end = start + size
for m in other_mappings[::-1]:
m_start, m_size = m
m_end = m_start + m_size
if (start >= m_start and start < m_end) or \
(end > m_start and end <= m_end) or \
(start <= m_start and end >= m_end):
return m
return None
def get_binary_info(prog):
"""Look for the loader requested by the program, ELF type, and record
existing mappings required by program itself."""
interp = None
binary_mappings = []
elftype = None
with open(prog, 'rb') as f:
e = ELFFile(f)
elftype = e.header['e_type']
for seg in e.iter_segments():
if isinstance(seg, InterpSegment):
interp = seg.get_interp_name()
if seg['p_type'] == 'PT_LOAD':
binary_mappings.append((seg['p_vaddr'], seg['p_memsz']))
if interp is None:
raise Exception("Could not find interp in binary")
return interp, elftype, binary_mappings
def get_library_deps(library, library_path):
"""Look for all dependency libraries for a given ELF library/binary, and
return a list of full paths. Uses the ldd command to find this information
at load-time, so may not be complete."""
# TODO: do we have to do this recursively for all deps?
deps = []
ldd = ex("ldd \"%s\"" % library, {"LD_LIBRARY_PATH": library_path})
for l in ldd.split("\n"):
m = re.search(r".*.so.* => (/.*\.so[^ ]*)", l)
if not m:
continue
deps.append(m.group(1))
return deps
def prelink_libs(libs, outdir, existing_mappings, baseaddr=0xf0ffffff):
"""For every library we calculate its size and alignment, find a space in
our new compact addr space and create a copy of the library that is
prelinked to the addr. Start mapping these from the *end* of the addr space
down, but leaving a bit of space at the top for stuff like the stack."""
for lib in libs:
newlib = os.path.join(outdir, os.path.basename(lib))
if lib != newlib:
shutil.copy(lib, newlib)
reallib = os.path.realpath(lib)
for debuglib in (reallib + ".debug", "/usr/lib/debug" + reallib):
if os.path.exists(debuglib):
newdebuglib = newlib + ".debug"
shutil.copy(debuglib, newdebuglib)
ex("objcopy --remove-section=.gnu_debuglink \"%s\"" % newlib)
ex("objcopy --add-gnu-debuglink=\"%s\" \"%s\"" % (newdebuglib, newlib))
break
with open(newlib, 'rb') as f:
# Determine the alignment and size required for all LOAD segments
# combined
align, size = 0, 0
e = ELFFile(f)
for seg in e.iter_segments():
if seg['p_type'] != 'PT_LOAD':
continue
if seg['p_align'] > align:
align = seg['p_align']
size = seg['p_vaddr'] + seg['p_memsz']
# Add some breathing room, otherwise mmap might think it won't fit.
size += 4096
baseaddr -= size
if baseaddr < 0:
print >>sys.stderr, '[ShrinkAddrSpace] Error: not enough space to prelink libraries'
sys.exit(1)
# Search for a slot that is not overlapping with anything else and
# aligned properly
found = False
while not found:
if baseaddr % align:
baseaddr -= baseaddr % align
overlap = get_overlap((baseaddr, size), existing_mappings)
if overlap:
baseaddr = overlap[0] - size
else:
found = True
print "Found %08x - %08x for %s" % (baseaddr, baseaddr + size, lib)
ex("prelink -r 0x%x \"%s\"" % (baseaddr, newlib))
def baseaddr_from_bits(bits):
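    # Reserve just under 1/16th of the address space at the top for the stack:
    # for bits=32, reserve = 0x10000000 - 0x01000000 = 0x0F000000, so the
    # returned base is 0xFFFFFFFF - 0x0F000000 = 0xF0FFFFFF, matching the
    # default baseaddr of prelink_libs above.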
assert bits > 8
reserve_for_stack = (1 << (bits - 4)) - (1 << (bits - 8))
return (1 << bits) - 1 - reserve_for_stack
def make_binary_EXEC(path):
"""Force the type of a binary to ET_EXEC (instead of for instance ET_DYN).
This is horrible."""
with open(path, 'rb') as f:
conts = f.read()
conts = conts[:0x10] + '\x02' + conts[0x11:] # e_type = ET_EXEC
with open(path, 'wb') as f:
f.write(conts)
def main():
import argparse
parser = argparse.ArgumentParser(description="Shrink address space of "
"given binary by prelinking all dependency libraries.")
parser.add_argument("binary", help="The ELF binary")
parser.add_argument("--in-place", help="Modify binary in-place",
action="store_true", default=False)
parser.add_argument("--set-rpath",
help="Set RPATH of (new) binary and preload lib to out-dir",
action="store_true", default=False)
parser.add_argument("--preload-lib", default="libshrink-preload.so",
help="Library used via LD_PRELOAD that moves stack and mmap_base")
parser.add_argument("--out-dir", default="",
help="Output directory for prelinked libs")
parser.add_argument("--library-path", default="",
help="LD_LIBRARY_PATH to export during library scan of binary")
parser.add_argument("--static-lib",
help="Compile for static linking (exclude libpreload)",
action="store_true", default=False)
parser.add_argument("--addrspace-bits",
help="number of bits in the address space (for base address)",
type=int, default=32)
args = parser.parse_args()
outdir = args.out_dir
if not args.out_dir:
outdir = os.path.abspath("prelink-%s" % args.binary.replace("/", "_"))
print "[ShrinkAddrSpace] for %s, using output dir %s, linked %s" % \
(args.binary, outdir, "statically" if args.static_lib else
"dynamically")
if os.path.isdir(outdir):
shutil.rmtree(outdir)
os.mkdir(outdir)
if args.in_place:
newprog = args.binary
else:
newprog = os.path.join(outdir, os.path.basename(args.binary))
shutil.copy(args.binary, newprog)
# Get loader and existing mappings for binary
interp, elftype, binary_mappings = get_binary_info(args.binary)
# Determine all dependency libraries
libs = set()
libs.add(interp)
libs.update(get_library_deps(args.binary, args.library_path))
# If using preload library, add that too
if not args.static_lib:
libs.update(get_library_deps(args.preload_lib, args.library_path))
libs.add(args.preload_lib)
# For PIE the binary is a library too
if elftype == 'ET_DYN':
libs.add(newprog)
# The magic: construct new addr space by prelinking all dependency libs.
# We need to do this first, as patchelf (later) may change the order of
# segments, causing prelink to bail out.
baseaddr = baseaddr_from_bits(args.addrspace_bits)
prelink_libs(libs, outdir, binary_mappings, baseaddr)
# The loader ignores addresses (set by prelink) for programs that are ET_DYN
# (i.e., PIE binaries), so we need to make it ET_EXEC instead.
# We need to do this *before* patchelf, otherwise it may break the binary.
if elftype == 'ET_DYN':
make_binary_EXEC(newprog)
# Update the loader to use our prelinked version
newinterp = os.path.realpath(os.path.join(outdir, os.path.basename(interp)))
ex("patchelf --set-interpreter \"%s\" \"%s\"" % (newinterp, newprog))
# By setting the rpath, we can avoid having to specify LD_LIBRARY_PATH
if args.set_rpath:
absoutdir = os.path.realpath(outdir)
ex("patchelf --set-rpath \"%s\" \"%s\"" % (absoutdir, newprog))
if not args.static_lib:
newpreload = os.path.join(outdir, os.path.basename(args.preload_lib))
ex("patchelf --set-rpath \"%s\" \"%s\"" % (absoutdir, newpreload))
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
Baseline MaskRCNN.py
|
import sys
import numpy as np
import tensorflow as tf
from datetime import datetime
import os
import sys
import random
import math
import numpy as np
import cv2
import matplotlib.pyplot as plt
plt.rcParams["font.size"] = 15
#import seaborn as sns
import json
from tqdm import tqdm
import pandas as pd
pd.set_option("display.max_rows", 101)
import glob
from collections import Counter
from PIL import Image
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
#input_dir = "./advancedML/data/"
'''
One dataset element is around 200mb or more.
The size of one batch is mainly determined by batch size, image size and maximum object number:
batch size = IMAGES_PER_GPU*GPU_COUNT in config.py
image size is related to MAX_IMG_DIM, MIN_IMG_DIM in config.py
maximum object number = MAX_GT_OBJECTS in config.py
Suppose dtype is int8 --> at least batch_size*1024*1024*2*100 = batch_size*200mb
This library uses keras' fit_generator with multiprocessing=True and workers=cpu_count.
This means a dataset generator is instantiated by each CPU process.
Each process keeps a queue for pipelining, and we can limit the maximum number of elements in the queue by setting max_queue_size in fit_generator.
*It doesn't mean the training begins only after the queue is filled. Making the yield super-slow shows this.
--> However, if training (the consumer) is slower than data generation, the queue gets filled and memory blows up.
**Trade-off: a bigger batch size can help optimization (the gradient is more accurate), but it slows down one training step and lets the queue fill up. A smaller batch size makes the whole training longer due to the approximate gradient, but one training step is faster, so the queue does not fill as quickly as with a bigger batch size.
***multiprocessing=False with workers>1, or workers > cpu count --> requires a thread-safe generator.
****This library uses workers=cpu count, so you don't need to worry about thread safety.
To avoid 'out of memory' you need to change some parameters; the ones worth changing are: max_queue_size (default 100), workers (less than cpu count), IMAGES_PER_GPU (default 1), GPU_COUNT.
Example
IMAGES_PER_GPU=8 / CPU=16 / max_queue_size=100 / GPU_COUNT=1
--> maximum size is at least 16*8*1*100*data ==> 2500G!!!!!
KAGGLE KERNEL
CPU=4/RAM=17G --> CPU specifications
CPU=2/RAM=14G --> GPU specifications
IMAGES_PER_GPU=1 / max_queue_size=100
--> maximum size is at least 2*1*1*100*data ==> 40G TT
NASH: CPU 32G / cores: 8
--> maximum 8*batch_size*max_queue_size*data --> 2G*batch_size*max_queue_size
--> keep 10 >= batch_size*max_queue_size to be super safe
'''
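# A rough, illustrative helper for the memory estimate sketched above. The
# per-element size (sample_mb) is an assumption for illustration only; the real
# footprint depends on image size and MAX_GT_OBJECTS.
def estimate_generator_memory_gb(workers, images_per_gpu, gpu_count,
                                 max_queue_size, sample_mb=200):
    """Back-of-the-envelope upper bound on fit_generator queue memory (GB)."""
    batch_size = images_per_gpu * gpu_count
    return workers * batch_size * max_queue_size * sample_mb / 1024.0
# Example from the note above: 16 workers, IMAGES_PER_GPU=8, GPU_COUNT=1,
# max_queue_size=100 --> 16*8*1*100*200 MB, i.e. roughly the 2500G figure.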
def classid2label(class_id):
category, *attribute = class_id.split("_")
return category, attribute
def json2df(data):
df = pd.DataFrame()
for index, el in enumerate(data):
for key, val in el.items():
df.loc[index, key] = val
return df
input_dir = "advanced_ML/data/"
train_df = pd.read_csv(input_dir + "train.csv")
train_df.head()
with open(input_dir + "label_descriptions.json") as f:
label_description = json.load(f)
print("this dataset info")
print(json.dumps(label_description["info"], indent=2))
category_df = json2df(label_description["categories"])
category_df["id"] = category_df["id"].astype(int)
category_df["level"] = category_df["level"].astype(int)
attribute_df = json2df(label_description["attributes"])
attribute_df["id"] = attribute_df["id"].astype(int)
attribute_df["level"] = attribute_df["level"].astype(int)
print("Category Labels")
#category_df
print("Attribute Labels")
#attribute_df
counter_category = Counter()
counter_attribute = Counter()
for class_id in train_df["ClassId"]:
category, attribute = classid2label(class_id)
counter_category.update([category])
counter_attribute.update(attribute)
category_name_dict = {}
for i in label_description["categories"]:
category_name_dict[str(i["id"])] = i["name"]
attribute_name_dict = {}
for i in label_description["attributes"]:
attribute_name_dict[str(i["id"])] = i["name"]
ROOT_DIR="fashion/mrcnn"
DATA_DIR="advanced_ML/data"
sys.path.append(os.path.join(ROOT_DIR, 'Mask_RCNN'))
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
class FashionDataset(utils.Dataset):
def load_fashion(self,image_ids=None,num_data=None):
'''
add_class --> register 46 classes self.add_class('fashion',i,name)
image_ids --> unique index of images(name?)
self.add_image('fashion',image_ids,width,height,annotations)
        width,height --> shape[:2] or extract from dataframe
annotations --> all collections of annotations for each image
Todo:
        There are some rows that have height and width as nan values
validation option is necessary for training
'''
for i,row in category_df.iterrows():
self.add_class('fashion',i,row['name'])
if image_ids is None:
image_ids = list(set(train_df['ImageId']))
if num_data is not None:
random.seed(42)
random.shuffle(image_ids)
image_ids=image_ids[:num_data]
for i in image_ids:
Width = train_df[train_df['ImageId']==i]['Width'].reset_index(drop=True)[0]
Height = train_df[train_df['ImageId']==i]['Height'].reset_index(drop=True)[0]
self.add_image('fashion',
image_id=i,
path=DATA_DIR+'/train/'+i,
width=Width,
height=Height,
annotations=train_df[train_df['ImageId']==i])
def image_reference(self, image_id):
info = self.image_info[image_id]
return info['path']
def load_image(self, image_id):
info = self.image_info[image_id]
ImagePath = info['path']
image = np.asarray(Image.open(ImagePath).convert("RGB"))
return image
def load_mask(self, image_id):
info = self.image_info[image_id]
annotations = info['annotations']
width=info['width']
height=info['height']
instance_masks = []
class_ids = []
for i,annotation in annotations.iterrows():
class_id=annotation['ClassId']
class_id=class_id.split('_')[0]
class_ids.append(class_id)
rle = annotation['EncodedPixels']
instance_masks.append(self.rle_to_mask(rle,width,height))
if class_ids:
mask = np.stack(instance_masks, axis=2).astype(np.bool)
class_ids = np.array(class_ids, dtype=np.int32)+1
return mask, class_ids
else:
# Call super class to return an empty mask
return super(FashionDataset, self).load_mask(image_id)
def rle_to_mask(self,rle,width,height):
mask = np.zeros(width*height,dtype=np.int8)
pixels_list = list(map(int,rle.split(" ")))
for i in range(0,len(pixels_list),2):
start_pixel = pixels_list[i]-1
num_pixel = pixels_list[i+1]-1
mask[start_pixel:start_pixel+num_pixel] = 1
mask = mask.reshape((height,width),order='F')
return mask
image_ids_list = list(set(train_df['ImageId']))
random.seed(42)
random.shuffle(image_ids_list)
val_split = 0.1
split = int((1-val_split)*len(image_ids_list))
train_ids = image_ids_list[:split]
val_ids = image_ids_list[split:]
train_ids = train_ids[:100]
val_ids = val_ids[:100]
#fashion_dataset = FashionDataset()
#fashion_dataset.load_fashion(num_data=100)
#fashion_dataset.prepare()
fashion_dataset_train = FashionDataset()
fashion_dataset_train.load_fashion(train_ids)
fashion_dataset_val = FashionDataset()
fashion_dataset_val.load_fashion(val_ids)
fashion_dataset_train.prepare()
fashion_dataset_val.prepare()
print("dataset prepared")
#print("Image Count: {}".format(len(fashion_dataset.image_ids)))
#print("Class Count: {}".format(fashion_dataset.num_classes))
#for i, info in enumerate(fashion_dataset.class_info):
# print("{:3}. {:50}".format(i, info['name']))
# Load and display random samples
#image_ids = np.random.choice(fashion_dataset.image_ids, 4)
#for image_id in image_ids:
# image = fashion_dataset.load_image(image_id)
# mask, class_ids = fashion_dataset.load_mask(image_id)
# visualize.display_top_masks(image, mask, class_ids, fashion_dataset.class_names)
# Load random image and mask.
#image_id = random.choice(fashion_dataset.image_ids)
#image = fashion_dataset.load_image(image_id)
#mask, class_ids = fashion_dataset.load_mask(image_id)
# Compute Bounding box
#bbox = utils.extract_bboxes(mask)
# Display image and additional stats
#print("image_id ", image_id, fashion_dataset.image_reference(image_id))
#log("image", image)
#log("mask", mask)
#log("class_ids", class_ids)
#log("bbox", bbox)
# Display image and instances
#visualize.display_instances(image, bbox, mask, class_ids, fashion_dataset.class_names)
# Run-length encoding stolen from https://www.kaggle.com/rakhlin/fast-run-length-encoding-python
def rle_encoding(x):
dots = np.where(x.T.flatten() == 1)[0]
run_lengths = []
prev = -2
for b in dots:
        if (b>prev+1): run_lengths.extend((b + 1, 0))
run_lengths[-1] += 1
prev = b
return run_lengths
'''
def rle_to_mask(rle,width,height):
mask = np.zeros(width*height,dtype=np.int8)
pixels_list = list(map(int,rle.split(" ")))
for i in range(0,len(pixels_list),2):
start_pixel = pixels_list[i]-1
num_pixel = pixels_list[i+1]-1
mask[start_pixel:start_pixel+num_pixel] = 1
mask = mask.reshape((height,width),order='F')
return mask
m=rle_to_mask(sample_rle_encoding,3676,5214)
r=rle_encoding(m)
" ".join([str(e) for e in r]) == sample_rle_encoding
'''
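# A tiny sanity check for rle_encoding above (illustrative only, not part of the
# original pipeline). With the column-major flattening used by rle_encoding, a
# single vertical 2-pixel run starting at the second pixel of a 3x3 mask encodes
# to [2, 2] in the 1-based (start, length) convention.
_demo_mask = np.zeros((3, 3), dtype=np.int8)
_demo_mask[1, 0] = 1
_demo_mask[2, 0] = 1
print("rle_encoding demo:", rle_encoding(_demo_mask))  # expected: [2, 2]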
class FashionConfig(Config):
NAME = "fashion"
IMAGES_PER_GPU = 1
    # Train on 2 GPUs (default is 1)
GPU_COUNT = 2
# Number of classes (including background)
NUM_CLASSES = 1 + 46
STEPS_PER_EPOCH=1000
config = FashionConfig()
#config.display()
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=ROOT_DIR)
WEIGHT_PATH = 'last'
'''
if WEIGHT_PATH == "last":
# Find last trained weights
model_path = model.find_last()
elif WEIGHT_PATH == "imagenet":
# Start from ImageNet trained weights
model_path = model.get_imagenet_weights()
else:
pass
#model_path = args.model
'''
model_path='fashion/mrcnn/pre-trained/mask_rcnn_fashion_0105.h5'
model.load_weights(model_path, by_name=True)
epochs_stage1_1=110
epochs_stage1_2=120
epochs_stage2_1=130
epochs_stage2_2=140
epochs_stage3_1=150
epochs_stage3_2=160
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
model.train(fashion_dataset_train, fashion_dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=epochs_stage1_1,
layers='heads')
model.train(fashion_dataset_train, fashion_dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=epochs_stage1_2,
layers='heads')
# Training - Stage 2
# Finetune layers from ResNet stage 4 and up
print("Fine tune Resnet stage 4 and up")
model.train(fashion_dataset_train, fashion_dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=epochs_stage2_1,
layers='4+')
model.train(fashion_dataset_train, fashion_dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=epochs_stage2_2,
layers='4+')
# Training - Stage 3
# Fine tune all layers
print("Fine tune all layers")
model.train(fashion_dataset_train, fashion_dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=epochs_stage3_1,
layers='all')
model.train(fashion_dataset_train, fashion_dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=epochs_stage3_2,
layers='all')
print("Training Finished")
'''
class InferenceConfig(FashionConfig):
#GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0
inference_config = InferenceConfig()
inference_config.display()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=ROOT_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
model_path = model.find_last()
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
image_ids = train_ids[0]
image=fashion_dataset_train.load_image(0)
result = model.detect([image])
r = result[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
fashion_dataset_train.class_names)
def get_test_filepaths(input_dir):
jpg_fps = glob.glob(input_dir+'/'+'*.jpg')
return list(set(jpg_fps))
test_input_dir=os.path.join(DATA_DIR, 'test')
test_fps=get_test_filepaths(test_input_dir)
'''
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
exec/cplus/cplus.go
|
package cplus
import (
"github.com/chaosblade-io/chaosblade/transport"
"github.com/chaosblade-io/chaosblade/util"
"encoding/json"
"fmt"
"github.com/chaosblade-io/chaosblade/exec"
"context"
"path"
"time"
"os"
)
const ApplicationName = "chaosblade-exec-cplus.jar"
const RemoveAction = "remove"
var cplusJarPath = path.Join(util.GetLibHome(), "cplus", ApplicationName)
var scriptDefaultPath = path.Join(util.GetLibHome(), "cplus", "script")
// Start the spring boot application; we need to check whether it is already running
func Prepare(port, scriptLocation string, waitTime int, javaHome string) *transport.Response {
if scriptLocation == "" {
scriptLocation = scriptDefaultPath + "/"
}
response := preCheck(port, scriptLocation)
if !response.Success {
return response
}
javaBin, err := getJavaBin(javaHome)
if err != nil {
return transport.ReturnFail(transport.Code[transport.FileNotFound], err.Error())
}
response = startProxy(port, scriptLocation, javaBin)
if !response.Success {
return response
}
// wait seconds
time.Sleep(time.Duration(waitTime) * time.Second)
return postCheck(port)
}
// getJavaBin returns the java bin path
func getJavaBin(javaHome string) (string, error) {
if javaHome == "" {
// check java bin
response := exec.NewLocalChannel().Run(context.Background(), "java", "-version")
if response.Success {
return "java", nil
}
// get java home
javaHome = os.Getenv("JAVA_HOME")
if javaHome == "" {
return "", fmt.Errorf("JAVA_HOME not found")
}
}
javaBin := path.Join(javaHome, "bin", "java")
response := exec.NewLocalChannel().Run(context.Background(), javaBin, "-version")
if !response.Success {
return "", fmt.Errorf(response.Err)
}
return javaBin, nil
}
func preCheck(port, scriptLocation string) *transport.Response {
// check spring boot application
if processExists(port) {
return transport.ReturnFail(transport.Code[transport.DuplicateError], "the server proxy has been started")
}
// check chaosblade-exec-cplus.jar file exists or not
if !util.IsExist(cplusJarPath) {
return transport.ReturnFail(transport.Code[transport.FileNotFound],
fmt.Sprintf("the %s proxy jar file not found in %s dir", ApplicationName, util.GetLibHome()))
}
// check script file
if !util.IsExist(scriptLocation) {
return transport.ReturnFail(transport.Code[transport.FileNotFound],
fmt.Sprintf("the %s script file dir not found", scriptLocation))
}
// check the port has been used or not
portInUse := util.CheckPortInUse(port)
if portInUse {
return transport.ReturnFail(transport.Code[transport.IllegalParameters],
fmt.Sprintf("the %s port is in use", port))
}
return transport.ReturnSuccess("success")
}
func processExists(port string) bool {
ctx := context.WithValue(context.Background(), exec.ProcessKey, port)
pids, _ := exec.GetPidsByProcessName(ApplicationName, ctx)
if pids != nil && len(pids) > 0 {
return true
}
return false
}
// startProxy invokes `nohup java -jar chaosblade-exec-cplus-1.0-SNAPSHOT1.jar --server.port=8703 --script.location=xxx &`
func startProxy(port, scriptLocation, javaBin string) *transport.Response {
args := fmt.Sprintf("%s -jar %s --server.port=%s --script.location=%s >> %s 2>&1 &",
javaBin,
cplusJarPath,
port, scriptLocation,
util.GetNohupOutput())
return exec.NewLocalChannel().Run(context.Background(), "nohup", args)
}
func postCheck(port string) *transport.Response {
result, err, _ := util.Curl(getProxyServiceUrl(port, "status"))
if err != nil {
return transport.ReturnFail(transport.Code[transport.CplusProxyCmdError], err.Error())
}
var resp transport.Response
json.Unmarshal([]byte(result), &resp)
return &resp
}
// Stop the spring boot application
func Revoke(port string) *transport.Response {
// check process
if !processExists(port) {
return transport.ReturnSuccess("process not exists")
}
	// Get http://127.0.0.1:xxx/remove returns EOF, so there is no need to check the result
util.Curl(getProxyServiceUrl(port, RemoveAction))
time.Sleep(2 * time.Second)
// revoke failed if the check operation returns success
response := postCheck(port)
if response.Success {
return transport.ReturnFail(transport.Code[transport.CplusProxyCmdError], "the process exists")
}
return transport.ReturnSuccess("success")
}
func getProxyServiceUrl(port, action string) string {
return fmt.Sprintf("http://127.0.0.1:%s/%s",
port, action)
}
|
[
"\"JAVA_HOME\""
] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
go
| 1 | 0 | |
src/main/java/software/aws/glue/tableversions/lambda/TableVersionsCleanupPlannerLambda.java
|
// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: MIT-0
package software.aws.glue.tableversions.lambda;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Optional;
import java.util.StringTokenizer;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.glue.AWSGlue;
import com.amazonaws.services.glue.AWSGlueClientBuilder;
import com.amazonaws.services.glue.model.Database;
import com.amazonaws.services.glue.model.Table;
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.RequestHandler;
import com.amazonaws.services.securitytoken.AWSSecurityTokenService;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder;
import com.amazonaws.services.securitytoken.model.GetCallerIdentityRequest;
import com.amazonaws.services.securitytoken.model.GetCallerIdentityResult;
import com.amazonaws.services.sqs.AmazonSQS;
import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
import com.google.gson.Gson;
import software.aws.glue.tableversions.utils.DDBUtil;
import software.aws.glue.tableversions.utils.GlueTable;
import software.aws.glue.tableversions.utils.GlueUtil;
import software.aws.glue.tableversions.utils.SQSUtil;
/**
* This class has AWS Lambda Handler method. Upon invocation, it takes the
 * following actions: 1. it fetches all databases from Glue Catalog 2. for each
* database, fetches all of its tables 3. for each table, it publishes table
* and database names to SQS queue.
*
* @author Ravi Itha, Amazon Web Services, Inc.
*
*/
public class TableVersionsCleanupPlannerLambda implements RequestHandler<Object, String> {
@Override
public String handleRequest(Object input, Context context) {
String separator = Optional.ofNullable(System.getenv("separator")).orElse("$");
String region = Optional.ofNullable(System.getenv("region")).orElse(Regions.US_EAST_1.getName());
String databaseNamesStringLiteral = Optional.ofNullable(System.getenv("database_names_string_literal"))
.orElse("database_1$database_2");
String sqsQueueURI = Optional.ofNullable(System.getenv("sqs_queue_url"))
.orElse("https://sqs.us-east-1.amazonaws.com/1234567890/table_versions_cleanup_planner_queue.fifo");
String ddbTableName = Optional.ofNullable(System.getenv("ddb_table_name"))
.orElse("glue_table_version_cleanup_planner");
String hashKey = Optional.ofNullable(System.getenv("hash_key")).orElse("execution_batch_id");
String rangeKey = Optional.ofNullable(System.getenv("range_key")).orElse("database_name_table_name");
long executionBatchId = System.currentTimeMillis();
AWSSecurityTokenService client = AWSSecurityTokenServiceClientBuilder.standard().build();
GetCallerIdentityRequest request = new GetCallerIdentityRequest();
GetCallerIdentityResult response = client.getCallerIdentity(request);
String homeCatalogId = response.getAccount();
context.getLogger().log("Catalog Id: " + homeCatalogId);
context.getLogger().log("Input: " + input);
printEnvVariables(sqsQueueURI, databaseNamesStringLiteral, separator, region, ddbTableName, hashKey, rangeKey);
// Create objects for AWS Glue and Amazon SQS
AWSGlue glue = AWSGlueClientBuilder.standard().withRegion(region).build();
AmazonSQS sqs = AmazonSQSClientBuilder.standard().withRegion(region).build();
AmazonDynamoDB dynamoDBClient = AmazonDynamoDBClientBuilder.standard().withRegion(region).build();
DDBUtil ddbUtil = new DDBUtil();
SQSUtil sqsUtil = new SQSUtil();
GlueUtil glueUtil = new GlueUtil();
List<String> databaseNames = new ArrayList<String>();
List<Database> databaseList = new ArrayList<Database>();
AtomicInteger numberOfTablesExported = new AtomicInteger();
        // When a list of databases is provided as token-separated values, the
        // cleanup process will be initiated for those databases.
        // Otherwise, the cleanup process will be initiated for all databases.
if (databaseNamesStringLiteral.equalsIgnoreCase("")) {
databaseList = glueUtil.getDatabases(glue, homeCatalogId);
} else {
databaseNames = tokenizeStrings(databaseNamesStringLiteral, separator);
for (String databaseName : databaseNames) {
Database database = glueUtil.getDatabase(glue, homeCatalogId, databaseName);
if (Optional.ofNullable(database).isPresent())
databaseList.add(database);
}
}
List<Table> tableList = glueUtil.getTables(glue, databaseList, homeCatalogId);
for (Table table : tableList) {
GlueTable tableMessage = new GlueTable();
tableMessage.setDatabaseName(table.getDatabaseName());
tableMessage.setTableName(table.getName());
Gson gson = new Gson();
String message = gson.toJson(tableMessage);
// Write a message to Amazon SQS queue.
boolean messageSentToSQS = sqsUtil.sendTableSchemaToSQSQueue(sqs, sqsQueueURI, message, executionBatchId, table.getDatabaseName());
if (messageSentToSQS) {
String messageSentTime = new Date().toString();
numberOfTablesExported.incrementAndGet();
ddbUtil.insertTableDetailsToDynamoDB(dynamoDBClient, ddbTableName, hashKey, rangeKey, executionBatchId,
table.getDatabaseName(), table.getName(), messageSentTime);
}
}
System.out.printf("Number of messages written to SQS Queue: %d \n", numberOfTablesExported.get());
return "TableVersionsCleanupPlannerLambda completed successfully!";
}
/**
* This method prints environment variables
*
* @param sourceGlueCatalogId
* @param topicArn
* @param ddbTblNameForDBStatusTracking
*/
public static void printEnvVariables(String sqsQueueURI, String databaseNamesStringLiteral, String separator,
String region, String ddbTableName, String hashKey, String rangeKey) {
System.out.println("Region: " + region);
System.out.println("SQS URL: " + sqsQueueURI);
System.out.println("Separator: " + separator);
System.out.println("Database names string literal: " + sqsQueueURI);
System.out.println("DynamoDB table Name: " + ddbTableName);
System.out.println("DynamoDB table - hash key: " + hashKey);
System.out.println("DynamoDB table - range key: " + rangeKey);
}
/**
* This method tokenizes strings using a provided separator
*
* @param str
* @param separator
* @return
*/
public static List<String> tokenizeStrings(String str, String separator) {
List<String> tokenList = Collections.list(new StringTokenizer(str, separator)).stream()
.map(token -> (String) token).collect(Collectors.toList());
return tokenList;
}
}
|
[
"\"separator\"",
"\"region\"",
"\"database_names_string_literal\"",
"\"sqs_queue_url\"",
"\"ddb_table_name\"",
"\"hash_key\"",
"\"range_key\""
] |
[] |
[
"sqs_queue_url",
"separator",
"region",
"database_names_string_literal",
"range_key",
"ddb_table_name",
"hash_key"
] |
[]
|
["sqs_queue_url", "separator", "region", "database_names_string_literal", "range_key", "ddb_table_name", "hash_key"]
|
java
| 7 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webchat.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/deploymanager/config.go
|
package deploymanager
import (
"context"
"fmt"
"os"
"github.com/go-logr/logr"
operatorv1 "github.com/operator-framework/api/pkg/operators/v1"
operatorv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
odfv1alpha1 "github.com/red-hat-storage/odf-operator/api/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
k8sscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
)
var (
scheme = runtime.NewScheme()
)
func init() {
utilruntime.Must(k8sscheme.AddToScheme(scheme))
utilruntime.Must(odfv1alpha1.AddToScheme(scheme))
utilruntime.Must(operatorv1.AddToScheme(scheme))
utilruntime.Must(operatorv1alpha1.AddToScheme(scheme))
}
type DeployManager struct {
Client client.Client
Ctx context.Context
Log logr.Logger
}
// NewDeployManager creates a DeployManager struct with default configuration
func NewDeployManager() (*DeployManager, error) {
kubeconfig := os.Getenv("KUBECONFIG")
if kubeconfig == "" {
return nil, fmt.Errorf("no KUBECONFIG environment variable set")
}
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, err
}
client, err := client.New(config, client.Options{Scheme: scheme})
if err != nil {
return nil, err
}
return &DeployManager{
Client: client,
Log: log.Log.WithName("DeployManager"),
Ctx: context.TODO(),
}, nil
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
src/main/java/dev/rise/util/misc/WhitelistUtil.java
|
package dev.rise.util.misc;
import lombok.experimental.UtilityClass;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
@UtilityClass
public class WhitelistUtil {
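    // Builds a hardware ID by SHA-1 hashing the processor identifier, computer name and current user name.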
public static String HWID() throws Exception {
return textToSHA1(System.getenv("PROCESSOR_IDENTIFIER")
+ System.getenv("COMPUTERNAME") + System.getProperty("user.name"));
}
private static String textToSHA1(final String text) throws NoSuchAlgorithmException {
final MessageDigest md = MessageDigest.getInstance("SHA-1");
final byte[] sha1hash;
md.update(text.getBytes(StandardCharsets.ISO_8859_1), 0, text.length());
sha1hash = md.digest();
return bytesToHex(sha1hash);
}
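    // Converts each byte to two lowercase hex characters (high nibble first, then low nibble).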
private static String bytesToHex(final byte[] data) {
final StringBuilder buf = new StringBuilder();
int i = 0;
while (i < data.length) {
int halfbyte = data[i] >>> 4 & 15;
int two_halfs = 0;
do {
if (halfbyte <= 9) {
buf.append((char) (48 + halfbyte));
} else {
buf.append((char) (97 + (halfbyte - 10)));
}
halfbyte = data[i] & 15;
} while (two_halfs++ < 1);
++i;
}
return buf.toString();
}
}
|
[
"\"PROCESSOR_IDENTIFIER\"",
"\"COMPUTERNAME\""
] |
[] |
[
"COMPUTERNAME",
"PROCESSOR_IDENTIFIER"
] |
[]
|
["COMPUTERNAME", "PROCESSOR_IDENTIFIER"]
|
java
| 2 | 0 | |
run.py
|
import os
from app import create_app
config_name = os.getenv("FLASK_ENV")
app = create_app(config_name)
if __name__ == "__main__":
app.run(debug=True)
|
[] |
[] |
[
"FLASK_ENV"
] |
[]
|
["FLASK_ENV"]
|
python
| 1 | 0 | |
lambda/traffic_simulator/handler.py
|
import os
import csv
import random
import boto3
sqs = boto3.client("sqs")
s3 = boto3.client("s3")
QUEUE_URL = os.environ.get("QUEUE_URL")
DATA_URL = os.environ.get("DATA_URL")
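# DATA_URL is expected to look like s3://<bucket>/<key>; the bucket is the third
# "/"-separated component and the key is everything after it.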
BUCKET = DATA_URL.split("/")[2]
KEY = DATA_URL.split("/", 3)[-1]
def lambda_handler(event, context):
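    # Download the CSV from S3, pick one random row and forward it to the SQS queue.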
s3.download_file(BUCKET, KEY, "/tmp/test.csv")
with open("/tmp/test.csv") as f:
reader = csv.reader(f)
chosen_row = random.choice(list(reader))
chosen_row = ",".join([str(elem) for elem in chosen_row])
_ = sqs.send_message(QueueUrl=QUEUE_URL, MessageBody=(chosen_row))
|
[] |
[] |
[
"QUEUE_URL",
"DATA_URL"
] |
[]
|
["QUEUE_URL", "DATA_URL"]
|
python
| 2 | 0 | |
dev/merge_spark_pr.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Apache
# Spark.
# usage: ./merge_spark_pr.py (see config env vars below)
#
# This utility assumes you already have a local Spark git folder and that you
# have added remotes corresponding to both (i) the github apache Spark
# mirror and (ii) the apache git repo.
import json
import os
import re
import subprocess
import sys
import traceback
import urllib2
try:
import jira.client
JIRA_IMPORTED = True
except ImportError:
JIRA_IMPORTED = False
if sys.version < '3':
input = raw_input
# Location of your Spark git development area
SPARK_HOME = os.environ.get("SPARK_HOME", os.getcwd())
# Remote name which points to the GitHub site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "apache-github")
# Remote name which points to Apache git
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache")
# ASF JIRA username
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", "")
# ASF JIRA password
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", "")
# OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests
# will be unauthenticated. You should only need to configure this if you find yourself regularly
# exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at
# https://github.com/settings/tokens. This script only requires the "public_repo" scope.
GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY")
GITHUB_BASE = "https://github.com/apache/spark/pull"
GITHUB_API_BASE = "https://api.github.com/repos/apache/spark"
JIRA_BASE = "https://issues.apache.org/jira/browse"
JIRA_API_BASE = "https://issues.apache.org/jira"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
def get_json(url):
try:
request = urllib2.Request(url)
if GITHUB_OAUTH_KEY:
request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY)
return json.load(urllib2.urlopen(request))
except urllib2.HTTPError as e:
if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0':
print("Exceeded the GitHub API rate limit; see the instructions in " +
"dev/merge_spark_pr.py to configure an OAuth token for making authenticated " +
"GitHub requests.")
else:
print("Unable to fetch URL, exiting: %s" % url)
sys.exit(-1)
def fail(msg):
print(msg)
clean_up()
sys.exit(-1)
def run_cmd(cmd):
print(cmd)
if isinstance(cmd, list):
return subprocess.check_output(cmd)
else:
return subprocess.check_output(cmd.split(" "))
def continue_maybe(prompt):
result = input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
def clean_up():
if 'original_head' in globals():
print("Restoring head pointer to %s" % original_head)
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
print("Deleting local branch %s" % branch)
run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref, title, body, pr_repo_desc):
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
continue_maybe(msg)
had_conflicts = True
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x), reverse=True)
primary_author = input(
"Enter primary author in the format of \"name <email>\" [%s]: " %
distinct_authors[0])
if primary_author == "":
primary_author = distinct_authors[0]
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n\n")
merge_message_flags = []
merge_message_flags += ["-m", title]
if body is not None:
# We remove @ symbols from the body to avoid triggering e-mails
# to people every time someone creates a public fork of Spark.
merge_message_flags += ["-m", body.replace("@", "")]
authors = "\n".join(["Author: %s" % a for a in distinct_authors])
merge_message_flags += ["-m", authors]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
committer_name, committer_email)
merge_message_flags += ["-m", message]
# The string "Closes #%s" string is required for GitHub to correctly close the PR
merge_message_flags += ["-m", "Closes #%s from %s." % (pr_num, pr_repo_desc)]
run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)
continue_maybe("Merge complete (local ref %s). Push to %s?" % (
target_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
pick_ref = input("Enter a branch name [%s]: " % default_branch)
if pick_ref == "":
pick_ref = default_branch
pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, pick_ref.upper())
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
run_cmd("git checkout %s" % pick_branch_name)
try:
run_cmd("git cherry-pick -sx %s" % merge_hash)
except Exception as e:
msg = "Error cherry-picking: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and finish the cherry-pick. Finished?"
continue_maybe(msg)
continue_maybe("Pick complete (local ref %s). Push to %s?" % (
pick_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
clean_up()
print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
print("Pick hash: %s" % pick_hash)
return pick_ref
def fix_version_from_branch(branch, versions):
# Note: Assumes this is a sorted (newest->oldest) list of un-released versions
if branch == "master":
return versions[0]
else:
branch_ver = branch.replace("branch-", "")
return filter(lambda x: x.name.startswith(branch_ver), versions)[-1]
def resolve_jira_issue(merge_branches, comment, default_jira_id=""):
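    """Prompt for a JIRA id and fix version(s), then transition the issue to Resolved/Fixed
    with the given comment. merge_branches is used to compute the default fix versions."""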
asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
jira_id = input("Enter a JIRA id [%s]: " % default_jira_id)
if jira_id == "":
jira_id = default_jira_id
try:
issue = asf_jira.issue(jira_id)
except Exception as e:
fail("ASF JIRA could not find %s\n%s" % (jira_id, e))
cur_status = issue.fields.status.name
cur_summary = issue.fields.summary
cur_assignee = issue.fields.assignee
if cur_assignee is None:
cur_assignee = choose_jira_assignee(issue, asf_jira)
# Check again, we might not have chosen an assignee
if cur_assignee is None:
cur_assignee = "NOT ASSIGNED!!!"
else:
cur_assignee = cur_assignee.displayName
if cur_status == "Resolved" or cur_status == "Closed":
fail("JIRA issue %s already has status '%s'" % (jira_id, cur_status))
print("=== JIRA %s ===" % jira_id)
print("summary\t\t%s\nassignee\t%s\nstatus\t\t%s\nurl\t\t%s/%s\n" %
(cur_summary, cur_assignee, cur_status, JIRA_BASE, jira_id))
versions = asf_jira.project_versions("SPARK")
versions = sorted(versions, key=lambda x: x.name, reverse=True)
versions = filter(lambda x: x.raw['released'] is False, versions)
# Consider only x.y.z versions
versions = filter(lambda x: re.match('\d+\.\d+\.\d+', x.name), versions)
default_fix_versions = map(lambda x: fix_version_from_branch(x, versions).name, merge_branches)
for v in default_fix_versions:
# Handles the case where we have forked a release branch but not yet made the release.
# In this case, if the PR is committed to the master branch and the release branch, we
# only consider the release branch to be the fix version. E.g. it is not valid to have
# both 1.1.0 and 1.0.0 as fix versions.
(major, minor, patch) = v.split(".")
if patch == "0":
previous = "%s.%s.%s" % (major, int(minor) - 1, 0)
if previous in default_fix_versions:
default_fix_versions = filter(lambda x: x != v, default_fix_versions)
default_fix_versions = ",".join(default_fix_versions)
fix_versions = input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions)
if fix_versions == "":
fix_versions = default_fix_versions
fix_versions = fix_versions.replace(" ", "").split(",")
def get_version_json(version_str):
return filter(lambda v: v.name == version_str, versions)[0].raw
jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)
resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
resolution = filter(lambda r: r.raw['name'] == "Fixed", asf_jira.resolutions())[0]
asf_jira.transition_issue(
jira_id, resolve["id"], fixVersions=jira_fix_versions,
comment=comment, resolution={'id': resolution.raw['id']})
print("Successfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions))
def choose_jira_assignee(issue, asf_jira):
"""
Prompt the user to choose who to assign the issue to in jira, given a list of candidates,
including the original reporter and all commentors
"""
while True:
try:
reporter = issue.fields.reporter
commentors = map(lambda x: x.author, issue.fields.comment.comments)
candidates = set(commentors)
candidates.add(reporter)
candidates = list(candidates)
print("JIRA is unassigned, choose assignee")
for idx, author in enumerate(candidates):
if author.key == "apachespark":
continue
annotations = ["Reporter"] if author == reporter else []
if author in commentors:
annotations.append("Commentor")
print("[%d] %s (%s)" % (idx, author.displayName, ",".join(annotations)))
raw_assignee = input(
"Enter number of user, or userid, to assign to (blank to leave unassigned):")
if raw_assignee == "":
return None
else:
try:
id = int(raw_assignee)
assignee = candidates[id]
except:
# assume it's a user id, and try to assign (might fail, we just prompt again)
assignee = asf_jira.user(raw_assignee)
asf_jira.assign_issue(issue.key, assignee.key)
return assignee
except:
traceback.print_exc()
print("Error assigning JIRA, try again (or leave blank and fix manually)")
def resolve_jira_issues(title, merge_branches, comment):
jira_ids = re.findall("SPARK-[0-9]{4,5}", title)
if len(jira_ids) == 0:
resolve_jira_issue(merge_branches, comment)
for jira_id in jira_ids:
resolve_jira_issue(merge_branches, comment, jira_id)
def standardize_jira_ref(text):
"""
Standardize the [SPARK-XXXXX] [MODULE] prefix
Converts "[SPARK-XXX][mllib] Issue", "[MLLib] SPARK-XXX. Issue" or "SPARK XXX [MLLIB]: Issue" to
"[SPARK-XXX][MLLIB] Issue"
>>> standardize_jira_ref(
... "[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful")
'[SPARK-5821][SQL] ParquetRelation2 CTAS should check if delete is successful'
>>> standardize_jira_ref(
... "[SPARK-4123][Project Infra][WIP]: Show new dependencies added in pull requests")
'[SPARK-4123][PROJECT INFRA][WIP] Show new dependencies added in pull requests'
>>> standardize_jira_ref("[MLlib] Spark 5954: Top by key")
'[SPARK-5954][MLLIB] Top by key'
>>> standardize_jira_ref("[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl")
'[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl'
>>> standardize_jira_ref(
    ...     "SPARK-1094 Support MiMa for reporting binary compatibility across versions.")
    '[SPARK-1094] Support MiMa for reporting binary compatibility across versions.'
>>> standardize_jira_ref("[WIP] [SPARK-1146] Vagrant support for Spark")
'[SPARK-1146][WIP] Vagrant support for Spark'
>>> standardize_jira_ref(
... "SPARK-1032. If Yarn app fails before registering, app master stays aroun...")
'[SPARK-1032] If Yarn app fails before registering, app master stays aroun...'
>>> standardize_jira_ref(
... "[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.")
'[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.'
>>> standardize_jira_ref("Additional information for users building from source code")
'Additional information for users building from source code'
"""
jira_refs = []
components = []
# If the string is compliant, no need to process any further
if (re.search(r'^\[SPARK-[0-9]{3,6}\](\[[A-Z0-9_\s,]+\] )+\S+', text)):
return text
# Extract JIRA ref(s):
pattern = re.compile(r'(SPARK[-\s]*[0-9]{3,6})+', re.IGNORECASE)
for ref in pattern.findall(text):
# Add brackets, replace spaces with a dash, & convert to uppercase
jira_refs.append('[' + re.sub(r'\s+', '-', ref.upper()) + ']')
text = text.replace(ref, '')
# Extract spark component(s):
# Look for alphanumeric chars, spaces, dashes, periods, and/or commas
pattern = re.compile(r'(\[[\w\s,-\.]+\])', re.IGNORECASE)
for component in pattern.findall(text):
components.append(component.upper())
text = text.replace(component, '')
# Cleanup any remaining symbols:
pattern = re.compile(r'^\W+(.*)', re.IGNORECASE)
if (pattern.search(text) is not None):
text = pattern.search(text).groups()[0]
# Assemble full text (JIRA ref(s), module(s), remaining text)
clean_text = ''.join(jira_refs).strip() + ''.join(components).strip() + " " + text.strip()
# Replace multiple spaces with a single space, e.g. if no jira refs and/or components were
# included
clean_text = re.sub(r'\s+', ' ', clean_text.strip())
return clean_text
def get_current_ref():
ref = run_cmd("git rev-parse --abbrev-ref HEAD").strip()
if ref == 'HEAD':
# The current ref is a detached HEAD, so grab its SHA.
return run_cmd("git rev-parse HEAD").strip()
else:
return ref
def main():
global original_head
os.chdir(SPARK_HOME)
original_head = get_current_ref()
branches = get_json("%s/branches" % GITHUB_API_BASE)
branch_names = filter(lambda x: x.startswith("branch-"), [x['name'] for x in branches])
# Assumes branch names can be sorted lexicographically
latest_branch = sorted(branch_names, reverse=True)[0]
pr_num = input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
pr_events = get_json("%s/issues/%s/events" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
# Decide whether to use the modified title or not
modified_title = standardize_jira_ref(pr["title"])
if modified_title != pr["title"]:
print("I've re-written the title as follows to match the standard format:")
print("Original: %s" % pr["title"])
print("Modified: %s" % modified_title)
result = input("Would you like to use the modified title? (y/n): ")
if result.lower() == "y":
title = modified_title
print("Using modified title:")
else:
title = pr["title"]
print("Using original title:")
print(title)
else:
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
# Merged pull requests don't appear as merged in the GitHub API;
# Instead, they're closed by asfgit.
merge_commits = \
[e for e in pr_events if e["actor"]["login"] == "asfgit" and e["event"] == "closed"]
if merge_commits:
merge_hash = merge_commits[0]["commit_id"]
message = get_json("%s/commits/%s" % (GITHUB_API_BASE, merge_hash))["commit"]["message"]
print("Pull request %s has already been merged, assuming you want to backport" % pr_num)
commit_is_downloaded = run_cmd(['git', 'rev-parse', '--quiet', '--verify',
"%s^{commit}" % merge_hash]).strip() != ""
if not commit_is_downloaded:
fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)
print("Found commit %s:\n%s" % (merge_hash, message))
cherry_pick(pr_num, merge_hash, latest_branch)
sys.exit(0)
if not bool(pr["mergeable"]):
msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
"Continue? (experts only!)"
continue_maybe(msg)
print("\n=== Pull Request #%s ===" % pr_num)
print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" %
(title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref, title, body, pr_repo_desc)
pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
while input("\n%s (y/n): " % pick_prompt).lower() == "y":
merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)]
if JIRA_IMPORTED:
if JIRA_USERNAME and JIRA_PASSWORD:
continue_maybe("Would you like to update an associated JIRA?")
jira_comment = "Issue resolved by pull request %s\n[%s/%s]" % \
(pr_num, GITHUB_BASE, pr_num)
resolve_jira_issues(title, merged_refs, jira_comment)
else:
print("JIRA_USERNAME and JIRA_PASSWORD not set")
print("Exiting without trying to close the associated JIRA.")
else:
print("Could not find jira-python library. Run 'sudo pip install jira' to install.")
print("Exiting without trying to close the associated JIRA.")
if __name__ == "__main__":
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
sys.exit(-1)
try:
main()
except:
clean_up()
raise
|
[] |
[] |
[
"PR_REMOTE_NAME",
"JIRA_PASSWORD",
"GITHUB_OAUTH_KEY",
"JIRA_USERNAME",
"SPARK_HOME",
"PUSH_REMOTE_NAME"
] |
[]
|
["PR_REMOTE_NAME", "JIRA_PASSWORD", "GITHUB_OAUTH_KEY", "JIRA_USERNAME", "SPARK_HOME", "PUSH_REMOTE_NAME"]
|
python
| 6 | 0 | |
vendor/github.com/mweagle/Sparta/aws/cloudformation/util.go
|
package cloudformation
import (
"bytes"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/aws-sdk-go/service/lambda"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
gocf "github.com/mweagle/go-cloudformation"
"io"
"io/ioutil"
"math/rand"
"os"
"regexp"
"strconv"
"strings"
"text/template"
"time"
)
var cloudFormationStackTemplateMap map[string]*gocf.Template
func init() {
cloudFormationStackTemplateMap = make(map[string]*gocf.Template, 0)
rand.Seed(time.Now().Unix())
}
// RE to ensure CloudFormation compatible resource names
// Issue: https://github.com/mweagle/Sparta/issues/8
// Ref: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/resources-section-structure.html
var reCloudFormationInvalidChars = regexp.MustCompile("[^A-Za-z0-9]+")
////////////////////////////////////////////////////////////////////////////////
// Private
////////////////////////////////////////////////////////////////////////////////
// BEGIN - templateConverter
// Struct to encapsulate transforming data into
type templateConverter struct {
templateReader io.Reader
additionalTemplateProps map[string]interface{}
// internals
expandedTemplate string
contents []gocf.Stringable
conversionError error
}
func (converter *templateConverter) expandTemplate() *templateConverter {
if nil != converter.conversionError {
return converter
}
templateDataBytes, templateDataErr := ioutil.ReadAll(converter.templateReader)
if nil != templateDataErr {
converter.conversionError = templateDataErr
return converter
}
templateData := string(templateDataBytes)
parsedTemplate, templateErr := template.New("CloudFormation").Parse(templateData)
if nil != templateErr {
		converter.conversionError = templateErr
return converter
}
output := &bytes.Buffer{}
executeErr := parsedTemplate.Execute(output, converter.additionalTemplateProps)
if nil != executeErr {
converter.conversionError = executeErr
return converter
}
converter.expandedTemplate = output.String()
return converter
}
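// parseData scans each line of the expanded template for inline intrinsic
// expressions (Ref, Fn::GetAtt, Fn::FindInMap) and splits the text into
// gocf.Stringable parts that results() later joins back together.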
func (converter *templateConverter) parseData() *templateConverter {
if converter.conversionError != nil {
return converter
}
reAWSProp := regexp.MustCompile("\\{\\s*\"\\s*(Ref|Fn::GetAtt|Fn::FindInMap)")
splitData := strings.Split(converter.expandedTemplate, "\n")
splitDataLineCount := len(splitData)
for eachLineIndex, eachLine := range splitData {
curContents := eachLine
for len(curContents) != 0 {
matchInfo := reAWSProp.FindStringSubmatchIndex(curContents)
if nil != matchInfo {
// If there's anything at the head, push it.
if matchInfo[0] != 0 {
head := curContents[0:matchInfo[0]]
converter.contents = append(converter.contents, gocf.String(fmt.Sprintf("%s", head)))
curContents = curContents[len(head):]
}
// There's at least one match...find the closing brace...
var parsed map[string]interface{}
for indexPos, eachChar := range curContents {
if string(eachChar) == "}" {
testBlock := curContents[0 : indexPos+1]
err := json.Unmarshal([]byte(testBlock), &parsed)
if err == nil {
parsedContents, parsedContentsErr := parseFnJoinExpr(parsed)
if nil != parsedContentsErr {
converter.conversionError = parsedContentsErr
return converter
}
converter.contents = append(converter.contents, parsedContents)
curContents = curContents[indexPos+1:]
if len(curContents) <= 0 && (eachLineIndex < (splitDataLineCount - 1)) {
converter.contents = append(converter.contents, gocf.String("\n"))
}
break
}
}
}
if nil == parsed {
// We never did find the end...
converter.conversionError = fmt.Errorf("Invalid CloudFormation JSON expression on line: %s", eachLine)
return converter
}
} else {
				// No match; append the remainder, with a trailing newline only if another line follows
newlineValue := ""
if eachLineIndex < (splitDataLineCount - 1) {
newlineValue = "\n"
}
				// Append the remaining text (plus the newline) if it is non-empty
appendLine := fmt.Sprintf("%s%s", curContents, newlineValue)
if len(appendLine) != 0 {
converter.contents = append(converter.contents, gocf.String(appendLine))
}
break
}
}
}
return converter
}
func (converter *templateConverter) results() (*gocf.StringExpr, error) {
if nil != converter.conversionError {
return nil, converter.conversionError
}
return gocf.Join("", converter.contents...), nil
}
// END - templateConverter
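// existingStackTemplate fetches the current CloudFormation template for serviceName,
// memoizing the result; a nil template is returned when the stack does not exist yet.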
func existingStackTemplate(serviceName string,
session *session.Session,
logger *logrus.Logger) (*gocf.Template, error) {
template, templateExists := cloudFormationStackTemplateMap[serviceName]
if !templateExists {
templateParams := &cloudformation.GetTemplateInput{
StackName: aws.String(serviceName),
}
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Info("Fetching existing CloudFormation template")
cloudformationSvc := cloudformation.New(session)
rawTemplate, rawTemplateErr := cloudformationSvc.GetTemplate(templateParams)
if nil != rawTemplateErr {
if strings.Contains(rawTemplateErr.Error(), "does not exist") {
template = nil
} else {
return nil, rawTemplateErr
}
} else {
t := gocf.Template{}
jsonDecodeErr := json.NewDecoder(strings.NewReader(*rawTemplate.TemplateBody)).Decode(&t)
if nil != jsonDecodeErr {
return nil, jsonDecodeErr
}
template = &t
}
cloudFormationStackTemplateMap[serviceName] = template
} else {
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Debug("Using cached CloudFormation Template resources")
}
return template, nil
}
func updateStackViaChangeSet(serviceName string,
cfTemplate *gocf.Template,
cfTemplateURL string,
awsTags []*cloudformation.Tag,
awsCloudFormation *cloudformation.CloudFormation,
logger *logrus.Logger) error {
// Create a change set name...
changeSetRequestName := CloudFormationResourceName(fmt.Sprintf("%sChangeSet", serviceName))
_, changesErr := CreateStackChangeSet(changeSetRequestName,
serviceName,
cfTemplate,
cfTemplateURL,
awsTags,
awsCloudFormation,
logger)
if nil != changesErr {
return changesErr
}
//////////////////////////////////////////////////////////////////////////////
// Apply the change
executeChangeSetInput := cloudformation.ExecuteChangeSetInput{
ChangeSetName: aws.String(changeSetRequestName),
StackName: aws.String(serviceName),
}
executeChangeSetOutput, executeChangeSetError := awsCloudFormation.ExecuteChangeSet(&executeChangeSetInput)
logger.WithFields(logrus.Fields{
"ExecuteChangeSetOutput": executeChangeSetOutput,
}).Debug("ExecuteChangeSet result")
if nil == executeChangeSetError {
logger.WithFields(logrus.Fields{
"StackName": serviceName,
}).Info("Issued ExecuteChangeSet request")
}
return executeChangeSetError
}
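// existingLambdaResourceVersions looks up the Lambda function behind lambdaResourceName
// in the serviceName stack and returns its published versions, or nil if either the
// stack or the function does not exist.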
func existingLambdaResourceVersions(serviceName string,
lambdaResourceName string,
session *session.Session,
logger *logrus.Logger) (*lambda.ListVersionsByFunctionOutput, error) {
errorIsNotExist := func(apiError error) bool {
return apiError != nil && strings.Contains(apiError.Error(), "does not exist")
}
logger.WithFields(logrus.Fields{
"ResourceName": lambdaResourceName,
}).Info("Fetching existing function versions")
cloudFormationSvc := cloudformation.New(session)
describeParams := &cloudformation.DescribeStackResourceInput{
StackName: aws.String(serviceName),
LogicalResourceId: aws.String(lambdaResourceName),
}
describeResponse, describeResponseErr := cloudFormationSvc.DescribeStackResource(describeParams)
logger.WithFields(logrus.Fields{
"Response": describeResponse,
"ResponseErr": describeResponseErr,
}).Debug("Describe response")
if errorIsNotExist(describeResponseErr) {
return nil, nil
} else if describeResponseErr != nil {
return nil, describeResponseErr
}
listVersionsParams := &lambda.ListVersionsByFunctionInput{
FunctionName: describeResponse.StackResourceDetail.PhysicalResourceId,
MaxItems: aws.Int64(128),
}
lambdaSvc := lambda.New(session)
listVersionsResp, listVersionsRespErr := lambdaSvc.ListVersionsByFunction(listVersionsParams)
if errorIsNotExist(listVersionsRespErr) {
return nil, nil
} else if listVersionsRespErr != nil {
return nil, listVersionsRespErr
}
logger.WithFields(logrus.Fields{
"Response": listVersionsResp,
"ResponseErr": listVersionsRespErr,
}).Debug("ListVersionsByFunction")
return listVersionsResp, nil
}
func toExpressionSlice(input interface{}) ([]string, error) {
var expressions []string
slice, sliceOK := input.([]interface{})
if !sliceOK {
return nil, fmt.Errorf("Failed to convert to slice")
}
for _, eachValue := range slice {
switch str := eachValue.(type) {
case string:
expressions = append(expressions, str)
}
}
return expressions, nil
}
func parseFnJoinExpr(data map[string]interface{}) (*gocf.StringExpr, error) {
if len(data) <= 0 {
return nil, fmt.Errorf("FnJoinExpr data is empty")
}
for eachKey, eachValue := range data {
switch eachKey {
case "Ref":
return gocf.Ref(eachValue.(string)).String(), nil
case "Fn::GetAtt":
attrValues, attrValuesErr := toExpressionSlice(eachValue)
if nil != attrValuesErr {
return nil, attrValuesErr
}
if len(attrValues) != 2 {
return nil, fmt.Errorf("Invalid params for Fn::GetAtt: %s", eachValue)
}
return gocf.GetAtt(attrValues[0], attrValues[1]).String(), nil
case "Fn::FindInMap":
attrValues, attrValuesErr := toExpressionSlice(eachValue)
if nil != attrValuesErr {
return nil, attrValuesErr
}
if len(attrValues) != 3 {
return nil, fmt.Errorf("Invalid params for Fn::FindInMap: %s", eachValue)
}
return gocf.FindInMap(attrValues[0], gocf.String(attrValues[1]), gocf.String(attrValues[2])), nil
}
}
return nil, fmt.Errorf("Unsupported AWS Function detected: %#v", data)
}
func stackCapabilities(template *gocf.Template) []*string {
// Only require IAM capability if the definition requires it.
capabilities := make([]*string, 0)
for _, eachResource := range template.Resources {
if eachResource.Properties.CfnResourceType() == "AWS::IAM::Role" {
found := false
for _, eachElement := range capabilities {
found = (found || (*eachElement == "CAPABILITY_IAM"))
}
if !found {
capabilities = append(capabilities, aws.String("CAPABILITY_IAM"))
}
}
}
return capabilities
}
////////////////////////////////////////////////////////////////////////////////
// Public
////////////////////////////////////////////////////////////////////////////////
// S3AllKeysArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) for all bucket keys (`/*`). The bucket
// parameter may be either a string literal or a gocf expression such as
// gocf.Ref("myResource").
func S3AllKeysArnForBucket(bucket interface{}) *gocf.StringExpr {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
arnParts = append(arnParts, gocf.String("/*"))
return gocf.Join("", arnParts...).String()
}
// S3ArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) suitable for template reference. The bucket
// parameter may be either a string literal or a gocf expression such as
// gocf.Ref("myResource").
func S3ArnForBucket(bucket interface{}) *gocf.StringExpr {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
return gocf.Join("", arnParts...).String()
}
// MapToResourceTags transforms a go map[string]string to a CloudFormation-compliant
// Tags representation. See http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html
func MapToResourceTags(tagMap map[string]string) []interface{} {
var tags []interface{}
for eachKey, eachValue := range tagMap {
tags = append(tags, map[string]interface{}{
"Key": eachKey,
"Value": eachValue,
})
}
return tags
}
// ConvertToTemplateExpression transforms the templateData contents into
// an Fn::Join- compatible representation for template serialization.
// The templateData contents may include both golang text/template properties
// and single-line JSON Fn::Join supported serializations.
func ConvertToTemplateExpression(templateData io.Reader, additionalUserTemplateProperties map[string]interface{}) (*gocf.StringExpr, error) {
converter := &templateConverter{
templateReader: templateData,
additionalTemplateProps: additionalUserTemplateProperties,
}
return converter.expandTemplate().parseData().results()
}
// AutoIncrementingLambdaVersionInfo is dynamically populated during
// a call AddAutoIncrementingLambdaVersionResource. The VersionHistory
// is a map of published versions to their CloudFormation resource names
type AutoIncrementingLambdaVersionInfo struct {
// The version that will be published as part of this operation
CurrentVersion int
// The CloudFormation resource name that defines the
// AWS::Lambda::Version resource to be included with this operation
CurrentVersionResourceName string
// The version history that maps a published version value
// to its CloudFormation resource name. Used for defining lagging
// indicator Alias values
VersionHistory map[int]string
}
// AddAutoIncrementingLambdaVersionResource inserts a new
// AWS::Lambda::Version resource into the template. It uses
// the existing CloudFormation template representation
// to determine the version index to append. The returned
// map is from `versionIndex`->`CloudFormationResourceName`
// to support second-order AWS::Lambda::Alias records on a
// per-version level
func AddAutoIncrementingLambdaVersionResource(serviceName string,
lambdaResourceName string,
cfTemplate *gocf.Template,
logger *logrus.Logger) (*AutoIncrementingLambdaVersionInfo, error) {
// Get the template
session, sessionErr := session.NewSession()
if sessionErr != nil {
return nil, sessionErr
}
// Get the current template - for each version we find in the version listing
// we look up the actual CF resource and copy it into this template
existingStackDefinition, existingStackDefinitionErr := existingStackTemplate(serviceName,
session,
logger)
if nil != existingStackDefinitionErr {
return nil, existingStackDefinitionErr
}
existingVersions, existingVersionsErr := existingLambdaResourceVersions(serviceName,
lambdaResourceName,
session,
logger)
if nil != existingVersionsErr {
return nil, existingVersionsErr
}
// Initialize the auto incrementing version struct
autoIncrementingLambdaVersionInfo := AutoIncrementingLambdaVersionInfo{
CurrentVersion: 0,
CurrentVersionResourceName: "",
VersionHistory: make(map[int]string, 0),
}
lambdaVersionResourceName := func(versionIndex int) string {
return CloudFormationResourceName(lambdaResourceName,
"version",
strconv.Itoa(versionIndex))
}
if nil != existingVersions {
// Add the CloudFormation resource
logger.WithFields(logrus.Fields{
"VersionCount": len(existingVersions.Versions) - 1, // Ignore $LATEST
"ResourceName": lambdaResourceName,
}).Info("Total number of published versions")
for _, eachEntry := range existingVersions.Versions {
versionIndex, versionIndexErr := strconv.Atoi(*eachEntry.Version)
if nil == versionIndexErr {
// Find the existing resource...
versionResourceName := lambdaVersionResourceName(versionIndex)
if nil == existingStackDefinition {
return nil, fmt.Errorf("Unable to find existing Version resource in nil Template")
}
cfResourceDefinition, cfResourceDefinitionExists := existingStackDefinition.Resources[versionResourceName]
if !cfResourceDefinitionExists {
return nil, fmt.Errorf("Unable to find existing Version resource (Resource: %s, Version: %d) in template",
versionResourceName,
versionIndex)
}
cfTemplate.Resources[versionResourceName] = cfResourceDefinition
// Add the CloudFormation resource
logger.WithFields(logrus.Fields{
"Version": versionIndex,
"ResourceName": versionResourceName,
}).Debug("Preserving Lambda version")
// Store the state, tracking the latest version
autoIncrementingLambdaVersionInfo.VersionHistory[versionIndex] = versionResourceName
if versionIndex > autoIncrementingLambdaVersionInfo.CurrentVersion {
autoIncrementingLambdaVersionInfo.CurrentVersion = versionIndex
}
}
}
}
// Bump the version and add a new entry...
autoIncrementingLambdaVersionInfo.CurrentVersion++
versionResource := &gocf.LambdaVersion{
FunctionName: gocf.GetAtt(lambdaResourceName, "Arn").String(),
}
autoIncrementingLambdaVersionInfo.CurrentVersionResourceName = lambdaVersionResourceName(autoIncrementingLambdaVersionInfo.CurrentVersion)
cfTemplate.AddResource(autoIncrementingLambdaVersionInfo.CurrentVersionResourceName, versionResource)
// Log the version we're about to publish...
logger.WithFields(logrus.Fields{
"ResourceName": lambdaResourceName,
"StackVersion": autoIncrementingLambdaVersionInfo.CurrentVersion,
}).Info("Inserting new version resource")
return &autoIncrementingLambdaVersionInfo, nil
}
// StackEvents returns the slice of cloudformation.StackEvents for the given stackID or stackName
func StackEvents(stackID string,
eventFilterLowerBound time.Time,
awsSession *session.Session) ([]*cloudformation.StackEvent, error) {
cfService := cloudformation.New(awsSession)
var events []*cloudformation.StackEvent
nextToken := ""
for {
params := &cloudformation.DescribeStackEventsInput{
StackName: aws.String(stackID),
}
if len(nextToken) > 0 {
params.NextToken = aws.String(nextToken)
}
resp, err := cfService.DescribeStackEvents(params)
if nil != err {
return nil, err
}
for _, eachEvent := range resp.StackEvents {
if eachEvent.Timestamp.After(eventFilterLowerBound) {
events = append(events, eachEvent)
}
}
if nil == resp.NextToken {
break
} else {
nextToken = *resp.NextToken
}
}
return events, nil
}
// WaitForStackOperationCompleteResult encapsulates the stackInfo
// following a WaitForStackOperationComplete call
type WaitForStackOperationCompleteResult struct {
operationSuccessful bool
stackInfo *cloudformation.Stack
}
// WaitForStackOperationComplete is a blocking, polling based call that
// periodically fetches the stackID set of events and uses the state value
// to determine if an operation is complete
func WaitForStackOperationComplete(stackID string,
pollingMessage string,
awsCloudFormation *cloudformation.CloudFormation,
logger *logrus.Logger) (*WaitForStackOperationCompleteResult, error) {
result := &WaitForStackOperationCompleteResult{}
// Poll for the current stackID state, and
describeStacksInput := &cloudformation.DescribeStacksInput{
StackName: aws.String(stackID),
}
for waitComplete := false; !waitComplete; {
sleepDuration := time.Duration(11+rand.Int31n(13)) * time.Second
time.Sleep(sleepDuration)
describeStacksOutput, err := awsCloudFormation.DescribeStacks(describeStacksInput)
if nil != err {
// TODO - add retry iff we're RateExceeded due to collective access
return nil, err
}
if len(describeStacksOutput.Stacks) <= 0 {
return nil, fmt.Errorf("Failed to enumerate stack info: %v", *describeStacksInput.StackName)
}
result.stackInfo = describeStacksOutput.Stacks[0]
switch *(result.stackInfo).StackStatus {
case cloudformation.StackStatusCreateComplete,
cloudformation.StackStatusUpdateComplete:
result.operationSuccessful = true
waitComplete = true
case
// Include DeleteComplete as new provisions will automatically rollback
cloudformation.StackStatusDeleteComplete,
cloudformation.StackStatusCreateFailed,
cloudformation.StackStatusDeleteFailed,
cloudformation.StackStatusRollbackFailed,
cloudformation.StackStatusRollbackComplete,
cloudformation.StackStatusUpdateRollbackComplete:
result.operationSuccessful = false
waitComplete = true
default:
logger.Info(pollingMessage)
}
}
return result, nil
}
// CloudFormationResourceName returns a name suitable as a logical
// CloudFormation resource value. See http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/resources-section-structure.html
// for more information. The `prefix` value should provide a hint as to the
// resource type (eg, `SNSConfigurator`, `ImageTranscoder`). Note that the returned
// name is not content-addressable.
func CloudFormationResourceName(prefix string, parts ...string) string {
hash := sha1.New()
hash.Write([]byte(prefix))
if len(parts) <= 0 {
randValue := rand.Int63()
hash.Write([]byte(strconv.FormatInt(randValue, 10)))
} else {
for _, eachPart := range parts {
hash.Write([]byte(eachPart))
}
}
resourceName := fmt.Sprintf("%s%s", prefix, hex.EncodeToString(hash.Sum(nil)))
// Ensure that any non alphanumeric characters are replaced with ""
return reCloudFormationInvalidChars.ReplaceAllString(resourceName, "x")
}
// UploadTemplate marshals the given cfTemplate and uploads it to the
// supplied bucket using the given KeyName
func UploadTemplate(serviceName string,
cfTemplate *gocf.Template,
s3Bucket string,
s3KeyName string,
awsSession *session.Session,
logger *logrus.Logger) (string, error) {
logger.WithFields(logrus.Fields{
"Key": s3KeyName,
"Bucket": s3Bucket,
}).Info("Uploading CloudFormation template")
s3Uploader := s3manager.NewUploader(awsSession)
// Serialize the template and upload it
cfTemplateJSON, err := json.Marshal(cfTemplate)
if err != nil {
logger.Error("Failed to Marshal CloudFormation template: ", err.Error())
return "", err
}
// Upload the actual CloudFormation template to S3 to maximize the template
// size limit
// Ref: http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_CreateStack.html
contentBody := string(cfTemplateJSON)
uploadInput := &s3manager.UploadInput{
Bucket: &s3Bucket,
Key: &s3KeyName,
ContentType: aws.String("application/json"),
Body: strings.NewReader(contentBody),
}
templateUploadResult, templateUploadResultErr := s3Uploader.Upload(uploadInput)
if nil != templateUploadResultErr {
return "", templateUploadResultErr
}
// Be transparent
logger.WithFields(logrus.Fields{
"URL": templateUploadResult.Location,
}).Info("Template uploaded")
return templateUploadResult.Location, nil
}
// StackExists returns whether the given stackName or stackID currently exists
func StackExists(stackNameOrID string, awsSession *session.Session, logger *logrus.Logger) (bool, error) {
cf := cloudformation.New(awsSession)
describeStacksInput := &cloudformation.DescribeStacksInput{
StackName: aws.String(stackNameOrID),
}
describeStacksOutput, err := cf.DescribeStacks(describeStacksInput)
logger.WithFields(logrus.Fields{
"DescribeStackOutput": describeStacksOutput,
}).Debug("DescribeStackOutput results")
exists := false
if err != nil {
logger.WithFields(logrus.Fields{
"DescribeStackOutputError": err,
}).Debug("DescribeStackOutput")
// If the stack doesn't exist, then no worries
if strings.Contains(err.Error(), "does not exist") {
exists = false
} else {
return false, err
}
} else {
exists = true
}
return exists, nil
}
// CreateStackChangeSet returns the DescribeChangeSetOutput
// for a given stack transformation
func CreateStackChangeSet(changeSetRequestName string,
serviceName string,
cfTemplate *gocf.Template,
templateURL string,
awsTags []*cloudformation.Tag,
awsCloudFormation *cloudformation.CloudFormation,
logger *logrus.Logger) (*cloudformation.DescribeChangeSetOutput, error) {
capabilities := stackCapabilities(cfTemplate)
changeSetInput := &cloudformation.CreateChangeSetInput{
Capabilities: capabilities,
ChangeSetName: aws.String(changeSetRequestName),
ClientToken: aws.String(changeSetRequestName),
Description: aws.String(fmt.Sprintf("Change set for service: %s", serviceName)),
StackName: aws.String(serviceName),
TemplateURL: aws.String(templateURL),
}
if len(awsTags) != 0 {
changeSetInput.Tags = awsTags
}
_, changeSetError := awsCloudFormation.CreateChangeSet(changeSetInput)
if nil != changeSetError {
return nil, changeSetError
}
logger.WithFields(logrus.Fields{
"StackName": serviceName,
}).Info("Issued CreateChangeSet request")
describeChangeSetInput := cloudformation.DescribeChangeSetInput{
ChangeSetName: aws.String(changeSetRequestName),
StackName: aws.String(serviceName),
}
var describeChangeSetOutput *cloudformation.DescribeChangeSetOutput
for i := 0; i != 5; i++ {
sleepDuration := time.Duration(3+rand.Int31n(5)) * time.Second
time.Sleep(sleepDuration)
changeSetOutput, describeChangeSetError := awsCloudFormation.DescribeChangeSet(&describeChangeSetInput)
if nil != describeChangeSetError {
return nil, describeChangeSetError
}
describeChangeSetOutput = changeSetOutput
if nil != describeChangeSetOutput &&
*describeChangeSetOutput.Status == "CREATE_COMPLETE" {
break
}
}
if nil == describeChangeSetOutput {
return nil, fmt.Errorf("ChangeSet failed to stabilize: %s", changeSetRequestName)
}
logger.WithFields(logrus.Fields{
"DescribeChangeSetOutput": describeChangeSetOutput,
}).Debug("DescribeChangeSet result")
//////////////////////////////////////////////////////////////////////////////
// If there aren't any changes, then skip it...
if len(describeChangeSetOutput.Changes) <= 0 {
logger.WithFields(logrus.Fields{
"StackName": serviceName,
}).Info("No changes detected for service")
// Delete it...
_, deleteChangeSetResultErr := DeleteChangeSet(serviceName,
changeSetRequestName,
awsCloudFormation)
return nil, deleteChangeSetResultErr
}
return describeChangeSetOutput, nil
}
// DeleteChangeSet is a utility function that attempts to delete
// an existing CloudFormation change set, with a bit of retry
// logic for the case where the change set is still CREATE_IN_PROGRESS.
func DeleteChangeSet(stackName string,
changeSetRequestName string,
awsCloudFormation *cloudformation.CloudFormation) (*cloudformation.DeleteChangeSetOutput, error) {
// Delete request...
deleteChangeSetInput := cloudformation.DeleteChangeSetInput{
ChangeSetName: aws.String(changeSetRequestName),
StackName: aws.String(stackName),
}
var delChangeSetResultErr error
for i := 0; i != 5; i++ {
deleteChangeSetResults, deleteChangeSetResultErr :=
awsCloudFormation.DeleteChangeSet(&deleteChangeSetInput)
if nil == deleteChangeSetResultErr {
return deleteChangeSetResults, nil
} else if strings.Contains(deleteChangeSetResultErr.Error(), "CREATE_IN_PROGRESS") {
delChangeSetResultErr = deleteChangeSetResultErr
sleepDuration := time.Duration(1+rand.Int31n(5)) * time.Second
time.Sleep(sleepDuration)
} else {
return nil, deleteChangeSetResultErr
}
}
return nil, delChangeSetResultErr
}
// ConvergeStackState ensures that the serviceName converges to the template
// state defined by cfTemplate. This function establishes a polling loop to determine
// when the stack operation has completed.
func ConvergeStackState(serviceName string,
cfTemplate *gocf.Template,
templateURL string,
tags map[string]string,
startTime time.Time,
awsSession *session.Session,
logger *logrus.Logger) (*cloudformation.Stack, error) {
awsCloudFormation := cloudformation.New(awsSession)
// Update the tags
awsTags := make([]*cloudformation.Tag, 0)
if nil != tags {
for eachKey, eachValue := range tags {
awsTags = append(awsTags,
&cloudformation.Tag{
Key: aws.String(eachKey),
Value: aws.String(eachValue),
})
}
}
exists, existsErr := StackExists(serviceName, awsSession, logger)
if nil != existsErr {
return nil, existsErr
}
stackID := ""
if exists {
updateErr := updateStackViaChangeSet(serviceName,
cfTemplate,
templateURL,
awsTags,
awsCloudFormation,
logger)
if nil != updateErr {
return nil, updateErr
}
stackID = serviceName
} else {
// Create stack
createStackInput := &cloudformation.CreateStackInput{
StackName: aws.String(serviceName),
TemplateURL: aws.String(templateURL),
TimeoutInMinutes: aws.Int64(20),
OnFailure: aws.String(cloudformation.OnFailureDelete),
Capabilities: stackCapabilities(cfTemplate),
}
if len(awsTags) != 0 {
createStackInput.Tags = awsTags
}
createStackResponse, createStackResponseErr := awsCloudFormation.CreateStack(createStackInput)
if nil != createStackResponseErr {
return nil, createStackResponseErr
}
logger.WithFields(logrus.Fields{
"StackID": *createStackResponse.StackId,
}).Info("Creating stack")
stackID = *createStackResponse.StackId
}
// Wait for the operation to succeed
pollingMessage := "Waiting for CloudFormation operation to complete"
convergeResult, convergeErr := WaitForStackOperationComplete(stackID,
pollingMessage,
awsCloudFormation,
logger)
if nil != convergeErr {
return nil, convergeErr
}
// If it didn't work, then output some failure information
if !convergeResult.operationSuccessful {
// Get the stack events and find the ones that failed.
events, err := StackEvents(stackID, startTime, awsSession)
if nil != err {
return nil, err
}
logger.Error("Stack provisioning error")
for _, eachEvent := range events {
switch *eachEvent.ResourceStatus {
case cloudformation.ResourceStatusCreateFailed,
cloudformation.ResourceStatusDeleteFailed,
cloudformation.ResourceStatusUpdateFailed:
errMsg := fmt.Sprintf("\tError ensuring %s (%s): %s",
aws.StringValue(eachEvent.ResourceType),
aws.StringValue(eachEvent.LogicalResourceId),
aws.StringValue(eachEvent.ResourceStatusReason))
logger.Error(errMsg)
default:
// NOP
}
}
return nil, fmt.Errorf("Failed to provision: %s", serviceName)
} else if nil != convergeResult.stackInfo.Outputs {
for _, eachOutput := range convergeResult.stackInfo.Outputs {
logger.WithFields(logrus.Fields{
"Key": aws.StringValue(eachOutput.OutputKey),
"Value": aws.StringValue(eachOutput.OutputValue),
"Description": aws.StringValue(eachOutput.Description),
}).Info("Stack output")
}
}
return convergeResult.stackInfo, nil
}
// If the platform specific implementation of user.Current()
// isn't available, go get something that's a "stable" user
// name
func defaultUserName() string {
userName := os.Getenv("USER")
if "" == userName {
userName = os.Getenv("USERNAME")
}
if "" == userName {
userName = fmt.Sprintf("user%d", os.Getuid())
}
return userName
}
// UserScopedStackName returns a CloudFormation stack
// name that takes into account the current username
/*
A stack name can contain only alphanumeric characters
(case sensitive) and hyphens. It must start with an alphabetic
character and cannot be longer than 128 characters.
*/
func UserScopedStackName(basename string) string {
platformUserName := platformUserName()
if platformUserName == "" {
return basename
}
userName := strings.Replace(platformUserName, " ", "-", -1)
return fmt.Sprintf("%s-%s", basename, userName)
}
|
[
"\"USER\"",
"\"USERNAME\""
] |
[] |
[
"USER",
"USERNAME"
] |
[]
|
["USER", "USERNAME"]
|
go
| 2 | 0 | |
text/src/autogluon/text/text_prediction/mx/models.py
|
import numpy as np
import scipy.special
import os
import math
import logging
import pandas as pd
import warnings
import time
import json
import pickle
import functools
import tqdm
from typing import Tuple
from autogluon.core.scheduler.scheduler_factory import scheduler_factory
from autogluon.core.utils import set_logger_verbosity
from sklearn.preprocessing import LabelEncoder
import mxnet as mx
from mxnet.util import use_np
from mxnet.lr_scheduler import PolyScheduler, CosineScheduler
from mxnet.gluon.data import DataLoader
from autogluon_contrib_nlp.models import get_backbone
from autogluon_contrib_nlp.lr_scheduler import InverseSquareRootScheduler
from autogluon_contrib_nlp.utils.config import CfgNode
from autogluon_contrib_nlp.utils.misc import grouper, \
count_parameters, repeat, get_mxnet_available_ctx
from autogluon_contrib_nlp.utils.parameter import move_to_ctx, clip_grad_global_norm
from autogluon.core import args, space
from autogluon.core.utils import in_ipynb, verbosity2loglevel
from autogluon.core.utils.utils import get_cpu_count, get_gpu_count
from autogluon.core.utils.loaders import load_pkl, load_pd
from autogluon.core.task.base import compile_scheduler_options_v2
from autogluon.core.task.base.base_task import schedulers
from autogluon.core.metrics import get_metric, Scorer
from autogluon.core.utils.multiprocessing_utils import force_forkserver
from autogluon.core.dataset import TabularDataset
from autogluon.core.decorator import sample_config
from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION
from autogluon.core.scheduler.reporter import FakeReporter
from .modules import MultiModalWithPretrainedTextNN
from .preprocessing import MultiModalTextFeatureProcessor, base_preprocess_cfg,\
MultiModalTextBatchify, get_stats_string, auto_shrink_max_length, get_cls_sep_id
from .utils import average_checkpoints, set_seed
from .. import constants as _C
from ..utils import logging_config
from ..presets import ag_text_presets
from ... import version
logger = logging.getLogger(__name__) # return logger
@use_np
def get_optimizer(cfg, updates_per_epoch):
"""
Parameters
----------
cfg
Configuration
updates_per_epoch
The number of updates per training epoch
Returns
-------
optimizer
The optimizer
optimizer_params
Optimization parameters
max_update
Maximum update
"""
max_update = max(int(np.ceil(updates_per_epoch * cfg.num_train_epochs)), 3)
warmup_steps = int(np.ceil(updates_per_epoch * cfg.num_train_epochs * cfg.warmup_portion))
if cfg.lr_scheduler == 'triangular':
lr_scheduler = PolyScheduler(max_update=max_update,
base_lr=cfg.lr,
warmup_begin_lr=cfg.begin_lr,
pwr=1,
final_lr=cfg.final_lr,
warmup_steps=warmup_steps,
warmup_mode='linear')
elif cfg.lr_scheduler == 'inv_sqrt':
lr_scheduler = InverseSquareRootScheduler(warmup_steps=warmup_steps,
base_lr=cfg.lr,
warmup_init_lr=cfg.begin_lr)
elif cfg.lr_scheduler == 'constant':
lr_scheduler = None
elif cfg.lr_scheduler == 'cosine':
lr_scheduler = CosineScheduler(max_update=max_update,
base_lr=cfg.lr,
final_lr=cfg.final_lr,
warmup_steps=warmup_steps,
warmup_begin_lr=cfg.begin_lr)
else:
raise ValueError('Unsupported lr_scheduler="{}"'
.format(cfg.lr_scheduler))
optimizer_params = {'learning_rate': cfg.lr,
'wd': cfg.wd,
'lr_scheduler': lr_scheduler}
optimizer = cfg.optimizer
additional_params = {key: value for key, value in cfg.optimizer_params}
optimizer_params.update(additional_params)
return optimizer, optimizer_params, max_update
@use_np
def apply_layerwise_decay(model, layerwise_decay, backbone_name, not_included=None):
"""Apply the layer-wise gradient decay
.. math::
lr = lr * layerwise_decay^(max_depth - layer_depth)
Parameters:
----------
model
The backbone model
layerwise_decay: int
layer-wise decay power
not_included: list of str
A list or parameter names that not included in the layer-wise decay
"""
if not_included is None:
not_included = []
    # Consider the task-specific fine-tuning layer as the last layer, followed by the pooler.
    # Under this setting, the embedding parameters get the smallest learning rate.
if 'albert' in backbone_name:
# Skip if it is the ALBERT model.
return
if 'electra' in backbone_name:
# For ELECTRA, it's called all_encoder_layers
all_layers = model.encoder.all_encoder_layers
else:
# For other models, it's called all_layers
all_layers = model.encoder.all_layers
max_depth = len(all_layers) + 2
for key, value in model.collect_params().items():
if 'scores' in key:
value.lr_mult = layerwise_decay ** 0
if 'pool' in key:
value.lr_mult = layerwise_decay ** 1
if 'embed' in key:
value.lr_mult = layerwise_decay ** max_depth
for (layer_depth, layer) in enumerate(all_layers):
layer_params = layer.collect_params()
for key, value in layer_params.items():
for pn in not_included:
if pn in key:
continue
value.lr_mult = layerwise_decay ** (max_depth - (layer_depth + 1))
@use_np
def freeze_layers(model, backbone_name, num_trainable_layers):
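    """Freeze all but the last ``num_trainable_layers`` encoder layers by setting their
    grad_req to 'null'. ALBERT backbones are skipped and a negative value keeps every
    layer trainable."""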
if 'albert' in backbone_name:
# Skip if it is the ALBERT model.
return
if 'electra' in backbone_name:
# For ELECTRA, it's called all_encoder_layers
all_layers = model.encoder.all_encoder_layers
else:
# For other models, it's called all_layers
all_layers = model.encoder.all_layers
if num_trainable_layers < 0:
return
assert num_trainable_layers <= len(all_layers)
for i in range(len(all_layers) - num_trainable_layers):
for p in all_layers[i].collect_params().values():
p.grad_req = 'null'
return
def base_optimization_config():
"""The basic optimization phase"""
cfg = CfgNode()
cfg.lr_scheduler = 'triangular'
cfg.optimizer = 'adamw'
    cfg.early_stopping_patience = 20  # Stop if no better checkpoint is found after this many validation rounds
cfg.optimizer_params = [('beta1', 0.9),
('beta2', 0.999),
('epsilon', 1e-6),
('correct_bias', False)]
cfg.begin_lr = 0.0
cfg.batch_size = 128
    cfg.nbest = 1                      # Keep the top-K best-performing checkpoints
cfg.per_device_batch_size = 16 # Per-device batch-size
cfg.auto_per_device_batch_size = True # Whether to automatically determine the runnable
# per-device batch_size.
cfg.val_batch_size_mult = 2 # By default, we 2X the batch size for validation
cfg.lr = 1E-4
cfg.final_lr = 0.0
cfg.num_train_epochs = 10
cfg.warmup_portion = 0.1
cfg.layerwise_lr_decay = 0.8 # The layer_wise decay
cfg.wd = 0.01 # Weight Decay
cfg.max_grad_norm = 1.0 # Maximum Gradient Norm
    # Validation interval (in updates) = valid_frequency * num_updates_in_an_epoch
    cfg.valid_frequency = 0.2
    # Logging interval (in updates) = log_frequency * num_updates_in_an_epoch
    cfg.log_frequency = 0.05
return cfg
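# NOTE: Illustrative sketch only. A hypothetical helper showing how the global
# batch_size and per_device_batch_size above interact via gradient
# accumulation; it mirrors the arithmetic used later in train_function(), and
# the default argument values are assumptions.
def _example_accumulation_steps(batch_size=128, per_device_batch_size=16,
                                num_devices=2):
    """Hypothetical helper: forward/backward passes per optimizer update."""
    num_accumulated = int(np.ceil(batch_size / (per_device_batch_size * num_devices)))
    # 128 / (16 * 2) -> 4 accumulation steps per parameter update.
    return num_accumulated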
def base_model_config():
cfg = CfgNode()
cfg.backbone = CfgNode()
cfg.backbone.name = 'google_electra_base'
cfg.network = MultiModalWithPretrainedTextNN.get_cfg()
cfg.num_trainable_layers = -1 # Use a negative number to indicate that all layers are trainable.
cfg.insert_sep = True # Whether to insert sep tokens between columns
cfg.train_stochastic_chunk = False # Whether to sample a stochastic chunk from the training text
cfg.test_stochastic_chunk = False # Whether to use stochastic chunk in testing
    cfg.use_avg_nbest = False  # Whether to average the top-performing checkpoints and use the average as the final model.
                               # This usually gives better performance.
    cfg._disable_update = False  # A debugging hack that disables parameter updates. Not intended for normal use.
    cfg.inference_num_repeat = 1  # Number of times to repeat inference with randomness enabled; predictions are averaged.
return cfg
def base_misc_config():
cfg = CfgNode()
cfg.seed = 123
cfg.exp_dir = './autonlp'
return cfg
def base_cfg():
cfg = CfgNode()
cfg.version = 1
cfg.optimization = base_optimization_config()
cfg.preprocessing = base_preprocess_cfg()
cfg.model = base_model_config()
cfg.misc = base_misc_config()
cfg.freeze()
return cfg
@use_np
def _classification_regression_predict(net, dataloader, problem_type, label_scaler,
has_label=True, extract_embedding=False,
num_repeat=1):
"""
Parameters
----------
net
The network
dataloader
The dataloader
problem_type
Types of the labels
label_scaler
        Label scaler. We will reverse the centering process for regression problems
has_label
Whether label is used
extract_embedding
Whether to extract the embedding
num_repeat
The number of repeats to get the prediction.
If it is larger than 1, we will average the predictions.
If it is a regression problem, we will directly average the outputs.
If it is a classification problem, we will average the logits
Returns
-------
predictions
The predictions
"""
import warnings
# Filter mxnet warnings
warnings.filterwarnings('ignore', module='mxnet')
predictions = [[] for _ in range(num_repeat)]
use_logits = num_repeat > 1 and (problem_type == MULTICLASS or problem_type == BINARY)\
and not extract_embedding
if use_logits:
logits = [[] for _ in range(num_repeat)]
ctx_l = net.collect_params().list_ctx()
for i in range(num_repeat):
for sample_l in grouper(dataloader, len(ctx_l)):
iter_pred_l = []
if use_logits:
iter_logits_l = []
for sample, ctx in zip(sample_l, ctx_l):
if sample is None:
continue
if has_label:
batch_feature, batch_label = sample
else:
batch_feature = sample
batch_feature = move_to_ctx(batch_feature, ctx)
if extract_embedding:
_, embeddings = net(batch_feature)
iter_pred_l.append(embeddings)
else:
pred = net(batch_feature)
if problem_type == MULTICLASS or problem_type == BINARY:
if num_repeat > 1:
iter_logits_l.append(pred)
pred = mx.npx.softmax(pred, axis=-1)
iter_pred_l.append(pred)
for pred in iter_pred_l:
predictions[i].append(pred.asnumpy())
if use_logits:
for ele in iter_logits_l:
logits[i].append(ele.asnumpy())
predictions[i] = np.concatenate(predictions[i], axis=0)
if problem_type == REGRESSION and not extract_embedding:
predictions[i] = label_scaler.inverse_transform(predictions[i])[:, 0]
if use_logits:
logits[i] = np.concatenate(logits[i], axis=0)
if num_repeat == 1:
return predictions[0]
else:
if use_logits:
logits = np.stack(logits, axis=0).mean(axis=0)
return scipy.special.softmax(logits, axis=-1)
else:
return np.stack(predictions, axis=0).mean(axis=0)
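# NOTE: Illustrative sketch only. A hypothetical, numpy-only illustration of
# the repeat-averaging behaviour above for classification: logits are averaged
# across repeats first and normalized once afterwards. The hand-rolled softmax
# avoids assuming anything about the scipy import.
def _example_average_repeated_logits(logits_per_repeat):
    """Hypothetical helper: average a list of (#samples, #classes) logit arrays."""
    mean_logits = np.stack(logits_per_repeat, axis=0).mean(axis=0)
    shifted = mean_logits - mean_logits.max(axis=-1, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=-1, keepdims=True)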
def calculate_metric(scorer, ground_truth, predictions, problem_type):
if problem_type == BINARY and scorer.name == 'roc_auc':
# For ROC_AUC, we need to feed in the probability of positive class to the scorer.
return scorer._sign * scorer(ground_truth, predictions[:, 1])
else:
return scorer._sign * scorer(ground_truth, predictions)
@use_np
def train_function(args, reporter, train_df_path, tuning_df_path,
time_limit, time_start, base_config,
problem_type, column_types,
feature_columns, label_column,
log_metrics, eval_metric, ngpus_per_trial,
console_log, seed=None, verbosity=2):
"""
Parameters
----------
args
The arguments
reporter
Reporter of the HPO scheduler.
If it is set to None, we won't use the reporter and will just run a single trial.
train_df_path
Path of the training dataframe
tuning_df_path
Path of the tuning dataframe
time_limit
The time limit of calling this function
time_start
The starting timestamp of the experiment
base_config
Basic configuration
problem_type
Type of the problem.
column_types
Type of columns
feature_columns
The feature columns
label_column
Label column
log_metrics
Metrics for logging
eval_metric
The stopping metric
ngpus_per_trial
The number of GPUs to use per each trial
console_log
Whether to log it to console
seed
The random seed
verbosity
The verbosity
"""
import warnings
warnings.filterwarnings('ignore', module='mxnet')
warnings.filterwarnings('ignore', module='sklearn')
set_seed(seed)
is_fake_reporter = isinstance(reporter, FakeReporter)
if time_limit is not None:
start_train_tick = time.time()
time_left = time_limit - (start_train_tick - time_start)
if time_left <= 0:
if not is_fake_reporter:
reporter.terminate()
return
if is_fake_reporter:
search_space = args.rand
task_id = 0
else:
search_space = args['search_space']
task_id = args.task_id
# Get the log metric scorers
if isinstance(log_metrics, str):
log_metrics = [log_metrics]
    # Load the training and tuning data from the cached pickle files
train_data = pd.read_pickle(train_df_path)
tuning_data = pd.read_pickle(tuning_df_path)
log_metric_scorers = [get_metric(ele) for ele in log_metrics]
eval_metric_scorer = get_metric(eval_metric)
greater_is_better = eval_metric_scorer.greater_is_better
cfg = base_config.clone()
specified_values = []
for key in search_space.keys():
specified_values.append(key)
specified_values.append(search_space[key])
cfg.merge_from_list(specified_values)
exp_dir = cfg.misc.exp_dir
exp_dir = os.path.join(exp_dir, 'task{}'.format(task_id))
os.makedirs(exp_dir, exist_ok=True)
cfg.defrost()
cfg.misc.exp_dir = exp_dir
cfg.freeze()
logger = logging.getLogger()
set_logger_verbosity(verbosity, logger)
logging_config(folder=exp_dir, name='training', logger=logger, console=console_log,
level=logging.DEBUG,
console_level=verbosity2loglevel(verbosity))
logger.log(10, cfg)
# Load backbone model
backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \
= get_backbone(cfg.model.backbone.name)
if 'roberta' in cfg.model.backbone.name:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)
else:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg)
# Build Preprocessor + Preprocess the training dataset + Inference problem type
# TODO Dynamically cache the preprocessor that has been fitted.
if problem_type == MULTICLASS or problem_type == BINARY:
label_generator = LabelEncoder()
label_generator.fit(pd.concat([train_data[label_column], tuning_data[label_column]]))
else:
label_generator = None
preprocessor = MultiModalTextFeatureProcessor(column_types=column_types,
label_column=label_column,
tokenizer_name=cfg.model.backbone.name,
label_generator=label_generator,
cfg=cfg.preprocessing)
logger.info('Fitting and transforming the train data...')
train_dataset = preprocessor.fit_transform(train_data[feature_columns],
train_data[label_column])
with open(os.path.join(exp_dir, 'preprocessor.pkl'), 'wb') as of:
pickle.dump(preprocessor, of)
logger.info(f'Done! Preprocessor saved to {os.path.join(exp_dir, "preprocessor.pkl")}')
logger.log(10, 'Train Data')
logger.log(10, get_stats_string(preprocessor, train_dataset, is_train=True))
logger.info('Process dev set...')
tuning_dataset = preprocessor.transform(tuning_data[feature_columns],
tuning_data[label_column])
logger.info('Done!')
# Auto Max Length
if cfg.preprocessing.text.auto_max_length:
max_length = auto_shrink_max_length(
train_dataset,
insert_sep=cfg.model.insert_sep,
num_text_features=len(preprocessor.text_feature_names),
auto_max_length_quantile=cfg.preprocessing.text.auto_max_length_quantile,
round_to=cfg.preprocessing.text.auto_max_length_round_to,
max_length=cfg.preprocessing.text.max_length)
else:
max_length = cfg.preprocessing.text.max_length
train_stochastic_chunk = cfg.model.train_stochastic_chunk
test_stochastic_chunk = cfg.model.test_stochastic_chunk
inference_num_repeat = cfg.model.inference_num_repeat
if max_length < cfg.preprocessing.text.max_length:
inference_num_repeat = 1
cfg.defrost()
cfg.preprocessing.text.max_length = max_length
cfg.model.inference_num_repeat = inference_num_repeat
cfg.freeze()
with open(os.path.join(exp_dir, 'cfg.yml'), 'w') as f:
f.write(str(cfg))
logger.info(f'Max length for chunking text: {max_length}, '
f'Stochastic chunk: Train-{train_stochastic_chunk}/Test-{test_stochastic_chunk}, '
f'Test #repeat: {inference_num_repeat}.')
cls_id, sep_id = get_cls_sep_id(tokenizer)
train_batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(preprocessor.text_feature_names),
num_categorical_inputs=len(preprocessor.categorical_feature_names),
num_numerical_inputs=len(preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id, max_length=max_length,
mode='train', stochastic_chunk=train_stochastic_chunk,
insert_sep=cfg.model.insert_sep)
test_batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(preprocessor.text_feature_names),
num_categorical_inputs=len(preprocessor.categorical_feature_names),
num_numerical_inputs=len(preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id, max_length=max_length,
mode='test', stochastic_chunk=test_stochastic_chunk,
insert_sep=cfg.model.insert_sep)
# Get the ground-truth dev labels
gt_dev_labels = np.array([ele[-1] for ele in tuning_dataset])
if problem_type == REGRESSION:
gt_dev_labels = preprocessor.label_scaler.inverse_transform(np.expand_dims(gt_dev_labels,
axis=-1))[:, 0]
ctx_l = get_mxnet_available_ctx()
if ngpus_per_trial == 0:
ctx_l = [mx.cpu()]
else:
ctx_l = ctx_l[:ngpus_per_trial]
base_batch_size = cfg.optimization.per_device_batch_size
num_accumulated = int(np.ceil(cfg.optimization.batch_size / (base_batch_size * len(ctx_l))))
inference_base_batch_size = base_batch_size * cfg.optimization.val_batch_size_mult
train_dataloader = DataLoader(train_dataset,
batch_size=base_batch_size,
shuffle=True,
batchify_fn=train_batchify_fn)
dev_dataloader = DataLoader(tuning_dataset,
batch_size=inference_base_batch_size,
shuffle=False,
batchify_fn=test_batchify_fn)
if problem_type == REGRESSION:
out_shape = 1
elif problem_type == MULTICLASS:
out_shape = len(label_generator.classes_)
elif problem_type == BINARY:
assert len(label_generator.classes_) == 2
out_shape = 2
else:
raise NotImplementedError
net = MultiModalWithPretrainedTextNN(
text_backbone=text_backbone,
num_text_features=1,
num_categorical_features=len(preprocessor.categorical_feature_names),
num_numerical_features=len(preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(preprocessor.numerical_feature_names) == 0 else len(preprocessor.numerical_feature_names),
num_categories=preprocessor.categorical_num_categories,
get_embedding=False,
cfg=cfg.model.network,
out_shape=out_shape)
net.initialize_with_pretrained_backbone(backbone_params_path, ctx=ctx_l)
net.hybridize()
num_total_params, num_total_fixed_params = count_parameters(net.collect_params())
logger.info('#Total Params/Fixed Params={}/{}'.format(num_total_params,
num_total_fixed_params))
# Initialize the optimizer
updates_per_epoch = int(np.ceil(len(train_dataloader) / (num_accumulated * len(ctx_l))))
optimizer, optimizer_params, max_update \
= get_optimizer(cfg.optimization,
updates_per_epoch=updates_per_epoch)
valid_interval = int(math.ceil(cfg.optimization.valid_frequency * updates_per_epoch))
train_log_interval = int(math.ceil(cfg.optimization.log_frequency * updates_per_epoch))
if 0 < cfg.optimization.layerwise_lr_decay < 1:
apply_layerwise_decay(net.text_backbone,
cfg.optimization.layerwise_lr_decay,
backbone_name=cfg.model.backbone.name)
freeze_layers(net.text_backbone,
backbone_name=cfg.model.backbone.name,
num_trainable_layers=cfg.model.num_trainable_layers)
    # Do not apply weight decay to the LayerNorm parameters and bias terms
for _, v in net.collect_params('.*beta|.*gamma|.*bias').items():
v.wd_mult = 0.0
params = [p for p in net.collect_params().values() if p.grad_req != 'null']
trainer = mx.gluon.Trainer(params,
optimizer, optimizer_params,
update_on_kvstore=False)
# Set grad_req if gradient accumulation is required
if num_accumulated > 1:
logger.log(15, 'Using gradient accumulation.'
' Global batch size = {}'.format(cfg.optimization.batch_size))
for p in params:
p.grad_req = 'add'
net.collect_params().zero_grad()
train_loop_dataloader = grouper(repeat(train_dataloader), len(ctx_l))
log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]
log_num_samples_l = [0 for _ in ctx_l]
logging_start_tick = time.time()
nbest = cfg.optimization.nbest
    best_performance_score = []   # Scores of the current top-n best-performing checkpoints
    best_performance_update_idx = []   # Update indices at which those top-n checkpoints were saved
best_score = None
mx.npx.waitall()
no_better_rounds = 0
report_idx = 0
start_tick = time.time()
if time_limit is not None:
time_limit -= start_tick - time_start
if time_limit <= 0:
if not is_fake_reporter:
reporter.terminate()
return
best_report_items = None
report_local_jsonl_f = open(os.path.join(exp_dir, 'results_local.jsonl'), 'w')
logger.info(f'Local training results will be saved to '
f'{os.path.join(exp_dir, "results_local.jsonl")}.')
for update_idx in range(max_update):
for accum_idx in range(num_accumulated):
sample_l = next(train_loop_dataloader)
loss_l = []
for i, (sample, ctx) in enumerate(zip(sample_l, ctx_l)):
feature_batch, label_batch = sample
feature_batch = move_to_ctx(feature_batch, ctx)
label_batch = move_to_ctx(label_batch, ctx)
with mx.autograd.record():
pred = net(feature_batch)
if problem_type == MULTICLASS or problem_type == BINARY:
logits = mx.npx.log_softmax(pred, axis=-1)
loss = - mx.npx.pick(logits,
mx.np.expand_dims(label_batch, axis=-1))
elif problem_type == REGRESSION:
loss = mx.np.square(pred - mx.np.expand_dims(label_batch, axis=-1))
loss_l.append(loss.mean() / len(ctx_l) / num_accumulated)
log_loss_l[i] += loss_l[i] * len(ctx_l) * loss.shape[0] * num_accumulated
log_num_samples_l[i] += loss.shape[0]
for loss in loss_l:
loss.backward()
# Begin to update
trainer.allreduce_grads()
total_norm, ratio, is_finite = clip_grad_global_norm(params, cfg.optimization.max_grad_norm)
if not cfg.model._disable_update:
trainer.update(1.0, ignore_stale_grad=True)
# Clear after update
if num_accumulated > 1:
net.collect_params().zero_grad()
if (update_idx + 1) % train_log_interval == 0:
log_loss = sum([ele.as_in_ctx(ctx_l[0]) for ele in log_loss_l]).asnumpy()
log_num_samples = sum(log_num_samples_l)
logger.log(15,
'[Iter {}/{}, Epoch {}] train loss={:0.2e}, gnorm={:0.2e}, lr={:0.2e}, #samples processed={},'
' #sample per second={:.2f}. ETA={:.2f}min'
.format(update_idx + 1, max_update,
int(update_idx / updates_per_epoch),
log_loss / log_num_samples, total_norm, trainer.learning_rate,
log_num_samples,
log_num_samples / (time.time() - logging_start_tick),
(time.time() - start_tick) / (update_idx + 1)
* (max_update - update_idx - 1) / 60))
logging_start_tick = time.time()
log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]
log_num_samples_l = [0 for _ in ctx_l]
if (update_idx + 1) % valid_interval == 0 or (update_idx + 1) == max_update:
valid_start_tick = time.time()
dev_predictions = \
_classification_regression_predict(net,
dataloader=dev_dataloader,
problem_type=problem_type,
label_scaler=preprocessor.label_scaler,
has_label=False,
num_repeat=inference_num_repeat)
log_scores = [calculate_metric(scorer, gt_dev_labels,
dev_predictions,
problem_type)
for scorer in log_metric_scorers]
dev_score = calculate_metric(eval_metric_scorer, gt_dev_labels,
dev_predictions,
problem_type)
valid_time_spent = time.time() - valid_start_tick
find_better = False
find_topn_better = False
if len(best_performance_score) < nbest:
best_performance_score.append(dev_score)
best_performance_update_idx.append(update_idx + 1)
net.save_parameters(
os.path.join(exp_dir,
f'nbest_model{len(best_performance_score) - 1}.params'))
find_topn_better = True
                if best_score is None or (greater_is_better and dev_score >= best_score) \
                        or (not greater_is_better and dev_score <= best_score):
find_better = True
net.save_parameters(os.path.join(exp_dir, f'best_model.params'))
best_score = dev_score
else:
# First try to update the top-K
if greater_is_better:
if dev_score >= min(best_performance_score):
find_topn_better = True
replace_idx = np.argmin(best_performance_score)
best_performance_score[replace_idx] = dev_score
best_performance_update_idx[replace_idx] = update_idx + 1
net.save_parameters(
os.path.join(exp_dir, f'nbest_model{replace_idx}.params'))
if dev_score >= best_score:
find_better = True
net.save_parameters(os.path.join(exp_dir, f'best_model.params'))
best_score = dev_score
else:
if dev_score <= max(best_performance_score):
find_topn_better = True
replace_idx = np.argmax(best_performance_score)
best_performance_score[replace_idx] = dev_score
best_performance_update_idx[replace_idx] = update_idx + 1
net.save_parameters(
os.path.join(exp_dir, f'nbest_model{replace_idx}.params'))
if dev_score <= best_score:
find_better = True
net.save_parameters(os.path.join(exp_dir, f'best_model.params'))
best_score = dev_score
if not find_better:
no_better_rounds += 1
else:
no_better_rounds = 0
mx.npx.waitall()
loss_string = ', '.join(['{}={:0.4e}'.format(metric.name, score)
for score, metric in zip(log_scores, log_metric_scorers)])
logger.log(25, '[Iter {}/{}, Epoch {}] valid {}, time spent={:.3f}s,'
' total time spent={:.2f}min. Find new best={}, Find new top-{}={}'.format(
update_idx + 1, max_update, int(update_idx / updates_per_epoch),
loss_string, valid_time_spent, (time.time() - start_tick) / 60,
find_better, nbest, find_topn_better))
if reporter is not None:
report_items = [('iteration', update_idx + 1),
('report_idx', report_idx + 1),
('epoch', int(update_idx / updates_per_epoch))] + \
[(metric.name, score)
for score, metric in zip(log_scores, log_metric_scorers)] + \
[('find_better', find_better),
('find_new_topn', find_topn_better),
('nbest_stat', json.dumps([best_performance_score,
best_performance_update_idx])),
('elapsed_time', int(time.time() - start_tick))]
if eval_metric_scorer._sign < 0:
report_items.append(('reward_attr', -dev_score))
else:
report_items.append(('reward_attr', dev_score))
report_items.append(('eval_metric', eval_metric_scorer.name))
report_items.append(('exp_dir', exp_dir))
if find_better:
best_report_items = report_items
reporter(**dict(report_items))
report_local_jsonl_f.write(json.dumps(dict(report_items)) + '\n')
report_local_jsonl_f.flush()
report_idx += 1
if no_better_rounds >= cfg.optimization.early_stopping_patience:
logger.info('Early stopping patience reached!')
break
total_time_spent = time.time() - start_tick
if time_limit is not None and total_time_spent > time_limit:
break
    # Report the best result found during training
best_report_items_dict = dict(best_report_items)
best_report_items_dict['report_idx'] = report_idx + 1
reporter(**best_report_items_dict)
report_local_jsonl_f.write(json.dumps(best_report_items_dict) + '\n')
report_local_jsonl_f.close()
def get_recommended_resource(nthreads_per_trial=None,
ngpus_per_trial=None) -> Tuple[int, int]:
"""Get the recommended resources.
Internally, we will try to use GPU whenever it's possible. That means, we will use
a single GPU for finetuning.
Parameters
----------
nthreads_per_trial
The number of threads per trial provided by the user.
ngpus_per_trial
The number of GPUs per trial provided by the user.
Returns
-------
    nthreads_per_trial
        The recommended number of threads per trial.
    ngpus_per_trial
        The recommended number of GPUs per trial.
"""
if nthreads_per_trial is None and ngpus_per_trial is None:
nthreads_per_trial = get_cpu_count()
ngpus_per_trial = 1
elif nthreads_per_trial is not None and ngpus_per_trial is None:
ngpus_per_trial = 1
elif nthreads_per_trial is None and ngpus_per_trial is not None:
if ngpus_per_trial != 0:
num_parallel_jobs = get_gpu_count() // ngpus_per_trial
nthreads_per_trial = max(get_cpu_count() // num_parallel_jobs, 1)
else:
nthreads_per_trial = get_cpu_count()
nthreads_per_trial = min(nthreads_per_trial, get_cpu_count())
ngpus_per_trial = min(ngpus_per_trial, get_gpu_count())
assert nthreads_per_trial > 0 and ngpus_per_trial >= 0,\
'Invalid number of threads and number of GPUs.'
return nthreads_per_trial, ngpus_per_trial
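# NOTE: Illustrative sketch only. A hypothetical mirror of the branch of
# get_recommended_resource() where ngpus_per_trial is given but
# nthreads_per_trial is not; the CPU/GPU counts are passed in explicitly so the
# sketch does not depend on get_cpu_count()/get_gpu_count(), and the defaults
# are assumptions. The function is never called here.
def _example_recommended_resource(cpu_count=16, gpu_count=4, ngpus_per_trial=2):
    """Hypothetical helper: threads/GPUs per trial for a toy machine."""
    if ngpus_per_trial != 0:
        num_parallel_jobs = gpu_count // ngpus_per_trial
        nthreads_per_trial = max(cpu_count // num_parallel_jobs, 1)
    else:
        nthreads_per_trial = cpu_count
    # 16 CPUs, 4 GPUs, 2 GPUs per trial -> 2 parallel jobs, 8 threads each.
    return min(nthreads_per_trial, cpu_count), min(ngpus_per_trial, gpu_count)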
@use_np
class MultiModalTextModel:
"""Learner of the multimodal text data.
    It will be called when the user calls `fit()` in TextPredictor.
It is used for making predictions on new data and viewing information about
models trained during `fit()`.
"""
def __init__(self, column_types,
feature_columns,
label_columns,
problem_type,
eval_metric,
log_metrics,
output_directory=None):
"""Creates model object.
Parameters
----------
column_types
The column types.
feature_columns
Name of the feature columns
label_columns
Name of the label columns.
problem_type
Type of the problem
eval_metric
The evaluation metric
log_metrics
The metrics for logging
output_directory
The output directory to save the model
"""
super(MultiModalTextModel, self).__init__()
self._base_config = base_cfg()
self._base_config.defrost()
if output_directory is not None:
self._output_directory = self._base_config.misc.exp_dir = output_directory
self._base_config.misc.exp_dir = os.path.abspath(self._base_config.misc.exp_dir)
self._base_config.freeze()
self._output_directory = self._base_config.misc.exp_dir
self._column_types = column_types
self._eval_metric = eval_metric
self._log_metrics = log_metrics
self._label_columns = label_columns
self._feature_columns = feature_columns
self._problem_type = problem_type
# Need to be set in the train call
self._net = None # Network for training and inference
        self._embed_net = None  # Network for extracting the embedding
self._config = None
self._results = None
self._preprocessor = None
@property
def preprocessor(self):
return self._preprocessor
@property
def output_directory(self):
""" Get the output directory. The trained model and the training logs
will be saved to this folder """
return self._output_directory
@property
def label_columns(self):
"""Name of the label columns"""
return self._label_columns
@property
def problem_type(self):
"""Types of the problem"""
return self._problem_type
@property
def feature_columns(self):
"""Name of the features"""
return self._feature_columns
@property
def base_config(self):
"""The basic configuration. Internally, we will fill values in the base config by values
in the search space."""
return self._base_config
@property
def results(self):
"""Results of the final model"""
return self._results
@property
def config(self):
"""The configuration of the final trained model."""
return self._config
@property
def net(self):
return self._net
def train(self, train_data, tuning_data,
num_cpus=None,
num_gpus=None,
time_limit=None,
tune_kwargs=None,
search_space=None,
plot_results=False,
console_log=True,
seed=None,
verbosity=2):
"""The train function.
Parameters
----------
train_data
The training data
tuning_data
The tuning data
num_cpus
Number of CPUs for each trial
num_gpus
Number of GPUs for each trial
time_limit
The time limits
tune_kwargs
            Parameters of the HPO algorithms, e.g., the scheduling algorithm,
            the scheduling backend, and the search algorithm.
search_space
The search space options
plot_results
Whether to plot results or not
console_log
Whether to log into the console
seed
The seed
verbosity
Verbosity
"""
set_seed(seed)
set_logger_verbosity(verbosity, logger)
start_tick = time.time()
assert len(self._label_columns) == 1, 'Currently, we only support single label.'
# TODO(sxjscience) Try to support S3
os.makedirs(self._output_directory, exist_ok=True)
if search_space is None:
search_space = \
ag_text_presets.create('default')['models']['MultimodalTextModel']['search_space']
search_space_reg = args(search_space=space.Dict(**search_space))
# Scheduler and searcher for HPO
if tune_kwargs is None:
tune_kwargs = ag_text_presets.create('default')['tune_kwargs']
scheduler_options = tune_kwargs['scheduler_options']
num_cpus, num_gpus = get_recommended_resource(num_cpus, num_gpus)
if num_gpus == 0:
if 'AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU' in os.environ:
use_warning = int(os.environ['AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU'])
else:
use_warning = False
if use_warning:
warnings.warn('No GPU is detected in the machine and we will recommend you to '
'use TextPredictor on a GPU-enabled instance. Currently, '
'training on CPU is slow.')
else:
raise RuntimeError('No GPU is detected in the machine and we will '
                                   'not proceed to run TextPredictor because it will train '
'too slowly with only CPU. You may try to set `ngpus_per_trial` '
'to a number larger than 0 when calling `.fit()`. '
'Also, you can set the environment variable '
'"AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU=1" to force the model to '
'use CPU for training.')
logger.info(f"The GluonNLP V0 backend is used. "
f"We will use {num_cpus} cpus and "
f"{num_gpus} gpus to train each trial.")
if scheduler_options is None:
scheduler_options = dict()
if plot_results is None:
if in_ipynb():
plot_results = True
else:
plot_results = False
scheduler_options = compile_scheduler_options_v2(
scheduler_options=scheduler_options,
scheduler=tune_kwargs['search_strategy'],
search_strategy=tune_kwargs['searcher'],
search_options=tune_kwargs['search_options'],
nthreads_per_trial=num_cpus,
ngpus_per_trial=num_gpus,
checkpoint=os.path.join(self._output_directory, 'checkpoint.ag'),
num_trials=tune_kwargs['num_trials'],
time_out=time_limit,
resume=False,
visualizer=scheduler_options.get('visualizer'),
time_attr='report_idx',
reward_attr='reward_attr',
dist_ip_addrs=scheduler_options.get('dist_ip_addrs'))
# Create a temporary cache file. The internal train function will load the
# temporary cache.
os.makedirs(os.path.join(self._output_directory, 'data_cache'), exist_ok=True)
train_df_path = os.path.join(self._output_directory, 'data_cache',
'cache_train_dataframe.pd.pkl')
tuning_df_path = os.path.join(self._output_directory, 'data_cache',
'cache_tuning_dataframe.pd.pkl')
train_data.to_pickle(train_df_path)
tuning_data.to_pickle(tuning_df_path)
train_fn = search_space_reg(functools.partial(train_function,
train_df_path=train_df_path,
time_limit=time_limit,
time_start=start_tick,
tuning_df_path=tuning_df_path,
base_config=self.base_config,
problem_type=self.problem_type,
column_types=self._column_types,
feature_columns=self._feature_columns,
label_column=self._label_columns[0],
log_metrics=self._log_metrics,
eval_metric=self._eval_metric,
ngpus_per_trial=scheduler_options['resource']['num_gpus'],
console_log=console_log,
verbosity=verbosity))
no_job_finished_err_msg =\
'No training job has been completed! '\
'There are two possibilities: '\
'1) The time_limit is too small, '\
'or 2) There are some internal errors in AutoGluon. '\
'For the first case, you can increase the time_limit or set it to '\
            'None, e.g., setting "predictor.fit(..., time_limit=None)". To '\
'further investigate the root cause, you can also try to set the '\
'"verbosity=3" and try again, i.e., predictor.set_verbosity(3).'
if scheduler_options['num_trials'] == 1:
train_fn(train_fn.args['search_space'],
train_fn.args['_default_config'])
best_model_saved_dir_path = os.path.join(self._output_directory, 'task0')
cfg_path = os.path.join(self._output_directory, 'task0', 'cfg.yml')
# Check whether the job has finished
if not os.path.exists(cfg_path)\
or not os.path.exists(os.path.join(self._output_directory,
'task0', 'best_model.params')):
raise RuntimeError(no_job_finished_err_msg)
cfg = self.base_config.clone_merge(cfg_path)
local_results = pd.read_json(os.path.join(self._output_directory, 'task0',
'results_local.jsonl'), lines=True)
if plot_results:
plot_training_curves = os.path.join(self._output_directory,
'plot_training_curves.png')
import matplotlib.pyplot as plt
plt.ylabel(self._eval_metric)
plt.xlabel('report_idx')
plt.title("Performance vs Training-Time")
plt.plot(local_results['report_idx'].iloc[:-1],
local_results[local_results['eval_metric'][0]].iloc[:-1], label=f'task0')
plt.legend(loc='best')
plt.savefig(plot_training_curves)
plt.show()
self._results = local_results
else:
if tune_kwargs['search_strategy'] != 'local':
# Force forkserver if it's not using the local sequential HPO
force_forkserver()
scheduler_cls, scheduler_params = scheduler_factory(scheduler_options)
# Create scheduler, run HPO experiment
scheduler = scheduler_cls(train_fn, **scheduler_options)
scheduler.run()
scheduler.join_jobs()
if len(scheduler.config_history) == 0:
raise RuntimeError(no_job_finished_err_msg)
best_config = scheduler.get_best_config()
            logger.info('Results={}'.format(scheduler.searcher._results))
logger.info('Best_config={}'.format(best_config))
best_task_id = scheduler.get_best_task_id()
best_model_saved_dir_path = os.path.join(self._output_directory,
'task{}'.format(best_task_id))
best_cfg_path = os.path.join(best_model_saved_dir_path, 'cfg.yml')
cfg = self.base_config.clone_merge(best_cfg_path)
if plot_results:
plot_training_curves = os.path.join(self._output_directory,
'plot_training_curves.png')
scheduler.get_training_curves(filename=plot_training_curves,
plot=plot_results,
use_legend=True)
self._results = dict()
self._results.update(best_reward=scheduler.get_best_reward(),
best_config=scheduler.get_best_config(),
total_time=time.time() - start_tick,
metadata=scheduler.metadata,
training_history=scheduler.training_history,
config_history=scheduler.config_history,
reward_attr=scheduler._reward_attr,
config=cfg)
        # TODO: Consider moving this to a separate predictor
self._config = cfg
# Average parameters
# TODO(sxjscience) Clean up the temporary spaces used to store the intermediate checkpoints.
if cfg.model.use_avg_nbest:
nbest_path_l = []
for best_id in range(cfg.optimization.nbest):
nbest_path = os.path.join(best_model_saved_dir_path, f'nbest_model{best_id}.params')
if os.path.exists(nbest_path):
nbest_path_l.append(nbest_path)
avg_nbest_path = os.path.join(best_model_saved_dir_path, 'nbest_model_avg.params')
average_checkpoints(nbest_path_l, avg_nbest_path)
with open(os.path.join(best_model_saved_dir_path, 'preprocessor.pkl'), 'rb') as in_f:
self._preprocessor = pickle.load(in_f)
backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \
= get_backbone(cfg.model.backbone.name)
if 'roberta' in cfg.model.backbone.name:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)
else:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg)
if self._problem_type == REGRESSION:
out_shape = 1
elif self._problem_type == MULTICLASS:
out_shape = len(self._preprocessor.label_generator.classes_)
elif self._problem_type == BINARY:
assert len(self._preprocessor.label_generator.classes_) == 2
out_shape = 2
else:
raise NotImplementedError
net = MultiModalWithPretrainedTextNN(
text_backbone=text_backbone,
num_text_features=1,
num_categorical_features=len(self._preprocessor.categorical_feature_names),
num_numerical_features=len(self._preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(self._preprocessor.numerical_feature_names) == 0 else len(
self._preprocessor.numerical_feature_names),
num_categories=self._preprocessor.categorical_num_categories,
get_embedding=False,
cfg=cfg.model.network,
out_shape=out_shape)
net.hybridize()
if cfg.model.use_avg_nbest:
net.load_parameters(avg_nbest_path, ctx=mx.cpu())
else:
net.load_parameters(os.path.join(best_model_saved_dir_path, 'best_model.params'),
ctx=mx.cpu())
self._net = net
mx.npx.waitall()
def evaluate(self, data, metrics=None, stochastic_chunk=None, num_repeat=None):
""" Report the predictive performance evaluated for a given dataset.
Parameters
----------
data : str or :class:`TabularDataset` or `pandas.DataFrame`
This Dataset must also contain the label-column with the same column-name as specified during `fit()`.
            If str is passed, `data` will be loaded using the str value as the file path.
metrics : str or List[str] or None
Name of metric or a list of names of metrics to report.
If it is not given, we will return the score of the stored eval_metric.
stochastic_chunk
Whether to use stochastic chunk
num_repeat
The number of repeats
Returns
-------
        ret : a single score or a dict mapping metric name to score
            A single number when one metric is requested, otherwise a dict of metric name --> metric score.
"""
if isinstance(metrics, str):
metrics = [metrics]
elif metrics is None:
metrics = [self._eval_metric]
assert self.net is not None
# We will always use all resources that are available for evaluation
ctx_l = get_mxnet_available_ctx()
self.net.collect_params().reset_ctx(ctx_l)
if not isinstance(data, pd.DataFrame):
if isinstance(data, (list, dict)):
data = pd.DataFrame(data)
elif isinstance(data, str):
data = load_pd.load(data)
else:
raise NotImplementedError(f'The format of data is not understood. '
f'We have type(data)="{type(data)}"')
data = data[self._feature_columns + self._label_columns]
if self._problem_type == MULTICLASS or self._problem_type == BINARY:
ground_truth = self.preprocessor.label_generator.transform(
data[self._label_columns[0]])
predictions = self.predict_proba(data,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
else:
ground_truth = pd.to_numeric(data[self._label_columns[0]]).to_numpy().astype(np.float32)
predictions = self.predict(data,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
metric_scores = [calculate_metric(get_metric(metric),
ground_truth, predictions, self._problem_type)
for metric in metrics]
# Once the inference is completed, we will cache all parameters back
# to CPU to avoid memory overflow.
self.net.collect_params().reset_ctx(mx.cpu())
if len(metric_scores) == 1:
return metric_scores[0]
else:
return {metric: score for metric, score in zip(metrics, metric_scores)}
def _internal_predict(self, data, get_original_labels=True, get_probabilities=False,
stochastic_chunk=None, num_repeat=None):
assert self.net is not None
assert self.config is not None
# We will always use all resources that are available for evaluation
ctx_l = get_mxnet_available_ctx()
self.net.collect_params().reset_ctx(ctx_l)
if not isinstance(data, pd.DataFrame):
if isinstance(data, (list, dict)):
data = pd.DataFrame(data)
elif isinstance(data, str):
data = load_pd.load(data)
else:
raise NotImplementedError(f'The format of data is not understood. '
f'We have type(data)="{type(data)}"')
dataset = self.preprocessor.transform(data[self._feature_columns])
inference_batch_size = self.config.optimization.per_device_batch_size \
* self.config.optimization.val_batch_size_mult
cls_id, sep_id = get_cls_sep_id(self.preprocessor.tokenizer)
if stochastic_chunk is None:
stochastic_chunk = self.config.model.test_stochastic_chunk
batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(self.preprocessor.text_feature_names),
num_categorical_inputs=len(self.preprocessor.categorical_feature_names),
num_numerical_inputs=len(self.preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id,
max_length=self.config.preprocessing.text.max_length,
mode='test',
stochastic_chunk=stochastic_chunk,
insert_sep=self.config.model.insert_sep)
dataloader = DataLoader(dataset,
batch_size=inference_batch_size,
shuffle=False,
batchify_fn=batchify_fn)
if num_repeat is None:
num_repeat = self.config.model.inference_num_repeat
test_predictions = _classification_regression_predict(
self._net,
dataloader=dataloader,
problem_type=self._problem_type,
label_scaler=self.preprocessor.label_scaler,
has_label=False,
num_repeat=num_repeat)
# Once the inference is completed, we will cache all parameters back
# to CPU to avoid memory overflow.
self.net.collect_params().reset_ctx(mx.cpu())
if self._problem_type == MULTICLASS or self._problem_type == BINARY:
if get_probabilities:
return test_predictions
else:
test_predictions = test_predictions.argmax(axis=-1)
if get_original_labels:
test_predictions = np.array(
self.preprocessor.label_generator.inverse_transform(test_predictions))
return test_predictions
@property
def class_labels(self):
"""The original name of the class labels.
For example, the tabular data may contain classes equal to
"entailment", "contradiction", "neutral". Internally, these will be converted to
0, 1, 2, ...
This function returns the original names of these raw labels.
Returns
-------
ret
            List that contains the class names. It will be None if it's not a classification problem.
"""
if self.problem_type == MULTICLASS or self.problem_type == BINARY:
return self._preprocessor.label_generator.classes_
else:
warnings.warn('Accessing class names for a non-classification problem. Return None.')
return None
def predict_proba(self, test_data, stochastic_chunk=None, num_repeat=None):
"""Predict class probabilities instead of class labels (for classification tasks).
Parameters
----------
test_data : `pandas.DataFrame`, `autogluon.tabular.TabularDataset`, or str
The test data to get predictions for. Can be DataFrame/Dataset or a file that can
be loaded into DataFrame/Dataset.
stochastic_chunk : bool
Whether to enable stochastic chunk
num_repeat : int or None
The number of repeats for running the inference model.
Returns
-------
probabilities : array
The predicted class probabilities for each sample.
Shape of this array is (#Samples, num_class).
Here, the i-th number means the probability of belonging to the i-th class.
            You can access the class names by calling `self.class_labels`.
"""
assert self.problem_type == MULTICLASS or self.problem_type == BINARY
return self._internal_predict(test_data,
get_original_labels=False,
get_probabilities=True,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
def predict(self, test_data, get_original_labels=True, stochastic_chunk=None, num_repeat=None):
"""Make predictions on new data.
Parameters
----------
test_data : `pandas.DataFrame`, `autogluon.tabular.TabularDataset`, or str
The test data to get predictions for. Can be DataFrame/Dataset or a file that can be loaded into DataFrame/Dataset.
get_original_labels : bool, default = True
Whether or not predictions should be formatted in terms of the original labels.
For example, the labels might be "entailment" or "not_entailment" and predictions could either be of this form (if `True`) or integer-indices corresponding to these classes (if `False`).
stochastic_chunk : bool or None, default = None
Whether to turn on stochastic chunk
num_repeat : int or None
The number of repeats
Returns
-------
predictions : array
The predictions for each sample. Shape of this array is (#Samples,).
"""
return self._internal_predict(test_data,
get_original_labels=get_original_labels,
get_probabilities=False,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
def save(self, dir_path):
"""Save this model to disk.
Parameters
----------
dir_path : str
Directory where the model should be saved.
"""
os.makedirs(dir_path, exist_ok=True)
self.net.save_parameters(os.path.join(dir_path, 'net.params'))
with open(os.path.join(dir_path, 'cfg.yml'), 'w') as of:
of.write(self.config.dump())
# Save preprocessor
with open(os.path.join(dir_path, 'preprocessor.pkl'), 'wb') as of:
pickle.dump(self.preprocessor, of)
if not isinstance(self._eval_metric, str):
eval_metric = self._eval_metric.name
else:
eval_metric = self._eval_metric
log_metrics = []
for metric in self._log_metrics:
if not isinstance(metric, str):
log_metrics.append(metric.name)
else:
log_metrics.append(metric)
# Save additional assets about the parsed dataset information
with open(os.path.join(dir_path, 'assets.json'), 'w') as of:
json.dump(
{
'problem_type': self._problem_type,
'label_columns': self._label_columns,
'eval_metric': eval_metric,
'log_metrics': log_metrics,
'feature_columns': self._feature_columns,
'column_types': self._column_types,
'version': version.__version__,
}, of, ensure_ascii=True)
@classmethod
def load(cls, dir_path: str):
"""Load a model object previously produced by `fit()` from disk and return this object.
It is highly recommended the predictor be loaded with the exact AutoGluon version
it was fit with.
Parameters
----------
dir_path
Path to directory where this model was previously saved.
Returns
-------
model
            A `MultiModalTextModel` object that can be used for making predictions on new data.
"""
cfg = base_cfg().clone_merge(os.path.join(dir_path, 'cfg.yml'))
with open(os.path.join(dir_path, 'preprocessor.pkl'), 'rb') as in_f:
preprocessor = pickle.load(in_f)
with open(os.path.join(dir_path, 'assets.json'), 'r') as f:
assets = json.load(f)
label_columns = assets['label_columns']
feature_columns = assets['feature_columns']
eval_metric = assets['eval_metric']
log_metrics = assets['log_metrics']
problem_type = assets['problem_type']
column_types = assets['column_types']
# TODO(sxjscience) Post 0.1. In general, we will need to support compatible version check
version = assets['version']
backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \
= get_backbone(cfg.model.backbone.name)
if 'roberta' in cfg.model.backbone.name:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)
else:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg)
if problem_type == REGRESSION:
out_shape = 1
elif problem_type == MULTICLASS:
out_shape = len(preprocessor.label_generator.classes_)
elif problem_type == BINARY:
assert len(preprocessor.label_generator.classes_) == 2
out_shape = 2
else:
raise NotImplementedError
net = MultiModalWithPretrainedTextNN(
text_backbone=text_backbone,
num_text_features=1,
num_categorical_features=len(preprocessor.categorical_feature_names),
num_numerical_features=len(preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(preprocessor.numerical_feature_names) == 0
else len(preprocessor.numerical_feature_names),
num_categories=preprocessor.categorical_num_categories,
get_embedding=False,
cfg=cfg.model.network,
out_shape=out_shape)
net.hybridize()
ctx_l = mx.cpu()
net.load_parameters(os.path.join(dir_path, 'net.params'), ctx=ctx_l)
model = cls(column_types=column_types,
label_columns=label_columns,
feature_columns=feature_columns,
problem_type=problem_type,
eval_metric=eval_metric,
log_metrics=log_metrics)
model._net = net
model._config = cfg
model._preprocessor = preprocessor
return model
def extract_embedding(self, data, stochastic_chunk=None, num_repeat=None):
"""Extract the embedding from the pretrained model.
Parameters
----------
data
Data that can be parsed to pandas dataframe
stochastic_chunk
Whether to use stochastic chunk
num_repeat
The number of repeats
Returns
-------
embeddings
The output embeddings will have shape
(#samples, embedding_dim)
"""
if not isinstance(data, pd.DataFrame):
if isinstance(data, (list, dict)):
data = pd.DataFrame(data)
elif isinstance(data, str):
data = load_pd.load(data)
else:
raise NotImplementedError(f'The format of data is not understood. '
f'We have type(data)="{type(data)}"')
dataset = self.preprocessor.transform(data[self.feature_columns])
inference_batch_size = self.config.optimization.per_device_batch_size \
* self.config.optimization.val_batch_size_mult
cls_id, sep_id = get_cls_sep_id(self.preprocessor.tokenizer)
if stochastic_chunk is None:
stochastic_chunk = self.config.model.test_stochastic_chunk
batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(self.preprocessor.text_feature_names),
num_categorical_inputs=len(self.preprocessor.categorical_feature_names),
num_numerical_inputs=len(self.preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id,
max_length=self.config.preprocessing.text.max_length,
mode='test',
stochastic_chunk=stochastic_chunk,
insert_sep=self.config.model.insert_sep)
dataloader = DataLoader(dataset,
batch_size=inference_batch_size,
shuffle=False,
batchify_fn=batchify_fn)
if self._embed_net is None:
embed_net = MultiModalWithPretrainedTextNN(
text_backbone=self.net.text_backbone,
num_text_features=1,
num_categorical_features=len(self.preprocessor.categorical_feature_names),
num_numerical_features=len(self.preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(self.preprocessor.numerical_feature_names) == 0
else len(self.preprocessor.numerical_feature_names),
num_categories=self.preprocessor.categorical_num_categories,
get_embedding=True,
cfg=self.config.model.network,
out_shape=self.net.out_shape,
params=self.net.collect_params(),
prefix='embed_net_')
embed_net.hybridize()
self._embed_net = embed_net
if num_repeat is None:
num_repeat = self.config.model.inference_num_repeat
ctx_l = get_mxnet_available_ctx()
self._embed_net.collect_params().reset_ctx(ctx_l)
embeddings = _classification_regression_predict(self._embed_net,
dataloader=dataloader,
problem_type=self._problem_type,
label_scaler=self.preprocessor.label_scaler,
has_label=False,
extract_embedding=True,
num_repeat=num_repeat)
self._embed_net.collect_params().reset_ctx(mx.cpu())
return embeddings
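# NOTE: Illustrative sketch only. A hypothetical save/load round trip for a
# fitted MultiModalTextModel, using only the methods defined above; `model`,
# `data_df` and the directory path are placeholders, and the function is never
# invoked in this module.
def _example_save_load_roundtrip(model, data_df, dir_path='./saved_text_model'):
    """Hypothetical helper: persist a fitted model and reload it for inference."""
    model.save(dir_path)  # writes net.params, cfg.yml, preprocessor.pkl and assets.json
    restored = MultiModalTextModel.load(dir_path)
    return restored.predict(data_df)  # predictions in the original label space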
|
[] |
[] |
[
"AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU"
] |
[]
|
["AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU"]
|
python
| 1 | 0 | |
data/main.go
|
package data
import (
"fmt"
"net/http"
"os"
"strings"
)
const requestTimeout int = 15
var httpClient *http.Client
var headers map[string]string
var baseURL string
var apiKey string
var emailTo []string
var emailFrom string
var emailHost string
var emailPort string
// Ensure required environment variables are present.
func init() {
baseURL = os.Getenv("NMS_URL")
if baseURL == "" {
fmt.Println("Unable to find required NMS_URL environment variable.")
os.Exit(1)
}
apiKey = os.Getenv("NMS_API_KEY")
if apiKey == "" {
fmt.Println("Unable to find required NMS_API_KEY environment variable.")
os.Exit(1)
}
emailToEnv := os.Getenv("POWER_REPORT_EMAIL_TO")
if emailToEnv == "" {
fmt.Println("Unable to find required POWER_REPORT_EMAIL_TO environment variable.")
os.Exit(1)
} else {
emailTo = strings.Split(emailToEnv, ",")
}
emailFrom = os.Getenv("POWER_REPORT_EMAIL_FROM")
if emailFrom == "" {
fmt.Println("Unable to find required POWER_REPORT_EMAIL_FROM environment variable.")
os.Exit(1)
}
emailHost = os.Getenv("POWER_REPORT_EMAIL_HOST")
if emailHost == "" {
fmt.Println("Unable to find required POWER_REPORT_EMAIL_HOST environment variable.")
os.Exit(1)
}
emailPort = os.Getenv("POWER_REPORT_EMAIL_PORT")
if emailPort == "" {
fmt.Println("Unable to find required POWER_REPORT_EMAIL_PORT environment variable.")
os.Exit(1)
}
// Initialize the HTTP client.
httpClient = createClient()
}
|
[
"\"NMS_URL\"",
"\"NMS_API_KEY\"",
"\"POWER_REPORT_EMAIL_TO\"",
"\"POWER_REPORT_EMAIL_FROM\"",
"\"POWER_REPORT_EMAIL_HOST\"",
"\"POWER_REPORT_EMAIL_PORT\""
] |
[] |
[
"POWER_REPORT_EMAIL_TO",
"POWER_REPORT_EMAIL_FROM",
"POWER_REPORT_EMAIL_HOST",
"NMS_API_KEY",
"NMS_URL",
"POWER_REPORT_EMAIL_PORT"
] |
[]
|
["POWER_REPORT_EMAIL_TO", "POWER_REPORT_EMAIL_FROM", "POWER_REPORT_EMAIL_HOST", "NMS_API_KEY", "NMS_URL", "POWER_REPORT_EMAIL_PORT"]
|
go
| 6 | 0 | |
src/server.go
|
package main
import (
"log"
"os"
"github.com/docopt/docopt.go"
"github.com/go-martini/martini"
"github.com/martini-contrib/binding"
"github.com/martini-contrib/render"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
)
// Note contains a title and content.
type note struct {
ID bson.ObjectId `json:"id" bson:"_id"`
Subject string `json:"subject" bson:"subject" binding:"required"`
Title string `json:"title" bson:"title" binding:"required"`
Content string `json:"content" bson:"content"`
}
type image struct {
ID bson.ObjectId `json:"id" bson:"_id"`
NoteID bson.ObjectId `json:"note_id" bson:"note_id"`
Type string `json:"Type" bson:"type" binding:"required"`
Content string `json:"Content" bson:"content" binding:"required"`
}
type imagelist struct {
ID bson.ObjectId `json:"id" bson:"_id"`
}
type subject struct {
Subject string `json:"subject" bson:"subject"`
}
type query struct {
Query string `json:"query" bson:"query"`
Subject []string `json:"subject" bson:"subject"`
Show int `json:"show" bson:"show"`
}
// Return object for title query
type titlelist struct {
ID bson.ObjectId `json:"id" bson:"_id"`
Title string `json:"title" bson:"title"`
}
// App returns the ClassicMartini application.
func App() *martini.ClassicMartini {
m := martini.Classic()
m.Use(DB())
m.Use(render.Renderer())
m.Use(martini.Static("app"))
m.Post("/search", binding.Bind(query{}), func(q query, r render.Render, log *log.Logger, db *mgo.Database) {
if q.Query == "" {
r.JSON(400, map[string]interface{}{"error": "query required"})
return
}
n := []note{}
if len(q.Subject) > 0 {
search := bson.M{
"$and": []bson.M{
bson.M{"subject": bson.M{"$in": q.Subject}},
bson.M{"$or": []bson.M{
bson.M{"title": bson.M{"$regex": q.Query, "$options": "i"}},
bson.M{"content": bson.M{"$regex": q.Query, "$options": "i"}},
}},
},
}
err := db.C("notes").Find(search).All(&n)
if err != nil {
r.JSON(500, nil)
}
} else {
search := []bson.M{
bson.M{"title": bson.M{"$regex": q.Query, "$options": "i"}},
bson.M{"content": bson.M{"$regex": q.Query, "$options": "i"}},
}
err := db.C("notes").Find(bson.M{"$or": search}).All(&n)
if err != nil {
r.JSON(500, nil)
}
}
total := len(n)
beg := 0
end := 10
if q.Show > 0 {
end += 10
}
if end > total {
end = total
}
r.JSON(200, map[string]interface{}{"count": total, "skip": beg, "results": n[beg:end]})
})
m.Group("/notes", func(r martini.Router) {
r.Post("", binding.Bind(note{}), addNote)
r.Post("/subject", binding.Bind(subject{}), getNotesBySubject)
r.Post("/(.*)", noteNotFound)
r.Get("/subject/:sub", getNotesByTitlelist)
r.Get("/sublist", getSubList)
r.Get("/:id", getNote)
r.Get("(.*)", noteNotFound)
r.Put("/:id", binding.Bind(note{}), updateNote)
r.Put("(.*)", noteNotFound)
r.Delete("/:id", deleteNote)
r.Delete("(.*)", noteNotFound)
})
m.Group("/img", func(r martini.Router) {
r.Post("/:id", binding.Bind(image{}), addIMG)
r.Post("/(.*)", noteNotFound)
r.Get("/:id/list", getIMGList)
r.Get("/:id/content", getIMGContent)
r.Get("/:id", getIMG)
r.Get("(.*)", noteNotFound)
r.Delete("/:id", deleteIMG)
r.Delete("(.*)", noteNotFound)
})
m.NotFound(func(r render.Render) {
r.Redirect("/", 302)
})
return m
}
// DB clones a mongodb session and maps it to the current context.
func DB() martini.Handler {
session, err := mgo.Dial(os.Getenv("PAM_MONGO_URL"))
if err != nil {
log.Fatal(err)
}
return func(c martini.Context) {
s := session.Clone()
c.Map(s.DB("pam"))
defer s.Close()
c.Next()
}
}
func main() {
arguments, err := docopt.Parse(usage, nil, true, "pam 2.2.1", false)
if err != nil {
log.Fatal("Error parsing usage. Error: ", err.Error())
}
err = os.Setenv("HOST", arguments["--bind_ip"].(string))
if err != nil {
log.Fatal(err.Error())
}
err = os.Setenv("PORT", arguments["--bind_port"].(string))
if err != nil {
log.Fatal(err.Error())
}
App().Run()
}
|
[
"\"PAM_MONGO_URL\""
] |
[] |
[
"PAM_MONGO_URL"
] |
[]
|
["PAM_MONGO_URL"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/gcsomoza/goclone/cp"
)
const Help = `USAGE: goclone -s directory -d directory [-o --go-only]
This tool allows you to copy a go project to another directory
eliminating the hassle to modify your imports manually!
Clone and build your project straight away!
Arguments:
-s, --source Source directory.
-d, --destination Destination directory.
-o, --overwrite Overwrite destination directory.
--go-only Clone .go files only.
Example:
goclone -s github.com/gcsomoza/hello -d github.com/gcsomoza/hello_world -o
`
var OLD string
var NEW string
func main() {
isError := false
isOverwrite := false
isGoOnly := false
args := os.Args
n := len(args)
if n < 5 {
printHelp("Invalid number of arguments.")
isError = true
}
source := ""
destination := ""
if !isError {
source, destination, isError = parseArgs(args)
if n > 5 {
isOverwrite = hasOverwriteArg(args)
isGoOnly = hasGoOnlyArg(args)
}
}
goPath := os.Getenv("GOPATH")
if !isError {
if goPath == "" {
printHelp("Your GOPATH environment variable is not set.")
isError = true
}
}
src := ""
dst := ""
hasGitignore := false
pathToGitignore := ""
if !isError {
src = filepath.Join(goPath, "src", source)
dst = filepath.Join(goPath, "src", destination)
if _, err := os.Stat(src); os.IsNotExist(err) {
printHelp(source + " does not exist")
isError = true
} else {
pathToGitignore = filepath.Join(src, ".gitignore")
if _, err := os.Stat(pathToGitignore); !os.IsNotExist(err) {
hasGitignore = true
}
}
if _, err := os.Stat(dst); !os.IsNotExist(err) {
// Destination already exist
if !isOverwrite {
fmt.Println("ERROR: " + dst + " already exist.")
isError = true
}
}
}
if !isError {
fmt.Println("Cloning", source, "to", destination)
if hasGitignore {
fmt.Println("INFO: .gitignore detected. Ignoring files written in .gitignore")
cp.SetGitignore(pathToGitignore)
cp.SetIsGoOnly(isGoOnly)
}
err := cp.Copy(src, dst)
if err != nil {
fmt.Println("ERROR:", err)
isError = true
}
}
if !isError {
OLD = source
NEW = destination
err := filepath.Walk(dst, visit)
if err != nil {
fmt.Println("ERROR:", err)
isError = true
}
}
if !isError {
fmt.Println("Clone successful!")
}
}
func inSlice(needle string, haystack []string) int {
for i, hay := range haystack {
if hay == needle {
return i
}
}
return -1
}
func printHelp(msg string) {
fmt.Println("ERROR: " + msg)
fmt.Println("")
fmt.Println(Help)
}
func parseArgs(args []string) (string, string, bool) {
params := make(map[string]string)
params["source"] = ""
params["destination"] = ""
key := ""
for i, arg := range args {
if i == 1 || i == 3 {
if arg == "-s" || arg == "--source" {
key = "source"
} else if arg == "-d" || arg == "--destination" {
key = "destination"
} else {
printHelp("Invalid arguments.")
return "", "", true
}
} else if i == 2 || i == 4 {
params[key] = arg
}
}
if params["source"] == "" || params["destination"] == "" {
return "", "", true
}
return params["source"], params["destination"], false
}
func hasOverwriteArg(args []string) bool {
if inSlice("-o", args) > -1 {
return true
}
if inSlice("--overwrite", args) > -1 {
return true
}
return false
}
func hasGoOnlyArg(args []string) bool {
if inSlice("--go-only", args) > -1 {
return true
}
return false
}
func visit(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
	if fi.IsDir() {
		return nil // nothing to rewrite for directories; only files are processed
	}
matched, err := filepath.Match("*.go", fi.Name())
if err != nil {
panic(err)
}
if matched {
read, err := ioutil.ReadFile(path)
if err != nil {
panic(err)
}
//fmt.Println(string(read))
//fmt.Println(path)
newContents := strings.Replace(string(read), OLD, NEW, -1)
//fmt.Println(fi.Mode())
//fmt.Println(newContents)
err = ioutil.WriteFile(path, []byte(newContents), fi.Mode())
if err != nil {
panic(err)
}
}
return nil
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
contrib/mesos/pkg/scheduler/service/service.go
|
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"bufio"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"os/user"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
etcd "github.com/coreos/etcd/client"
"github.com/gogo/protobuf/proto"
log "github.com/golang/glog"
"github.com/kardianos/osext"
"github.com/mesos/mesos-go/auth"
"github.com/mesos/mesos-go/auth/sasl"
"github.com/mesos/mesos-go/auth/sasl/mech"
mesos "github.com/mesos/mesos-go/mesosproto"
mutil "github.com/mesos/mesos-go/mesosutil"
bindings "github.com/mesos/mesos-go/scheduler"
"github.com/pborman/uuid"
"github.com/prometheus/client_golang/prometheus"
"github.com/spf13/pflag"
"golang.org/x/net/context"
"k8s.io/kubernetes/contrib/mesos/pkg/election"
execcfg "k8s.io/kubernetes/contrib/mesos/pkg/executor/config"
"k8s.io/kubernetes/contrib/mesos/pkg/hyperkube"
minioncfg "k8s.io/kubernetes/contrib/mesos/pkg/minion/config"
"k8s.io/kubernetes/contrib/mesos/pkg/podutil"
"k8s.io/kubernetes/contrib/mesos/pkg/profile"
"k8s.io/kubernetes/contrib/mesos/pkg/runtime"
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/components"
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/components/algorithm/podschedulers"
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/components/framework"
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/components/framework/frameworkid"
frameworkidEtcd "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/components/framework/frameworkid/etcd"
frameworkidZk "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/components/framework/frameworkid/zk"
schedcfg "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/config"
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/executorinfo"
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/ha"
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta"
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/metrics"
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask/hostport"
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/mesos"
controllerfw "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/healthz"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/util/sets"
// lock to this API version, compilation will fail when this becomes unsupported
_ "k8s.io/kubernetes/pkg/api/v1"
)
const (
defaultMesosMaster = "localhost:5050"
defaultMesosUser = "root" // should have privs to execute docker and iptables commands
defaultFrameworkRoles = "*"
defaultPodRoles = "*"
defaultReconcileInterval = 300 // 5m default task reconciliation interval
defaultReconcileCooldown = 15 * time.Second
defaultNodeRelistPeriod = 5 * time.Minute
defaultFrameworkName = "Kubernetes"
defaultExecutorCPUs = resources.CPUShares(0.25) // initial CPU allocated for executor
defaultExecutorMem = resources.MegaBytes(128.0) // initial memory allocated for executor
defaultExecutorInfoCacheSize = 10000
)
type SchedulerServer struct {
port int
address net.IP
enableProfiling bool
kubeconfig string
kubeAPIQPS float32
kubeAPIBurst int
apiServerList []string
etcdServerList []string
allowPrivileged bool
executorPath string
proxyPath string
mesosMaster string
mesosUser string
frameworkRoles []string
defaultPodRoles []string
mesosAuthPrincipal string
mesosAuthSecretFile string
mesosCgroupPrefix string
mesosExecutorCPUs resources.CPUShares
mesosExecutorMem resources.MegaBytes
checkpoint bool
failoverTimeout float64
generateTaskDiscovery bool
frameworkStoreURI string
executorLogV int
executorBindall bool
executorSuicideTimeout time.Duration
launchGracePeriod time.Duration
kubeletEnableDebuggingHandlers bool
runProxy bool
proxyBindall bool
proxyLogV int
proxyMode string
minionPathOverride string
minionLogMaxSize resource.Quantity
minionLogMaxBackups int
minionLogMaxAgeInDays int
mesosAuthProvider string
driverPort uint
hostnameOverride string
reconcileInterval int64
reconcileCooldown time.Duration
defaultContainerCPULimit resources.CPUShares
defaultContainerMemLimit resources.MegaBytes
schedulerConfigFileName string
graceful bool
frameworkName string
frameworkWebURI string
ha bool
advertisedAddress string
serviceAddress net.IP
haDomain string
kmPath string
clusterDNS net.IP
clusterDomain string
kubeletRootDirectory string
kubeletDockerEndpoint string
kubeletPodInfraContainerImage string
kubeletCadvisorPort uint
kubeletHostNetworkSources string
kubeletSyncFrequency time.Duration
kubeletNetworkPluginName string
staticPodsConfigPath string
dockerCfgPath string
containPodResources bool
nodeRelistPeriod time.Duration
sandboxOverlay string
conntrackMax int
conntrackTCPTimeoutEstablished int
useHostPortEndpoints bool
executable string // path to the binary running this service
client *clientset.Clientset
driver bindings.SchedulerDriver
driverMutex sync.RWMutex
mux *http.ServeMux
}
// useful for unit testing specific funcs
type schedulerProcessInterface interface {
End() <-chan struct{}
Failover() <-chan struct{}
Terminal() <-chan struct{}
}
// NewSchedulerServer creates a new SchedulerServer with default parameters
func NewSchedulerServer() *SchedulerServer {
s := SchedulerServer{
port: ports.SchedulerPort,
address: net.ParseIP("127.0.0.1"),
failoverTimeout: time.Duration((1 << 62) - 1).Seconds(),
frameworkStoreURI: "etcd://",
kubeAPIQPS: 50.0,
kubeAPIBurst: 100,
runProxy: true,
executorSuicideTimeout: execcfg.DefaultSuicideTimeout,
launchGracePeriod: execcfg.DefaultLaunchGracePeriod,
defaultContainerCPULimit: resources.DefaultDefaultContainerCPULimit,
defaultContainerMemLimit: resources.DefaultDefaultContainerMemLimit,
proxyMode: "userspace", // upstream default is "iptables" post-v1.1
minionLogMaxSize: minioncfg.DefaultLogMaxSize(),
minionLogMaxBackups: minioncfg.DefaultLogMaxBackups,
minionLogMaxAgeInDays: minioncfg.DefaultLogMaxAgeInDays,
mesosAuthProvider: sasl.ProviderName,
mesosCgroupPrefix: minioncfg.DefaultCgroupPrefix,
mesosMaster: defaultMesosMaster,
mesosUser: defaultMesosUser,
mesosExecutorCPUs: defaultExecutorCPUs,
mesosExecutorMem: defaultExecutorMem,
frameworkRoles: strings.Split(defaultFrameworkRoles, ","),
defaultPodRoles: strings.Split(defaultPodRoles, ","),
reconcileInterval: defaultReconcileInterval,
reconcileCooldown: defaultReconcileCooldown,
checkpoint: true,
frameworkName: defaultFrameworkName,
ha: false,
mux: http.NewServeMux(),
kubeletCadvisorPort: 4194, // copied from github.com/GoogleCloudPlatform/kubernetes/blob/release-0.14/cmd/kubelet/app/server.go
kubeletSyncFrequency: 10 * time.Second,
kubeletEnableDebuggingHandlers: true,
containPodResources: true,
nodeRelistPeriod: defaultNodeRelistPeriod,
conntrackTCPTimeoutEstablished: 0, // non-zero values may require hand-tuning other sysctl's on the host; do so with caution
useHostPortEndpoints: true,
// non-zero values can trigger failures when updating /sys/module/nf_conntrack/parameters/hashsize
// when kube-proxy is running in a non-root netns (init_net); setting this to a non-zero value will
// impact connection tracking for the entire host on which kube-proxy is running. xref (k8s#19182)
conntrackMax: 0,
}
// cache this for later use. also useful in case the original binary gets deleted, e.g.
// during upgrades, development deployments, etc.
if filename, err := osext.Executable(); err != nil {
log.Fatalf("failed to determine path to currently running executable: %v", err)
} else {
s.executable = filename
s.kmPath = filename
}
return &s
}
func (s *SchedulerServer) addCoreFlags(fs *pflag.FlagSet) {
fs.IntVar(&s.port, "port", s.port, "The port that the scheduler's http service runs on")
fs.IPVar(&s.address, "address", s.address, "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
fs.BoolVar(&s.enableProfiling, "profiling", s.enableProfiling, "Enable profiling via web interface host:port/debug/pprof/")
fs.StringSliceVar(&s.apiServerList, "api-servers", s.apiServerList, "List of Kubernetes API servers for publishing events, and reading pods and services. (ip:port), comma separated.")
fs.StringVar(&s.kubeconfig, "kubeconfig", s.kubeconfig, "Path to kubeconfig file with authorization and master location information.")
fs.Float32Var(&s.kubeAPIQPS, "kube-api-qps", s.kubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.IntVar(&s.kubeAPIBurst, "kube-api-burst", s.kubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.StringSliceVar(&s.etcdServerList, "etcd-servers", s.etcdServerList, "List of etcd servers to watch (http://ip:port), comma separated.")
fs.BoolVar(&s.allowPrivileged, "allow-privileged", s.allowPrivileged, "Enable privileged containers in the kubelet (compare the same flag in the apiserver).")
fs.StringVar(&s.clusterDomain, "cluster-domain", s.clusterDomain, "Domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains")
fs.IPVar(&s.clusterDNS, "cluster-dns", s.clusterDNS, "IP address for a cluster DNS server. If set, kubelet will configure all containers to use this for DNS resolution in addition to the host's DNS servers")
fs.StringVar(&s.staticPodsConfigPath, "static-pods-config", s.staticPodsConfigPath, "Path for specification of static pods. Path should point to dir containing the staticPods configuration files. Defaults to none.")
fs.StringVar(&s.mesosMaster, "mesos-master", s.mesosMaster, "Location of the Mesos master. The format is a comma-delimited list of of hosts like zk://host1:port,host2:port/mesos. If using ZooKeeper, pay particular attention to the leading zk:// and trailing /mesos! If not using ZooKeeper, standard URLs like http://localhost are also acceptable.")
fs.StringVar(&s.mesosUser, "mesos-user", s.mesosUser, "Mesos user for this framework, defaults to root.")
fs.StringSliceVar(&s.frameworkRoles, "mesos-framework-roles", s.frameworkRoles, "Mesos framework roles that the scheduler receives offers for. Currently only \"*\" and optionally one additional role are supported.")
fs.StringSliceVar(&s.defaultPodRoles, "mesos-default-pod-roles", s.defaultPodRoles, "Roles that will be used to launch pods having no "+meta.RolesKey+" label.")
fs.StringVar(&s.mesosAuthPrincipal, "mesos-authentication-principal", s.mesosAuthPrincipal, "Mesos authentication principal.")
fs.StringVar(&s.mesosAuthSecretFile, "mesos-authentication-secret-file", s.mesosAuthSecretFile, "Mesos authentication secret file.")
fs.StringVar(&s.mesosAuthProvider, "mesos-authentication-provider", s.mesosAuthProvider, fmt.Sprintf("Authentication provider to use, default is SASL that supports mechanisms: %+v", mech.ListSupported()))
fs.StringVar(&s.dockerCfgPath, "dockercfg-path", s.dockerCfgPath, "Path to a dockercfg file that will be used by the docker instance of the minions.")
fs.StringVar(&s.mesosCgroupPrefix, "mesos-cgroup-prefix", s.mesosCgroupPrefix, "The cgroup prefix concatenated with MESOS_DIRECTORY must give the executor cgroup set by Mesos")
fs.Var(&s.mesosExecutorCPUs, "mesos-executor-cpus", "Initial CPU shares to allocate for each Mesos executor container.")
fs.Var(&s.mesosExecutorMem, "mesos-executor-mem", "Initial memory (MB) to allocate for each Mesos executor container.")
fs.BoolVar(&s.checkpoint, "checkpoint", s.checkpoint, "Enable/disable checkpointing for the kubernetes-mesos framework.")
fs.Float64Var(&s.failoverTimeout, "failover-timeout", s.failoverTimeout, fmt.Sprintf("Framework failover timeout, in sec."))
fs.BoolVar(&s.generateTaskDiscovery, "mesos-generate-task-discovery", s.generateTaskDiscovery, "Enable/disable generation of DiscoveryInfo for Mesos tasks.")
fs.UintVar(&s.driverPort, "driver-port", s.driverPort, "Port that the Mesos scheduler driver process should listen on.")
fs.StringVar(&s.hostnameOverride, "hostname-override", s.hostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.")
fs.Int64Var(&s.reconcileInterval, "reconcile-interval", s.reconcileInterval, "Interval at which to execute task reconciliation, in sec. Zero disables.")
fs.DurationVar(&s.reconcileCooldown, "reconcile-cooldown", s.reconcileCooldown, "Minimum rest period between task reconciliation operations.")
fs.StringVar(&s.schedulerConfigFileName, "scheduler-config", s.schedulerConfigFileName, "An ini-style configuration file with low-level scheduler settings.")
fs.BoolVar(&s.graceful, "graceful", s.graceful, "Indicator of a graceful failover, intended for internal use only.")
fs.BoolVar(&s.ha, "ha", s.ha, "Run the scheduler in high availability mode with leader election. All peers should be configured exactly the same.")
fs.StringVar(&s.frameworkName, "framework-name", s.frameworkName, "The framework name to register with Mesos.")
fs.StringVar(&s.frameworkStoreURI, "framework-store-uri", s.frameworkStoreURI, "Where the framework should store metadata, either in Zookeeper (zk://host:port/path) or in etcd (etcd://path).")
fs.StringVar(&s.frameworkWebURI, "framework-weburi", s.frameworkWebURI, "A URI that points to a web-based interface for interacting with the framework.")
fs.StringVar(&s.advertisedAddress, "advertised-address", s.advertisedAddress, "host:port address that is advertised to clients. May be used to construct artifact download URIs.")
fs.IPVar(&s.serviceAddress, "service-address", s.serviceAddress, "The service portal IP address that the scheduler should register with (if unset, chooses randomly)")
fs.Var(&s.defaultContainerCPULimit, "default-container-cpu-limit", "Containers without a CPU resource limit are admitted this much CPU shares")
fs.Var(&s.defaultContainerMemLimit, "default-container-mem-limit", "Containers without a memory resource limit are admitted this much amount of memory in MB")
fs.BoolVar(&s.containPodResources, "contain-pod-resources", s.containPodResources, "Reparent pod containers into mesos cgroups; disable if you're having strange mesos/docker/systemd interactions.")
fs.DurationVar(&s.nodeRelistPeriod, "node-monitor-period", s.nodeRelistPeriod, "Period between relisting of all nodes from the apiserver.")
fs.BoolVar(&s.useHostPortEndpoints, "host-port-endpoints", s.useHostPortEndpoints, "Map service endpoints to hostIP:hostPort instead of podIP:containerPort. Default true.")
fs.IntVar(&s.executorLogV, "executor-logv", s.executorLogV, "Logging verbosity of spawned minion and executor processes.")
fs.BoolVar(&s.executorBindall, "executor-bindall", s.executorBindall, "When true will set -address of the executor to 0.0.0.0.")
fs.DurationVar(&s.executorSuicideTimeout, "executor-suicide-timeout", s.executorSuicideTimeout, "Executor self-terminates after this period of inactivity. Zero disables suicide watch.")
fs.DurationVar(&s.launchGracePeriod, "mesos-launch-grace-period", s.launchGracePeriod, "Launch grace period after which launching tasks will be cancelled. Zero disables launch cancellation.")
fs.StringVar(&s.sandboxOverlay, "mesos-sandbox-overlay", s.sandboxOverlay, "Path to an archive (tar.gz, tar.bz2 or zip) extracted into the sandbox.")
fs.BoolVar(&s.proxyBindall, "proxy-bindall", s.proxyBindall, "When true pass -proxy-bindall to the executor.")
fs.BoolVar(&s.runProxy, "run-proxy", s.runProxy, "Run the kube-proxy as a side process of the executor.")
fs.IntVar(&s.proxyLogV, "proxy-logv", s.proxyLogV, "Logging verbosity of spawned minion proxy processes.")
fs.StringVar(&s.proxyMode, "proxy-mode", s.proxyMode, "Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.")
fs.StringVar(&s.minionPathOverride, "minion-path-override", s.minionPathOverride, "Override the PATH in the environment of the minion sub-processes.")
fs.Var(resource.NewQuantityFlagValue(&s.minionLogMaxSize), "minion-max-log-size", "Maximum log file size for the executor and proxy before rotation")
fs.IntVar(&s.minionLogMaxAgeInDays, "minion-max-log-age", s.minionLogMaxAgeInDays, "Maximum log file age of the executor and proxy in days")
fs.IntVar(&s.minionLogMaxBackups, "minion-max-log-backups", s.minionLogMaxBackups, "Maximum log file backups of the executor and proxy to keep after rotation")
fs.StringVar(&s.kubeletRootDirectory, "kubelet-root-dir", s.kubeletRootDirectory, "Directory path for managing kubelet files (volume mounts,etc). Defaults to executor sandbox.")
fs.StringVar(&s.kubeletDockerEndpoint, "kubelet-docker-endpoint", s.kubeletDockerEndpoint, "If non-empty, kubelet will use this for the docker endpoint to communicate with.")
fs.StringVar(&s.kubeletPodInfraContainerImage, "kubelet-pod-infra-container-image", s.kubeletPodInfraContainerImage, "The image whose network/ipc namespaces containers in each pod will use.")
fs.UintVar(&s.kubeletCadvisorPort, "kubelet-cadvisor-port", s.kubeletCadvisorPort, "The port of the kubelet's local cAdvisor endpoint")
fs.StringVar(&s.kubeletHostNetworkSources, "kubelet-host-network-sources", s.kubeletHostNetworkSources, "Comma-separated list of sources from which the Kubelet allows pods to use of host network. For all sources use \"*\" [default=\"file\"]")
fs.DurationVar(&s.kubeletSyncFrequency, "kubelet-sync-frequency", s.kubeletSyncFrequency, "Max period between synchronizing running containers and config")
fs.StringVar(&s.kubeletNetworkPluginName, "kubelet-network-plugin", s.kubeletNetworkPluginName, "<Warning: Alpha feature> The name of the network plugin to be invoked for various events in kubelet/pod lifecycle")
fs.BoolVar(&s.kubeletEnableDebuggingHandlers, "kubelet-enable-debugging-handlers", s.kubeletEnableDebuggingHandlers, "Enables kubelet endpoints for log collection and local running of containers and commands")
fs.IntVar(&s.conntrackMax, "conntrack-max", s.conntrackMax, "Maximum number of NAT connections to track on agent nodes (0 to leave as-is)")
fs.IntVar(&s.conntrackTCPTimeoutEstablished, "conntrack-tcp-timeout-established", s.conntrackTCPTimeoutEstablished, "Idle timeout for established TCP connections on agent nodes (0 to leave as-is)")
//TODO(jdef) support this flag once we have a better handle on mesos-dns and k8s DNS integration
//fs.StringVar(&s.HADomain, "ha-domain", s.HADomain, "Domain of the HA scheduler service, only used in HA mode. If specified may be used to construct artifact download URIs.")
}
func (s *SchedulerServer) AddStandaloneFlags(fs *pflag.FlagSet) {
s.addCoreFlags(fs)
fs.StringVar(&s.executorPath, "executor-path", s.executorPath, "Location of the kubernetes executor executable")
}
func (s *SchedulerServer) AddHyperkubeFlags(fs *pflag.FlagSet) {
s.addCoreFlags(fs)
fs.StringVar(&s.kmPath, "km-path", s.kmPath, "Location of the km executable, may be a URI or an absolute file path.")
}
// returns (downloadURI, basename(path))
func (s *SchedulerServer) serveFrameworkArtifact(path string) (string, string) {
basename := filepath.Base(path)
return s.serveFrameworkArtifactWithFilename(path, basename), basename
}
// returns downloadURI
func (s *SchedulerServer) serveFrameworkArtifactWithFilename(path string, filename string) string {
serveFile := func(pattern string, filepath string) {
s.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, filepath)
})
}
serveFile("/"+filename, path)
hostURI := ""
if s.advertisedAddress != "" {
hostURI = fmt.Sprintf("http://%s/%s", s.advertisedAddress, filename)
} else if s.ha && s.haDomain != "" {
hostURI = fmt.Sprintf("http://%s.%s:%d/%s", SCHEDULER_SERVICE_NAME, s.haDomain, ports.SchedulerPort, filename)
} else {
hostURI = fmt.Sprintf("http://%s:%d/%s", s.address.String(), s.port, filename)
}
log.V(2).Infof("Hosting artifact '%s' at '%s'", filename, hostURI)
return hostURI
}
func (s *SchedulerServer) prepareExecutorInfo(hks hyperkube.Interface) (*mesos.ExecutorInfo, error) {
ci := &mesos.CommandInfo{
Shell: proto.Bool(false),
}
if s.executorPath != "" {
uri, executorCmd := s.serveFrameworkArtifact(s.executorPath)
ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
ci.Value = proto.String(fmt.Sprintf("./%s", executorCmd))
ci.Arguments = append(ci.Arguments, ci.GetValue())
} else if !hks.FindServer(hyperkube.CommandMinion) {
return nil, fmt.Errorf("either run this scheduler via km or else --executor-path is required")
} else {
if strings.Index(s.kmPath, "://") > 0 {
// URI could point directly to executable, e.g. hdfs:///km
// or else indirectly, e.g. http://acmestorage/tarball.tgz
// so we assume that for this case the command will always "km"
ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(s.kmPath), Executable: proto.Bool(true)})
ci.Value = proto.String("./km") // TODO(jdef) extract constant
} else if s.kmPath != "" {
uri, kmCmd := s.serveFrameworkArtifact(s.kmPath)
ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
ci.Value = proto.String(fmt.Sprintf("./%s", kmCmd))
} else {
uri, kmCmd := s.serveFrameworkArtifact(s.executable)
ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
ci.Value = proto.String(fmt.Sprintf("./%s", kmCmd))
}
ci.Arguments = append(ci.Arguments, ci.GetValue(), hyperkube.CommandMinion)
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--run-proxy=%v", s.runProxy))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-bindall=%v", s.proxyBindall))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-logv=%d", s.proxyLogV))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-mode=%v", s.proxyMode))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--path-override=%s", s.minionPathOverride))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-size=%v", s.minionLogMaxSize.String()))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-backups=%d", s.minionLogMaxBackups))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-age=%d", s.minionLogMaxAgeInDays))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--conntrack-max=%d", s.conntrackMax))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--conntrack-tcp-timeout-established=%d", s.conntrackTCPTimeoutEstablished))
}
if s.sandboxOverlay != "" {
if _, err := os.Stat(s.sandboxOverlay); os.IsNotExist(err) {
return nil, fmt.Errorf("Sandbox overlay archive not found: %s", s.sandboxOverlay)
}
uri, _ := s.serveFrameworkArtifact(s.sandboxOverlay)
ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(false), Extract: proto.Bool(true)})
}
if s.dockerCfgPath != "" {
uri := s.serveFrameworkArtifactWithFilename(s.dockerCfgPath, ".dockercfg")
ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(false), Extract: proto.Bool(false)})
}
//TODO(jdef): provide some way (env var?) for users to customize executor config
//TODO(jdef): set -address to 127.0.0.1 if `address` is 127.0.0.1
apiServerArgs := strings.Join(s.apiServerList, ",")
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--api-servers=%s", apiServerArgs))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--v=%d", s.executorLogV)) // this also applies to the minion
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--allow-privileged=%t", s.allowPrivileged))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--suicide-timeout=%v", s.executorSuicideTimeout))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--mesos-launch-grace-period=%v", s.launchGracePeriod))
if s.executorBindall {
//TODO(jdef) determine whether hostname-override is really needed for bindall because
//it conflicts with kubelet node status checks/updates
//ci.Arguments = append(ci.Arguments, "--hostname-override=0.0.0.0")
ci.Arguments = append(ci.Arguments, "--address=0.0.0.0")
}
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--mesos-cgroup-prefix=%v", s.mesosCgroupPrefix))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--cadvisor-port=%v", s.kubeletCadvisorPort))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--sync-frequency=%v", s.kubeletSyncFrequency))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--contain-pod-resources=%t", s.containPodResources))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--enable-debugging-handlers=%t", s.kubeletEnableDebuggingHandlers))
if s.kubeconfig != "" {
//TODO(jdef) should probably support non-local files, e.g. hdfs:///some/config/file
uri, basename := s.serveFrameworkArtifact(s.kubeconfig)
ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri)})
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--kubeconfig=%s", basename))
}
appendOptional := func(name string, value string) {
if value != "" {
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--%s=%s", name, value))
}
}
if s.clusterDNS != nil {
appendOptional("cluster-dns", s.clusterDNS.String())
}
appendOptional("cluster-domain", s.clusterDomain)
appendOptional("root-dir", s.kubeletRootDirectory)
appendOptional("docker-endpoint", s.kubeletDockerEndpoint)
appendOptional("pod-infra-container-image", s.kubeletPodInfraContainerImage)
appendOptional("host-network-sources", s.kubeletHostNetworkSources)
appendOptional("network-plugin", s.kubeletNetworkPluginName)
log.V(1).Infof("prepared executor command %q with args '%+v'", ci.GetValue(), ci.Arguments)
	// Create the mesos executor info.
execInfo := &mesos.ExecutorInfo{
Command: ci,
Name: proto.String(cloud.KubernetesExecutorName),
Source: proto.String(execcfg.DefaultInfoSource),
}
// Check for staticPods
data, staticPodCPUs, staticPodMem := s.prepareStaticPods()
	// set prototype resources. During procurement these act as the blueprint only.
	// In a final ExecutorInfo they might differ due to different procured
	// resource roles.
execInfo.Resources = []*mesos.Resource{
mutil.NewScalarResource("cpus", float64(s.mesosExecutorCPUs)+staticPodCPUs),
mutil.NewScalarResource("mem", float64(s.mesosExecutorMem)+staticPodMem),
}
// calculate the ExecutorInfo hash to be used for validating compatibility.
// It is used to determine whether a running executor is compatible with the
// current scheduler configuration. If it is not, offers for those nodes
// are declined by our framework and the operator has to phase out those
// running executors in a cluster.
execInfo.ExecutorId = executorinfo.NewID(execInfo)
execInfo.Data = data
log.V(1).Infof("started with executor id %v", execInfo.ExecutorId.GetValue())
return execInfo, nil
}
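// exampleExecutorIDStability is a hedged sketch (never called): per the comment above,
// executorinfo.NewID hashes the executor configuration, so two ExecutorInfos built from
// identical settings are expected to yield equal IDs, which is how post-failover
// compatibility is judged.
func exampleExecutorIDStability(a, b *mesos.ExecutorInfo) bool {
	return executorinfo.NewID(a).GetValue() == executorinfo.NewID(b).GetValue()
}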
func (s *SchedulerServer) prepareStaticPods() (data []byte, staticPodCPUs, staticPodMem float64) {
// TODO(sttts): add a directory watch and tell running executors about updates
if s.staticPodsConfigPath == "" {
return
}
entries, errCh := podutil.ReadFromDir(s.staticPodsConfigPath)
go func() {
// we just skip file system errors for now, do our best to gather
// as many static pod specs as we can.
for err := range errCh {
log.Errorln(err.Error())
}
}()
// validate cpu and memory limits, tracking the running totals in staticPod{CPUs,Mem}
validateResourceLimits := StaticPodValidator(
s.defaultContainerCPULimit,
s.defaultContainerMemLimit,
&staticPodCPUs,
&staticPodMem)
zipped, err := podutil.Gzip(validateResourceLimits.Do(entries))
if err != nil {
log.Errorf("failed to generate static pod data: %v", err)
staticPodCPUs, staticPodMem = 0, 0
} else {
data = zipped
}
return
}
// TODO(jdef): hacked from plugin/cmd/kube-scheduler/app/server.go
func (s *SchedulerServer) createAPIServerClientConfig() (*restclient.Config, error) {
kubeconfig, err := clientcmd.BuildConfigFromFlags(s.apiServerList[0], s.kubeconfig)
if err != nil {
return nil, err
}
// Override kubeconfig qps/burst settings from flags
kubeconfig.QPS = s.kubeAPIQPS
kubeconfig.Burst = s.kubeAPIBurst
return kubeconfig, nil
}
func (s *SchedulerServer) setDriver(driver bindings.SchedulerDriver) {
s.driverMutex.Lock()
defer s.driverMutex.Unlock()
s.driver = driver
}
func (s *SchedulerServer) getDriver() (driver bindings.SchedulerDriver) {
s.driverMutex.RLock()
defer s.driverMutex.RUnlock()
return s.driver
}
func (s *SchedulerServer) Run(hks hyperkube.Interface, _ []string) error {
if n := len(s.frameworkRoles); n == 0 || n > 2 || (n == 2 && s.frameworkRoles[0] != "*" && s.frameworkRoles[1] != "*") {
log.Fatalf(`only one custom role allowed in addition to "*"`)
}
fwSet := sets.NewString(s.frameworkRoles...)
podSet := sets.NewString(s.defaultPodRoles...)
if !fwSet.IsSuperset(podSet) {
log.Fatalf("all default pod roles %q must be included in framework roles %q", s.defaultPodRoles, s.frameworkRoles)
}
// get scheduler low-level config
sc := schedcfg.CreateDefaultConfig()
if s.schedulerConfigFileName != "" {
f, err := os.Open(s.schedulerConfigFileName)
if err != nil {
log.Fatalf("Cannot open scheduler config file: %v", err)
}
err = sc.Read(bufio.NewReader(f))
if err != nil {
log.Fatalf("Invalid scheduler config file: %v", err)
}
}
schedulerProcess, driverFactory, etcdClient, eid := s.bootstrap(hks, sc)
if s.enableProfiling {
profile.InstallHandler(s.mux)
}
go runtime.Until(func() {
log.V(1).Info("Starting HTTP interface")
log.Error(http.ListenAndServe(net.JoinHostPort(s.address.String(), strconv.Itoa(s.port)), s.mux))
}, sc.HttpBindInterval.Duration, schedulerProcess.Terminal())
if s.ha {
validation := ha.ValidationFunc(validateLeadershipTransition)
srv := ha.NewCandidate(schedulerProcess, driverFactory, validation)
path := meta.ElectionPath(s.frameworkName)
uuid := eid.GetValue() + ":" + uuid.New() // unique for each scheduler instance
log.Infof("registering for election at %v with id %v", path, uuid)
go election.Notify(
election.NewEtcdMasterElector(etcdClient),
path,
uuid,
srv,
nil)
} else {
log.Infoln("self-electing in non-HA mode")
schedulerProcess.Elect(driverFactory)
}
return s.awaitFailover(schedulerProcess, func() error { return s.failover(s.getDriver(), hks) })
}
// watch the scheduler process for failover signals and handle them properly. May never return.
func (s *SchedulerServer) awaitFailover(schedulerProcess schedulerProcessInterface, handler func() error) error {
// we only want to return the first error (if any), everyone else can block forever
errCh := make(chan error, 1)
doFailover := func() error {
// we really don't expect handler to return, if it does something went seriously wrong
err := handler()
if err != nil {
defer schedulerProcess.End()
err = fmt.Errorf("failover failed, scheduler will terminate: %v", err)
}
return err
}
// guard for failover signal processing, first signal processor wins
failoverLatch := &runtime.Latch{}
runtime.On(schedulerProcess.Terminal(), func() {
if !failoverLatch.Acquire() {
log.V(1).Infof("scheduler process ending, already failing over")
select {}
}
var err error
defer func() { errCh <- err }()
select {
case <-schedulerProcess.Failover():
err = doFailover()
default:
if s.ha {
err = fmt.Errorf("ha scheduler exiting instead of failing over")
} else {
log.Infof("exiting scheduler")
}
}
})
runtime.OnOSSignal(makeFailoverSigChan(), func(_ os.Signal) {
if !failoverLatch.Acquire() {
log.V(1).Infof("scheduler process signalled, already failing over")
select {}
}
errCh <- doFailover()
})
return <-errCh
}
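// exampleFailoverLatch is a hedged sketch (never called) of the guard used above: only the
// first caller to Acquire handles failover; later signals block forever so that a second
// failover is never attempted from the same process.
func exampleFailoverLatch(latch *runtime.Latch, failover func() error) error {
	if !latch.Acquire() {
		select {} // another goroutine already won the latch and is handling failover
	}
	return failover()
}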
func validateLeadershipTransition(desired, current string) {
log.Infof("validating leadership transition")
// desired, current are of the format <executor-id>:<scheduler-uuid> (see Run()).
// parse them and ensure that executor ID's match, otherwise the cluster can get into
// a bad state after scheduler failover: executor ID is a config hash that must remain
// consistent across failover events.
var (
i = strings.LastIndex(desired, ":")
j = strings.LastIndex(current, ":")
)
if i > -1 {
desired = desired[0:i]
} else {
log.Fatalf("desired id %q is invalid", desired)
}
if j > -1 {
current = current[0:j]
} else if current != "" {
log.Fatalf("current id %q is invalid", current)
}
if desired != current && current != "" {
log.Fatalf("desired executor id %q != current executor id %q", desired, current)
}
}
// hacked from https://github.com/kubernetes/kubernetes/blob/release-0.14/cmd/kube-apiserver/app/server.go
func newEtcd(etcdServerList []string) (etcd.Client, error) {
cfg := etcd.Config{
Endpoints: etcdServerList,
}
return etcd.New(cfg)
}
func (s *SchedulerServer) bootstrap(hks hyperkube.Interface, sc *schedcfg.Config) (*ha.SchedulerProcess, ha.DriverFactory, etcd.Client, *mesos.ExecutorID) {
s.frameworkName = strings.TrimSpace(s.frameworkName)
if s.frameworkName == "" {
log.Fatalf("framework-name must be a non-empty string")
}
s.frameworkWebURI = strings.TrimSpace(s.frameworkWebURI)
metrics.Register()
runtime.Register()
s.mux.Handle("/metrics", prometheus.Handler())
healthz.InstallHandler(s.mux)
if len(s.etcdServerList) == 0 {
log.Fatalf("specify --etcd-servers must be specified")
}
if len(s.apiServerList) < 1 {
log.Fatal("No api servers specified.")
}
clientConfig, err := s.createAPIServerClientConfig()
if err != nil {
log.Fatalf("Unable to make apiserver client config: %v", err)
}
s.client, err = clientset.NewForConfig(clientConfig)
if err != nil {
log.Fatalf("Unable to make apiserver clientset: %v", err)
}
if s.reconcileCooldown < defaultReconcileCooldown {
s.reconcileCooldown = defaultReconcileCooldown
log.Warningf("user-specified reconcile cooldown too small, defaulting to %v", s.reconcileCooldown)
}
eiPrototype, err := s.prepareExecutorInfo(hks)
if err != nil {
log.Fatalf("misconfigured executor: %v", err)
}
// TODO(jdef): remove the dependency on etcd as soon as
// (1) the generic config store is available for the FrameworkId storage
// (2) the generic master election is provided by the apiserver
// Compare docs/proposals/high-availability.md
etcdClient, err := newEtcd(s.etcdServerList)
if err != nil {
log.Fatalf("misconfigured etcd: %v", err)
}
keysAPI := etcd.NewKeysAPI(etcdClient)
// mirror all nodes into the nodeStore
var eiRegistry executorinfo.Registry
nodesClientConfig := *clientConfig
nodesClient, err := clientset.NewForConfig(&nodesClientConfig)
if err != nil {
log.Fatalf("Cannot create client to watch nodes: %v", err)
}
nodeLW := cache.NewListWatchFromClient(nodesClient.CoreClient, "nodes", api.NamespaceAll, fields.Everything())
nodeStore, nodeCtl := controllerfw.NewInformer(nodeLW, &api.Node{}, s.nodeRelistPeriod, &controllerfw.ResourceEventHandlerFuncs{
DeleteFunc: func(obj interface{}) {
node := obj.(*api.Node)
if eiRegistry != nil {
log.V(2).Infof("deleting node %q from registry", node.Name)
eiRegistry.Invalidate(node.Name)
}
},
})
lookupNode := func(hostName string) *api.Node {
n, _, _ := nodeStore.GetByKey(hostName) // ignore error and return nil then
if n == nil {
return nil
}
return n.(*api.Node)
}
execInfoCache, err := executorinfo.NewCache(defaultExecutorInfoCacheSize)
if err != nil {
log.Fatalf("cannot create executorinfo cache: %v", err)
}
eiRegistry, err = executorinfo.NewRegistry(lookupNode, eiPrototype, execInfoCache)
if err != nil {
log.Fatalf("cannot create executorinfo registry: %v", err)
}
pr := podtask.NewDefaultProcurement(eiPrototype, eiRegistry)
fcfs := podschedulers.NewFCFSPodScheduler(pr, lookupNode)
frameworkIDStorage, err := s.frameworkIDStorage(keysAPI)
if err != nil {
log.Fatalf("cannot init framework ID storage: %v", err)
}
framework := framework.New(framework.Config{
SchedulerConfig: *sc,
Client: s.client,
FailoverTimeout: s.failoverTimeout,
ReconcileInterval: s.reconcileInterval,
ReconcileCooldown: s.reconcileCooldown,
LookupNode: lookupNode,
StoreFrameworkId: frameworkIDStorage.Set,
ExecutorId: eiPrototype.GetExecutorId(),
})
masterUri := s.mesosMaster
info, cred, err := s.buildFrameworkInfo()
if err != nil {
log.Fatalf("Misconfigured mesos framework: %v", err)
}
schedulerProcess := ha.New(framework)
// try publishing on the same IP as the slave
var publishedAddress net.IP
if libprocessIP := os.Getenv("LIBPROCESS_IP"); libprocessIP != "" {
publishedAddress = net.ParseIP(libprocessIP)
}
if publishedAddress != nil {
log.V(1).Infof("driver will publish address %v", publishedAddress)
}
dconfig := &bindings.DriverConfig{
Scheduler: schedulerProcess,
Framework: info,
Master: masterUri,
Credential: cred,
BindingAddress: s.address,
BindingPort: uint16(s.driverPort),
PublishedAddress: publishedAddress,
HostnameOverride: s.hostnameOverride,
WithAuthContext: func(ctx context.Context) context.Context {
ctx = auth.WithLoginProvider(ctx, s.mesosAuthProvider)
ctx = sasl.WithBindingAddress(ctx, s.address)
return ctx
},
}
// create event recorder sending events to the "" namespace of the apiserver
eventsClientConfig := *clientConfig
eventsClient, err := clientset.NewForConfig(&eventsClientConfig)
if err != nil {
log.Fatalf("Invalid API configuration: %v", err)
}
broadcaster := record.NewBroadcaster()
recorder := broadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
broadcaster.StartLogging(log.Infof)
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{eventsClient.Events("")})
lw := cache.NewListWatchFromClient(s.client.CoreClient, "pods", api.NamespaceAll, fields.Everything())
hostPortStrategy := hostport.StrategyFixed
if s.useHostPortEndpoints {
hostPortStrategy = hostport.StrategyWildcard
}
// create scheduler core with all components arranged around it
sched := components.New(
sc,
framework,
fcfs,
s.client,
recorder,
schedulerProcess.Terminal(),
s.mux,
lw,
podtask.Config{
DefaultPodRoles: s.defaultPodRoles,
FrameworkRoles: s.frameworkRoles,
GenerateTaskDiscoveryEnabled: s.generateTaskDiscovery,
HostPortStrategy: hostPortStrategy,
Prototype: eiPrototype,
},
s.defaultContainerCPULimit,
s.defaultContainerMemLimit,
)
runtime.On(framework.Registration(), func() { sched.Run(schedulerProcess.Terminal()) })
runtime.On(framework.Registration(), s.newServiceWriter(publishedAddress, schedulerProcess.Terminal()))
runtime.On(framework.Registration(), func() { nodeCtl.Run(schedulerProcess.Terminal()) })
driverFactory := ha.DriverFactory(func() (drv bindings.SchedulerDriver, err error) {
log.V(1).Infoln("performing deferred initialization")
if err = framework.Init(sched, schedulerProcess.Master(), s.mux); err != nil {
return nil, fmt.Errorf("failed to initialize pod scheduler: %v", err)
}
log.V(1).Infoln("deferred init complete")
if s.failoverTimeout > 0 {
// defer obtaining framework ID to prevent multiple schedulers
// from overwriting each other's framework IDs
var frameworkID string
frameworkID, err = frameworkIDStorage.Get(context.TODO())
if err != nil {
return nil, fmt.Errorf("failed to fetch framework ID from storage: %v", err)
}
if frameworkID != "" {
log.Infof("configuring FrameworkInfo with ID found in storage: %q", frameworkID)
dconfig.Framework.Id = &mesos.FrameworkID{Value: &frameworkID}
} else {
log.V(1).Infof("did not find framework ID in storage")
}
} else {
// TODO(jdef) this is a hack, really for development, to simplify clean up of old framework IDs
frameworkIDStorage.Remove(context.TODO())
}
log.V(1).Infoln("constructing mesos scheduler driver")
drv, err = bindings.NewMesosSchedulerDriver(*dconfig)
if err != nil {
return nil, fmt.Errorf("failed to construct scheduler driver: %v", err)
}
log.V(1).Infoln("constructed mesos scheduler driver:", drv)
s.setDriver(drv)
return drv, nil
})
return schedulerProcess, driverFactory, etcdClient, eiPrototype.GetExecutorId()
}
func (s *SchedulerServer) failover(driver bindings.SchedulerDriver, hks hyperkube.Interface) error {
if driver != nil {
stat, err := driver.Stop(true)
if stat != mesos.Status_DRIVER_STOPPED {
return fmt.Errorf("failed to stop driver for failover, received unexpected status code: %v", stat)
} else if err != nil {
return err
}
}
// there's no guarantee that all goroutines are actually programmed intelligently with 'done'
// signals, so we'll need to restart if we want to really stop everything
// run the same command that we were launched with
	//TODO(jdef) assumption here is that the scheduler is the only service running in this process, we should probably validate that somehow
args := []string{}
flags := pflag.CommandLine
if hks != nil {
args = append(args, hks.Name())
flags = hks.Flags()
}
flags.Visit(func(flag *pflag.Flag) {
if flag.Name != "api-servers" && flag.Name != "etcd-servers" {
args = append(args, fmt.Sprintf("--%s=%s", flag.Name, flag.Value.String()))
}
})
if !s.graceful {
args = append(args, "--graceful")
}
if len(s.apiServerList) > 0 {
args = append(args, "--api-servers="+strings.Join(s.apiServerList, ","))
}
if len(s.etcdServerList) > 0 {
args = append(args, "--etcd-servers="+strings.Join(s.etcdServerList, ","))
}
args = append(args, flags.Args()...)
log.V(1).Infof("spawning scheduler for graceful failover: %s %+v", s.executable, args)
cmd := exec.Command(s.executable, args...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.SysProcAttr = makeDisownedProcAttr()
// TODO(jdef) pass in a pipe FD so that we can block, waiting for the child proc to be ready
//cmd.ExtraFiles = []*os.File{}
exitcode := 0
log.Flush() // TODO(jdef) it would be really nice to ensure that no one else in our process was still logging
if err := cmd.Start(); err != nil {
		//log to stdout here to avoid conflicts with normal stderr logging
fmt.Fprintf(os.Stdout, "failed to spawn failover process: %v\n", err)
os.Exit(1)
}
os.Exit(exitcode)
select {} // will never reach here
}
func (s *SchedulerServer) buildFrameworkInfo() (info *mesos.FrameworkInfo, cred *mesos.Credential, err error) {
username, err := s.getUsername()
if err != nil {
return nil, nil, err
}
log.V(2).Infof("Framework configured with mesos user %v", username)
info = &mesos.FrameworkInfo{
Name: proto.String(s.frameworkName),
User: proto.String(username),
Checkpoint: proto.Bool(s.checkpoint),
}
if s.frameworkWebURI != "" {
info.WebuiUrl = proto.String(s.frameworkWebURI)
}
if s.failoverTimeout > 0 {
info.FailoverTimeout = proto.Float64(s.failoverTimeout)
}
// set the framework's role to the first configured non-star role.
// once Mesos supports multiple roles simply set the configured mesos roles slice.
for _, role := range s.frameworkRoles {
if role != "*" {
// mesos currently supports only one role per framework info
// The framework will be offered role's resources as well as * resources
info.Role = proto.String(role)
break
}
}
if s.mesosAuthPrincipal != "" {
info.Principal = proto.String(s.mesosAuthPrincipal)
cred = &mesos.Credential{
Principal: proto.String(s.mesosAuthPrincipal),
}
if s.mesosAuthSecretFile != "" {
secret, err := ioutil.ReadFile(s.mesosAuthSecretFile)
if err != nil {
return nil, nil, err
}
cred.Secret = proto.String(string(secret))
}
}
return
}
func (s *SchedulerServer) getUsername() (username string, err error) {
username = s.mesosUser
if username == "" {
if u, err := user.Current(); err == nil {
username = u.Username
if username == "" {
username = defaultMesosUser
}
}
}
return
}
func (s *SchedulerServer) frameworkIDStorage(keysAPI etcd.KeysAPI) (frameworkid.Storage, error) {
u, err := url.Parse(s.frameworkStoreURI)
if err != nil {
return nil, fmt.Errorf("cannot parse framework store URI: %v", err)
}
switch u.Scheme {
case "etcd":
idpath := meta.StoreChroot
if u.Path != "" {
idpath = path.Join("/", u.Path)
}
idpath = path.Join(idpath, s.frameworkName, "frameworkid")
return frameworkidEtcd.Store(keysAPI, idpath, time.Duration(s.failoverTimeout)*time.Second), nil
case "zk":
return frameworkidZk.Store(s.frameworkStoreURI, s.frameworkName), nil
default:
return nil, fmt.Errorf("unsupported framework storage scheme: %q", u.Scheme)
}
}
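// Hedged illustration of the resolution above (example values only):
//   --framework-store-uri=etcd://             -> <meta.StoreChroot>/<framework-name>/frameworkid in etcd
//   --framework-store-uri=etcd:///custom      -> /custom/<framework-name>/frameworkid in etcd
//   --framework-store-uri=zk://host:2181/path -> delegated to the zk-backed frameworkid store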
|
[
"\"LIBPROCESS_IP\""
] |
[] |
[
"LIBPROCESS_IP"
] |
[]
|
["LIBPROCESS_IP"]
|
go
| 1 | 0 | |
src/main/java/com/founder/sso/redis/DatabaseConfigurationProperties.java
|
package com.founder.sso.redis;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;
@Component
@ConfigurationProperties(prefix = "spring.datasource")
public class DatabaseConfigurationProperties {
String url;
String username;
String password;
String driverClassName;
String testWhileIdle;
String timeBetweenEvictionRunsMillis;
public String getUrl() {
return (System.getenv("DB_URL") != null && System.getenv("DB_URL") != "") ? System.getenv("DB_URL") : url;
}
public void setUrl(String url) {
this.url = url;
}
public String getUsername() {
return (System.getenv("DB_USER") != null && System.getenv("DB_USER") != "") ? System.getenv("DB_USER")
: username;
}
public void setUsername(String username) {
this.username = username;
}
public String getPassword() {
return (System.getenv("DB_PASSWORD") != null && System.getenv("DB_PASSWORD") != "")
? System.getenv("DB_PASSWORD") : password;
}
public void setPassword(String password) {
this.password = password;
}
public String getDriverClassName() {
return (System.getenv("DB_DRIVER") != null && System.getenv("DB_DRIVER") != "") ? System.getenv("DB_DRIVER")
: driverClassName;
}
public void setDriverClassName(String driverClassName) {
this.driverClassName = driverClassName;
}
public String getTestWhileIdle() {
return testWhileIdle;
}
public void setTestWhileIdle(String testWhileIdle) {
this.testWhileIdle = testWhileIdle;
}
public String getTimeBetweenEvictionRunsMillis() {
return timeBetweenEvictionRunsMillis;
}
public void setTimeBetweenEvictionRunsMillis(String timeBetweenEvictionRunsMillis) {
this.timeBetweenEvictionRunsMillis = timeBetweenEvictionRunsMillis;
}
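    // Hedged sketch (hypothetical helper, not referenced by the getters above): the
    // env-override pattern this class implements could be centralised as follows.
    private static String envOrDefault(String name, String fallback) {
        String value = System.getenv(name);
        return (value != null && !value.isEmpty()) ? value : fallback;
    }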
}
|
[
"\"DB_URL\"",
"\"DB_URL\"",
"\"DB_URL\"",
"\"DB_USER\"",
"\"DB_USER\"",
"\"DB_USER\"",
"\"DB_PASSWORD\"",
"\"DB_PASSWORD\"",
"\"DB_PASSWORD\"",
"\"DB_DRIVER\"",
"\"DB_DRIVER\"",
"\"DB_DRIVER\""
] |
[] |
[
"DB_DRIVER",
"DB_USER",
"DB_PASSWORD",
"DB_URL"
] |
[]
|
["DB_DRIVER", "DB_USER", "DB_PASSWORD", "DB_URL"]
|
java
| 4 | 0 | |
examples/plot_multiple_dataset.py
|
"""
Plot multiple datasets
----------------------
This is an example of how to download and
plot multiple datasets at a time.
"""
import matplotlib.pyplot as plt
import os
import act
# Place your username and token here
username = os.getenv('ARM_USERNAME')
token = os.getenv('ARM_PASSWORD')
# Get data from the web service if username and token are available
# if not, use test data
if username is None or token is None or len(username) == 0 or len(token) == 0:
ceil_ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_CEIL1)
met_ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_MET1)
else:
# Download and read data
act.discovery.download_data(username, token, 'sgpceilC1.b1', '2019-01-01', '2019-01-07')
ceil_ds = act.io.armfiles.read_netcdf('sgpceilC1.b1/sgpceilC1.b1.201901*.nc')
met_ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_MET_WILDCARD)
# Correct the CEIL data
ceil_ds = act.corrections.ceil.correct_ceil(ceil_ds, -9999.0)
# You can use tuples if the datasets in the tuple contain a
# datastream attribute. This is required in all ARM datasets.
display = act.plotting.TimeSeriesDisplay((ceil_ds, met_ds), subplot_shape=(2,), figsize=(15, 10))
display.plot('backscatter', 'sgpceilC1.b1', subplot_index=(0,))
display.plot('temp_mean', 'sgpmetE13.b1', subplot_index=(1,))
display.day_night_background('sgpmetE13.b1', subplot_index=(1,))
plt.show()
# You can also use a dictionary so that you can customize
# your datastream names to something that may be more useful.
display = act.plotting.TimeSeriesDisplay(
    {'ceilometer': ceil_ds, 'met': met_ds}, subplot_shape=(2,), figsize=(15, 10)
)
display.plot('backscatter', 'ceilometer', subplot_index=(0,))
display.plot('temp_mean', 'met', subplot_index=(1,))
display.day_night_background('met', subplot_index=(1,))
plt.show()
ceil_ds.close()
met_ds.close()
|
[] |
[] |
[
"ARM_USERNAME",
"ARM_PASSWORD"
] |
[]
|
["ARM_USERNAME", "ARM_PASSWORD"]
|
python
| 2 | 0 | |
examples/consensus_pubsub_with_submit_key.py
|
import os
import time
from random import randint
from hedera import (
PrivateKey,
TopicCreateTransaction,
TopicMessageQuery,
TopicMessageSubmitTransaction,
PyConsumer,
)
from get_client import client
from jnius import autoclass
messagesToPublish = 5
secondsBetweenMessages = 2
# this needs to be cleaned up
mirror_node_address = os.environ.get('MIRROR_NODE_ADDRESS', "hcs.testnet.mirrornode.hedera.com:5600")
JList = autoclass("java.util.List")
mirror_node = JList.of(mirror_node_address)
client.setMirrorNetwork(mirror_node)
# createTopicWithSubmitKey
submitKey = PrivateKey.generate()
submitPublicKey = submitKey.getPublicKey()
resp = (TopicCreateTransaction()
.setTopicMemo("HCS topic with submit key")
.setSubmitKey(submitPublicKey)
.execute(client))
topicId = resp.getReceipt(client).topicId
print("Created new topic ", topicId.toString(), " with ED25519 submitKey of ", submitKey.toString())
time.sleep(5)
def showMsg(*args):
print("time: {} received topic message: {}".format(args[0], args[2]))
# subscribeToTopic
# will not use this: .setStartTime(Instant.ofEpochSecond(0))
# Instant is an org.threeten.bp backport, but the hedera sdk already uses at least Java 8
query = (TopicMessageQuery()
.setTopicId(topicId)
.subscribe(client, PyConsumer(showMsg)))
time.sleep(2)
# publishMessagesToTopic
for i in range(messagesToPublish):
message = "random message " + str(randint(0, 10 ** 9))
print("Publishing message: ", message)
# The transaction is automatically signed by the payer.
# Due to the topic having a submitKey requirement, additionally sign the transaction with that key.
receipt = (TopicMessageSubmitTransaction()
.setTopicId(topicId)
.setMessage(message)
.freezeWith(client)
.sign(submitKey)
.execute(client)
.transactionId.getReceipt(client))
time.sleep(secondsBetweenMessages)
time.sleep(10)
|
[] |
[] |
[
"MIRROR_NODE_ADDRESS"
] |
[]
|
["MIRROR_NODE_ADDRESS"]
|
python
| 1 | 0 | |
bot.py
|
import discord
from discord.ext import commands
from helpers.logHelper import logger
import os
import logging
from pymongo import MongoClient
from helpers.getPrefix import getPrefix
import ast
from helpers.getWeather import getWeather
import time
from pretty_help import PrettyHelp
logging.basicConfig(level=logging.INFO)
DISCORD_TOKEN = os.environ.get("DISCORD_TOKEN", None)
MONGODB = os.environ.get("MONGODB", None)
intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(command_prefix="nb.", help_command=PrettyHelp(), intents=intents)
# bot = commands.Bot(command_prefix='*', help_command=None)
client = MongoClient(MONGODB)
db = client["discord"]
collection = db["bot"]
all_categories = [category for category in os.listdir("./cogs")]
print(all_categories)
for category in all_categories:
for filename in os.listdir(f"./cogs/{category}"):
try:
if filename.endswith(".py"):
bot.load_extension(f"cogs.{category}.{filename[:-3]}")
logger.info(f"Succesfully Loaded Cog: {filename}")
else:
print(f"Unable to load {filename}")
logger.warning(
f"Unable to load {filename}, is it suppose to be in cog directory?"
)
except Exception as e:
logger.warning(f"Unable to load cog: {e}")
"""
check for frequency data in mongo and create a doc for it if it doesn't exist
"""
if not collection.find_one({"_id": "word_command_freq"}):
freq_data_exist = collection.find_one({"_id": "word_command_freq"})
collection.insert_one({"_id": "word_command_freq"})
if not collection.find_one({"_id": "paper_trading_accounts"}):
freq_data_exist = collection.find_one({"_id": "paper_trading_accounts"})
collection.insert_one({"_id": "paper_trading_accounts"})
@bot.event
async def on_message(message):
user = message.author
contents = message.content.split(" ")
word = contents[0]
member = str(message.author)
if user.bot:
pass
elif not word:
print("no words to add")
else:
try:
if "." in word:
word = word.replace(".", "(Dot)")
if "$" in word:
word = word.replace("$", "(Dollar_Sign)")
except Exception as e:
print(str(e) + "Caught in on_message")
logger.warning(e)
print(member + ": " + word)
# is_in_word_command_freq=collection.find_one({"_id":"word_command_freq",word:{"$size": 0}})
# print(is_in_word_command_freq)
if collection.find_one({"_id": "word_command_freq", word: {"$exists": True}}):
collection.update_one({"_id": "word_command_freq"}, {"$inc": {word: 1}})
print("incremented freq value " + word + " by 1 in word_command_freq doc")
else:
print(collection.update({"_id": "word_command_freq"}, {"$set": {word: 1}}))
print("added " + word + " to word_command_freq")
# print(collection.find_one({"_id": "word_command_freq"}))
await bot.process_commands(message)
@bot.event
async def on_guild_join(guild):
guild_id = guild.id
collection.insert_one({"_id": guild_id, "prefix": ","})
print("done")
async def latency(ctx):
time_1 = time.perf_counter()
await ctx.trigger_typing()
time_2 = time.perf_counter()
ping = round((time_2 - time_1) * 1000)
await ctx.send(f"ping = {ping}")
try:
bot.run(DISCORD_TOKEN)
logger.info("Bot Is Off\n----------------------------------- END OF SESSION")
except Exception as e:
logger.warning(f"Bot Failed to initialise: {e}")
|
[] |
[] |
[
"DISCORD_TOKEN",
"MONGODB"
] |
[]
|
["DISCORD_TOKEN", "MONGODB"]
|
python
| 2 | 0 | |
app.py
|
import logging
import os
from tornado.ioloop import IOLoop
from handlers import WatcherServer, MessageServer
logging.basicConfig(level=logging.DEBUG)
log = logging.Logger(__name__)
def main():
watchers = set()
sources_statistics = dict()
message_server = MessageServer(watchers, sources_statistics)
message_port = int(os.environ.get("MESSAGE_PORT", 8888))
message_server.listen(message_port)
watcher_server = WatcherServer(watchers, sources_statistics)
watcher_port = int(os.environ.get("WATCHER_PORT", 8889))
watcher_server.listen(watcher_port)
IOLoop.current().start()
if __name__ == "__main__":
main()
|
[] |
[] |
[
"WATCHER_PORT",
"MESSAGE_PORT"
] |
[]
|
["WATCHER_PORT", "MESSAGE_PORT"]
|
python
| 2 | 0 | |
common/queue.go
|
package common
import (
"net"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/golang/glog"
"github.com/monnand/goredis"
)
// Message queue
type Queue interface {
// Publish 'message' on 'queue' (Redis calls it 'channel')
Publish(queue string, message []byte) error
// Append item to the end (right) of a list. Creates the list if needed.
Rpush(key string, val []byte) error
// Append item to the beginning (left) of a list. Creates the list if needed.
Lpush(key string, val []byte) error
// Blocking Pop from one or more Redis lists
Blpop(keys []string, timeoutsecs uint) (*string, []byte, error)
// Check if queue is available. First return arg is "PONG".
Ping() (string, error)
// List length
Llen(string) (int, error)
// Trim list to given range
Ltrim(string, int, int) error
}
/*
* Mock QUEUE
*/
// Simplistic Queue implementation used by the test suite
type MockQueue struct {
sync.RWMutex
Got map[string][]string
ReadChannel chan string
}
func NewMockQueue() *MockQueue {
return &MockQueue{
Got: make(map[string][]string),
ReadChannel: make(chan string),
}
}
func (mq *MockQueue) Publish(queue string, message []byte) error {
mq.Lock()
defer mq.Unlock()
mq.Got[queue] = append(mq.Got[queue], string(message))
return nil
}
func (mq *MockQueue) Rpush(key string, val []byte) error {
mq.Lock()
defer mq.Unlock()
mq.Got[key] = append(mq.Got[key], string(val))
return nil
}
func (mq *MockQueue) Lpush(key string, val []byte) error {
mq.Lock()
defer mq.Unlock()
// TODO insert at the beginning of the slice
mq.Got[key] = append(mq.Got[key], string(val))
return nil
}
func (mq *MockQueue) Blpop(keys []string, timeoutsecs uint) (*string, []byte, error) {
val := <-mq.ReadChannel
return &keys[0], []byte(val), nil
}
func (mq *MockQueue) Llen(key string) (int, error) {
mq.RLock()
defer mq.RUnlock()
	return len(mq.Got[key]), nil
}
func (mq *MockQueue) Ltrim(key string, start int, end int) error {
return nil
}
func (mq *MockQueue) Ping() (string, error) {
return "PONG", nil
}
/*
* REDIS WRAPPER
* Survives Redis restarts, waits for Redis to be available.
* Implements common.Queue
*/
type RedisQueue struct {
queue Queue
}
func NewRedisQueue() Queue {
redisUrlString := os.Getenv("REDIS_PLUGIN_QUEUE_URL")
if redisUrlString == "" {
glog.Fatal("REDIS_PLUGIN_QUEUE_URL cannot be empty.\nexport REDIS_PLUGIN_QUEUE_URL=redis://host:port/db_number")
}
redisUrl, err := url.Parse(redisUrlString)
if err != nil {
glog.Fatal("Could not read Redis string", err)
}
redisDb, err := strconv.Atoi(strings.TrimLeft(redisUrl.Path, "/"))
if err != nil {
glog.Fatal("Could not read Redis path", err)
}
redisQueue := goredis.Client{Addr: redisUrl.Host, Db: redisDb}
rq := RedisQueue{queue: &redisQueue}
rq.waitForRedis()
return &rq
}
func (rq *RedisQueue) waitForRedis() {
_, err := rq.queue.Ping()
for err != nil {
glog.Errorln("Waiting for redis...")
time.Sleep(1 * time.Second)
_, err = rq.queue.Ping()
}
}
func (rq *RedisQueue) Publish(queue string, message []byte) error {
err := rq.queue.Publish(queue, message)
if err == nil {
return nil
}
netErr := err.(net.Error)
if netErr.Timeout() || netErr.Temporary() {
return err
}
rq.waitForRedis()
return rq.Publish(queue, message) // Recurse
}
func (rq *RedisQueue) Blpop(keys []string, timeoutsecs uint) (*string, []byte, error) {
key, val, err := rq.queue.Blpop(keys, timeoutsecs)
if err == nil {
return key, val, nil
}
netErr := err.(net.Error)
if netErr.Timeout() || netErr.Temporary() {
return key, val, err
}
rq.waitForRedis()
return rq.Blpop(keys, timeoutsecs) // Recurse
}
func (rq *RedisQueue) Rpush(key string, val []byte) error {
err := rq.queue.Rpush(key, val)
if err == nil {
return nil
}
netErr := err.(net.Error)
if netErr.Timeout() || netErr.Temporary() {
return err
}
rq.waitForRedis()
return rq.Rpush(key, val) // Recurse
}
func (rq *RedisQueue) Lpush(key string, val []byte) error {
err := rq.queue.Lpush(key, val)
if err == nil {
return nil
}
netErr := err.(net.Error)
if netErr.Timeout() || netErr.Temporary() {
return err
}
rq.waitForRedis()
return rq.Lpush(key, val) // Recurse
}
func (rq *RedisQueue) Llen(key string) (int, error) {
size, err := rq.queue.Llen(key)
if err == nil {
return size, nil
}
netErr := err.(net.Error)
if netErr.Timeout() || netErr.Temporary() {
return size, err
}
rq.waitForRedis()
return rq.Llen(key) // Recurse
}
func (rq *RedisQueue) Ltrim(key string, start int, end int) error {
err := rq.queue.Ltrim(key, start, end)
if err == nil {
return nil
}
netErr := err.(net.Error)
if netErr.Timeout() || netErr.Temporary() {
return err
}
rq.waitForRedis()
return rq.Ltrim(key, start, end) // Recurse
}
func (rq *RedisQueue) Ping() (string, error) {
return rq.queue.Ping()
}
|
[
"\"REDIS_PLUGIN_QUEUE_URL\""
] |
[] |
[
"REDIS_PLUGIN_QUEUE_URL"
] |
[]
|
["REDIS_PLUGIN_QUEUE_URL"]
|
go
| 1 | 0 | |
server/service/service_osquery.go
|
package service
import (
"context"
"encoding/json"
"fmt"
"github.com/fleetdm/fleet/v4/server"
"net"
"os"
"strconv"
"strings"
"time"
"github.com/fleetdm/fleet/v4/server/fleet"
hostctx "github.com/fleetdm/fleet/v4/server/contexts/host"
"github.com/fleetdm/fleet/v4/server/pubsub"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/spf13/cast"
)
type osqueryError struct {
message string
nodeInvalid bool
}
func (e osqueryError) Error() string {
return e.message
}
func (e osqueryError) NodeInvalid() bool {
return e.nodeInvalid
}
// Sometimes osquery gives us an empty string where we expect an integer.
// We change it to "0" so it can be handled by the appropriate string-to-integer
// conversion function, as those will error on "".
func emptyToZero(val string) string {
if val == "" {
return "0"
}
return val
}
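// Editorial sketch (not part of the original file): typical use of emptyToZero
// when parsing an osquery-reported value; the helper name is hypothetical.
func exampleParseOsqueryUint(raw string) (uint, error) {
	n, err := strconv.Atoi(emptyToZero(raw))
	if err != nil {
		return 0, err
	}
	return uint(n), nil
}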
func (svc Service) AuthenticateHost(ctx context.Context, nodeKey string) (*fleet.Host, error) {
// skipauth: Authorization is currently for user endpoints only.
svc.authz.SkipAuthorization(ctx)
if nodeKey == "" {
return nil, osqueryError{
message: "authentication error: missing node key",
nodeInvalid: true,
}
}
host, err := svc.ds.AuthenticateHost(nodeKey)
if err != nil {
switch err.(type) {
case fleet.NotFoundError:
return nil, osqueryError{
message: "authentication error: invalid node key: " + nodeKey,
nodeInvalid: true,
}
default:
return nil, osqueryError{
message: "authentication error: " + err.Error(),
}
}
}
// Update the "seen" time used to calculate online status. These updates are
// batched for MySQL performance reasons. Because this is done
// asynchronously, it is possible for the server to shut down before
// updating the seen time for these hosts. This seems to be an acceptable
// tradeoff as an online host will continue to check in and quickly be
// marked online again.
svc.seenHostSet.addHostID(host.ID)
host.SeenTime = svc.clock.Now()
return host, nil
}
func (svc Service) EnrollAgent(ctx context.Context, enrollSecret, hostIdentifier string, hostDetails map[string](map[string]string)) (string, error) {
// skipauth: Authorization is currently for user endpoints only.
svc.authz.SkipAuthorization(ctx)
secret, err := svc.ds.VerifyEnrollSecret(enrollSecret)
if err != nil {
return "", osqueryError{
message: "enroll failed: " + err.Error(),
nodeInvalid: true,
}
}
nodeKey, err := server.GenerateRandomText(svc.config.Osquery.NodeKeySize)
if err != nil {
return "", osqueryError{
message: "generate node key failed: " + err.Error(),
nodeInvalid: true,
}
}
hostIdentifier = getHostIdentifier(svc.logger, svc.config.Osquery.HostIdentifier, hostIdentifier, hostDetails)
host, err := svc.ds.EnrollHost(hostIdentifier, nodeKey, secret.TeamID, svc.config.Osquery.EnrollCooldown)
if err != nil {
return "", osqueryError{message: "save enroll failed: " + err.Error(), nodeInvalid: true}
}
// Save enrollment details if provided
save := false
if r, ok := hostDetails["os_version"]; ok {
detailQueries["os_version"].IngestFunc(svc.logger, host, []map[string]string{r})
save = true
}
if r, ok := hostDetails["osquery_info"]; ok {
detailQueries["osquery_info"].IngestFunc(svc.logger, host, []map[string]string{r})
save = true
}
if r, ok := hostDetails["system_info"]; ok {
detailQueries["system_info"].IngestFunc(svc.logger, host, []map[string]string{r})
save = true
}
if save {
if err := svc.ds.SaveHost(host); err != nil {
return "", osqueryError{message: "saving host details: " + err.Error(), nodeInvalid: true}
}
}
return host.NodeKey, nil
}
func getHostIdentifier(logger log.Logger, identifierOption, providedIdentifier string, details map[string](map[string]string)) string {
switch identifierOption {
case "provided":
// Use the host identifier already provided in the request.
return providedIdentifier
case "instance":
r, ok := details["osquery_info"]
if !ok {
level.Info(logger).Log(
"msg", "could not get host identifier",
"reason", "missing osquery_info",
"identifier", "instance",
)
} else if r["instance_id"] == "" {
level.Info(logger).Log(
"msg", "could not get host identifier",
"reason", "missing instance_id in osquery_info",
"identifier", "instance",
)
} else {
return r["instance_id"]
}
case "uuid":
r, ok := details["osquery_info"]
if !ok {
level.Info(logger).Log(
"msg", "could not get host identifier",
"reason", "missing osquery_info",
"identifier", "uuid",
)
} else if r["uuid"] == "" {
level.Info(logger).Log(
"msg", "could not get host identifier",
"reason", "missing instance_id in osquery_info",
"identifier", "uuid",
)
} else {
return r["uuid"]
}
case "hostname":
r, ok := details["system_info"]
if !ok {
level.Info(logger).Log(
"msg", "could not get host identifier",
"reason", "missing system_info",
"identifier", "hostname",
)
} else if r["hostname"] == "" {
level.Info(logger).Log(
"msg", "could not get host identifier",
"reason", "missing instance_id in system_info",
"identifier", "hostname",
)
} else {
return r["hostname"]
}
default:
panic("Unknown option for host_identifier: " + identifierOption)
}
return providedIdentifier
}
func (svc *Service) GetClientConfig(ctx context.Context) (map[string]interface{}, error) {
// skipauth: Authorization is currently for user endpoints only.
svc.authz.SkipAuthorization(ctx)
host, ok := hostctx.FromContext(ctx)
if !ok {
return nil, osqueryError{message: "internal error: missing host from request context"}
}
baseConfig, err := svc.AgentOptionsForHost(ctx, &host)
if err != nil {
return nil, osqueryError{message: "internal error: fetch base config: " + err.Error()}
}
var config map[string]interface{}
err = json.Unmarshal(baseConfig, &config)
if err != nil {
return nil, osqueryError{message: "internal error: parse base configuration: " + err.Error()}
}
packs, err := svc.ds.ListPacksForHost(host.ID)
if err != nil {
return nil, osqueryError{message: "database error: " + err.Error()}
}
packConfig := fleet.Packs{}
for _, pack := range packs {
// first, we must figure out what queries are in this pack
queries, err := svc.ds.ListScheduledQueriesInPack(pack.ID, fleet.ListOptions{})
if err != nil {
return nil, osqueryError{message: "database error: " + err.Error()}
}
// the serializable osquery config struct expects content in a
// particular format, so we do the conversion here
configQueries := fleet.Queries{}
for _, query := range queries {
queryContent := fleet.QueryContent{
Query: query.Query,
Interval: query.Interval,
Platform: query.Platform,
Version: query.Version,
Removed: query.Removed,
Shard: query.Shard,
Denylist: query.Denylist,
}
if query.Removed != nil {
queryContent.Removed = query.Removed
}
if query.Snapshot != nil && *query.Snapshot {
queryContent.Snapshot = query.Snapshot
}
configQueries[query.Name] = queryContent
}
// finally, we add the pack to the client config struct with all of
// the pack's queries
packConfig[pack.Name] = fleet.PackContent{
Platform: pack.Platform,
Queries: configQueries,
}
}
if len(packConfig) > 0 {
packJSON, err := json.Marshal(packConfig)
if err != nil {
return nil, osqueryError{message: "internal error: marshal pack JSON: " + err.Error()}
}
config["packs"] = json.RawMessage(packJSON)
}
// Save interval values if they have been updated.
saveHost := false
if options, ok := config["options"].(map[string]interface{}); ok {
distributedIntervalVal, ok := options["distributed_interval"]
distributedInterval, err := cast.ToUintE(distributedIntervalVal)
if ok && err == nil && host.DistributedInterval != distributedInterval {
host.DistributedInterval = distributedInterval
saveHost = true
}
loggerTLSPeriodVal, ok := options["logger_tls_period"]
loggerTLSPeriod, err := cast.ToUintE(loggerTLSPeriodVal)
if ok && err == nil && host.LoggerTLSPeriod != loggerTLSPeriod {
host.LoggerTLSPeriod = loggerTLSPeriod
saveHost = true
}
// Note config_tls_refresh can only be set in the osquery flags (and has
// also been deprecated in osquery for quite some time) so is ignored
// here.
configRefreshVal, ok := options["config_refresh"]
configRefresh, err := cast.ToUintE(configRefreshVal)
if ok && err == nil && host.ConfigTLSRefresh != configRefresh {
host.ConfigTLSRefresh = configRefresh
saveHost = true
}
}
if saveHost {
err := svc.ds.SaveHost(&host)
if err != nil {
return nil, err
}
}
return config, nil
}
func (svc *Service) SubmitStatusLogs(ctx context.Context, logs []json.RawMessage) error {
// skipauth: Authorization is currently for user endpoints only.
svc.authz.SkipAuthorization(ctx)
if err := svc.osqueryLogWriter.Status.Write(ctx, logs); err != nil {
return osqueryError{message: "error writing status logs: " + err.Error()}
}
return nil
}
func (svc *Service) SubmitResultLogs(ctx context.Context, logs []json.RawMessage) error {
// skipauth: Authorization is currently for user endpoints only.
svc.authz.SkipAuthorization(ctx)
if err := svc.osqueryLogWriter.Result.Write(ctx, logs); err != nil {
return osqueryError{message: "error writing result logs: " + err.Error()}
}
return nil
}
// hostLabelQueryPrefix is prepended to the query name when a query is
// provided as a label query. This allows the results to be retrieved when
// osqueryd writes the distributed query results.
const hostLabelQueryPrefix = "fleet_label_query_"
// hostDetailQueryPrefix is prepended to the query name when a query is
// provided as a detail query.
const hostDetailQueryPrefix = "fleet_detail_query_"
// hostAdditionalQueryPrefix is prepended to the query name when a query is
// provided as an additional query (additional info for hosts to retrieve).
const hostAdditionalQueryPrefix = "fleet_additional_query_"
// hostDistributedQueryPrefix is prepended to the query name when a query is
// run from a distributed query campaign.
const hostDistributedQueryPrefix = "fleet_distributed_query_"
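// Editorial sketch (not part of the original file): the prefixes above produce
// distributed query names such as "fleet_detail_query_os_version", which the
// ingestion code later strips in order to route results.
func exampleDetailQueryName() string {
	return hostDetailQueryPrefix + "os_version"
}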
type detailQuery struct {
Query string
// Platforms is a list of platforms to run the query on. If this value is
// empty, run on all platforms.
Platforms []string
IngestFunc func(logger log.Logger, host *fleet.Host, rows []map[string]string) error
}
// runForPlatform determines whether this detail query should run on the given platform
func (q *detailQuery) runForPlatform(platform string) bool {
if len(q.Platforms) == 0 {
return true
}
for _, p := range q.Platforms {
if p == platform {
return true
}
}
return false
}
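// Editorial sketch (not part of the original file): an empty Platforms list
// means "run everywhere", while a populated list restricts the query.
func exampleRunForPlatform() (bool, bool) {
	everywhere := detailQuery{}
	darwinOnly := detailQuery{Platforms: []string{"darwin"}}
	// Returns true, false for a "windows" host.
	return everywhere.runForPlatform("windows"), darwinOnly.runForPlatform("windows")
}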
// detailQueries defines the detail queries that should be run on the host, as
// well as how the results of those queries should be ingested into the
// fleet.Host data model. This map should not be modified at runtime.
var detailQueries = map[string]detailQuery{
"network_interface": {
Query: `select address, mac
from interface_details id join interface_addresses ia
on ia.interface = id.interface where length(mac) > 0
order by (ibytes + obytes) desc`,
IngestFunc: func(logger log.Logger, host *fleet.Host, rows []map[string]string) (err error) {
if len(rows) == 0 {
logger.Log("component", "service", "method", "IngestFunc", "err",
"detail_query_network_interface expected 1 or more results")
return nil
}
// Rows are ordered by traffic, so we will get the most active
// interface by iterating in order
var firstIPv4, firstIPv6 map[string]string
for _, row := range rows {
ip := net.ParseIP(row["address"])
if ip == nil {
continue
}
// Skip link-local and loopback interfaces
if ip.IsLinkLocalUnicast() || ip.IsLoopback() {
continue
}
if strings.Contains(row["address"], ":") {
//IPv6
if firstIPv6 == nil {
firstIPv6 = row
}
} else {
// IPv4
if firstIPv4 == nil {
firstIPv4 = row
}
}
}
var selected map[string]string
switch {
// Prefer IPv4
case firstIPv4 != nil:
selected = firstIPv4
// Otherwise IPv6
case firstIPv6 != nil:
selected = firstIPv6
// If only link-local and loopback found, still use the first
// interface so that we don't get an empty value.
default:
selected = rows[0]
}
host.PrimaryIP = selected["address"]
host.PrimaryMac = selected["mac"]
return nil
},
},
"os_version": {
Query: "select * from os_version limit 1",
IngestFunc: func(logger log.Logger, host *fleet.Host, rows []map[string]string) error {
if len(rows) != 1 {
logger.Log("component", "service", "method", "IngestFunc", "err",
fmt.Sprintf("detail_query_os_version expected single result got %d", len(rows)))
return nil
}
host.OSVersion = fmt.Sprintf(
"%s %s.%s.%s",
rows[0]["name"],
rows[0]["major"],
rows[0]["minor"],
rows[0]["patch"],
)
host.OSVersion = strings.Trim(host.OSVersion, ".")
if build, ok := rows[0]["build"]; ok {
host.Build = build
}
host.Platform = rows[0]["platform"]
host.PlatformLike = rows[0]["platform_like"]
host.CodeName = rows[0]["code_name"]
			// On CentOS 6 there is an osquery bug that leaves
			// platform empty. Work around it here.
if host.Platform == "" &&
strings.Contains(strings.ToLower(rows[0]["name"]), "centos") {
host.Platform = "centos"
}
return nil
},
},
"osquery_flags": {
// Collect the interval info (used for online status
// calculation) from the osquery flags. We typically control
// distributed_interval (but it's not required), and typically
// do not control config_tls_refresh.
Query: `select name, value from osquery_flags where name in ("distributed_interval", "config_tls_refresh", "config_refresh", "logger_tls_period")`,
IngestFunc: func(logger log.Logger, host *fleet.Host, rows []map[string]string) error {
var configTLSRefresh, configRefresh uint
var configRefreshSeen, configTLSRefreshSeen bool
for _, row := range rows {
switch row["name"] {
case "distributed_interval":
interval, err := strconv.Atoi(emptyToZero(row["value"]))
if err != nil {
return errors.Wrap(err, "parsing distributed_interval")
}
host.DistributedInterval = uint(interval)
case "config_tls_refresh":
// Prior to osquery 2.4.6, the flag was
// called `config_tls_refresh`.
interval, err := strconv.Atoi(emptyToZero(row["value"]))
if err != nil {
return errors.Wrap(err, "parsing config_tls_refresh")
}
configTLSRefresh = uint(interval)
configTLSRefreshSeen = true
case "config_refresh":
// After 2.4.6 `config_tls_refresh` was
// aliased to `config_refresh`.
interval, err := strconv.Atoi(emptyToZero(row["value"]))
if err != nil {
return errors.Wrap(err, "parsing config_refresh")
}
configRefresh = uint(interval)
configRefreshSeen = true
case "logger_tls_period":
interval, err := strconv.Atoi(emptyToZero(row["value"]))
if err != nil {
return errors.Wrap(err, "parsing logger_tls_period")
}
host.LoggerTLSPeriod = uint(interval)
}
}
// Since the `config_refresh` flag existed prior to
// 2.4.6 and had a different meaning, we prefer
// `config_tls_refresh` if it was set, and use
// `config_refresh` as a fallback.
if configTLSRefreshSeen {
host.ConfigTLSRefresh = configTLSRefresh
} else if configRefreshSeen {
host.ConfigTLSRefresh = configRefresh
}
return nil
},
},
"osquery_info": {
Query: "select * from osquery_info limit 1",
IngestFunc: func(logger log.Logger, host *fleet.Host, rows []map[string]string) error {
if len(rows) != 1 {
logger.Log("component", "service", "method", "IngestFunc", "err",
fmt.Sprintf("detail_query_osquery_info expected single result got %d", len(rows)))
return nil
}
host.OsqueryVersion = rows[0]["version"]
return nil
},
},
"system_info": {
Query: "select * from system_info limit 1",
IngestFunc: func(logger log.Logger, host *fleet.Host, rows []map[string]string) error {
if len(rows) != 1 {
logger.Log("component", "service", "method", "IngestFunc", "err",
fmt.Sprintf("detail_query_system_info expected single result got %d", len(rows)))
return nil
}
var err error
host.Memory, err = strconv.ParseInt(emptyToZero(rows[0]["physical_memory"]), 10, 64)
if err != nil {
return err
}
host.Hostname = rows[0]["hostname"]
host.UUID = rows[0]["uuid"]
host.CPUType = rows[0]["cpu_type"]
host.CPUSubtype = rows[0]["cpu_subtype"]
host.CPUBrand = rows[0]["cpu_brand"]
host.CPUPhysicalCores, err = strconv.Atoi(emptyToZero(rows[0]["cpu_physical_cores"]))
if err != nil {
return err
}
host.CPULogicalCores, err = strconv.Atoi(emptyToZero(rows[0]["cpu_logical_cores"]))
if err != nil {
return err
}
host.HardwareVendor = rows[0]["hardware_vendor"]
host.HardwareModel = rows[0]["hardware_model"]
host.HardwareVersion = rows[0]["hardware_version"]
host.HardwareSerial = rows[0]["hardware_serial"]
host.ComputerName = rows[0]["computer_name"]
return nil
},
},
"uptime": {
Query: "select * from uptime limit 1",
IngestFunc: func(logger log.Logger, host *fleet.Host, rows []map[string]string) error {
if len(rows) != 1 {
logger.Log("component", "service", "method", "IngestFunc", "err",
fmt.Sprintf("detail_query_uptime expected single result got %d", len(rows)))
return nil
}
uptimeSeconds, err := strconv.Atoi(emptyToZero(rows[0]["total_seconds"]))
if err != nil {
return err
}
host.Uptime = time.Duration(uptimeSeconds) * time.Second
return nil
},
},
"software_macos": {
Query: `
SELECT
name AS name,
bundle_short_version AS version,
'Application (macOS)' AS type,
'apps' AS source
FROM apps
UNION
SELECT
name AS name,
version AS version,
'Package (Python)' AS type,
'python_packages' AS source
FROM python_packages
UNION
SELECT
name AS name,
version AS version,
'Browser plugin (Chrome)' AS type,
'chrome_extensions' AS source
FROM chrome_extensions
UNION
SELECT
name AS name,
version AS version,
'Browser plugin (Firefox)' AS type,
'firefox_addons' AS source
FROM firefox_addons
UNION
SELECT
name As name,
version AS version,
'Browser plugin (Safari)' AS type,
'safari_extensions' AS source
FROM safari_extensions
UNION
SELECT
name AS name,
version AS version,
'Package (Homebrew)' AS type,
'homebrew_packages' AS source
FROM homebrew_packages;
`,
Platforms: []string{"darwin"},
IngestFunc: ingestSoftware,
},
"software_linux": {
Query: `
SELECT
name AS name,
version AS version,
'Package (deb)' AS type,
'deb_packages' AS source
FROM deb_packages
UNION
SELECT
package AS name,
version AS version,
'Package (Portage)' AS type,
'portage_packages' AS source
FROM portage_packages
UNION
SELECT
name AS name,
version AS version,
'Package (RPM)' AS type,
'rpm_packages' AS source
FROM rpm_packages
UNION
SELECT
name AS name,
version AS version,
'Package (NPM)' AS type,
'npm_packages' AS source
FROM npm_packages
UNION
SELECT
name AS name,
version AS version,
'Package (Atom)' AS type,
'atom_packages' AS source
FROM atom_packages
UNION
SELECT
name AS name,
version AS version,
'Package (Python)' AS type,
'python_packages' AS source
FROM python_packages;
`,
Platforms: []string{"linux", "rhel", "ubuntu", "centos"},
IngestFunc: ingestSoftware,
},
"software_windows": {
Query: `
SELECT
name AS name,
version AS version,
'Program (Windows)' AS type,
'programs' AS source
FROM programs
UNION
SELECT
name AS name,
version AS version,
'Package (Python)' AS type,
'python_packages' AS source
FROM python_packages
UNION
SELECT
name AS name,
version AS version,
'Browser plugin (IE)' AS type,
'ie_extensions' AS source
FROM ie_extensions
UNION
SELECT
name AS name,
version AS version,
'Browser plugin (Chrome)' AS type,
'chrome_extensions' AS source
FROM chrome_extensions
UNION
SELECT
name AS name,
version AS version,
'Browser plugin (Firefox)' AS type,
'firefox_addons' AS source
FROM firefox_addons
UNION
SELECT
name AS name,
version AS version,
'Package (Chocolatey)' AS type,
'chocolatey_packages' AS source
FROM chocolatey_packages
UNION
SELECT
name AS name,
version AS version,
'Package (Atom)' AS type,
'atom_packages' AS source
FROM atom_packages
UNION
SELECT
name AS name,
version AS version,
'Package (Python)' AS type,
'python_packages' AS source
FROM python_packages;
`,
Platforms: []string{"windows"},
IngestFunc: ingestSoftware,
},
"scheduled_query_stats": {
Query: `
SELECT *,
(SELECT value from osquery_flags where name = 'pack_delimiter') AS delimiter
FROM osquery_schedule
`,
IngestFunc: func(logger log.Logger, host *fleet.Host, rows []map[string]string) error {
packs := map[string][]fleet.ScheduledQueryStats{}
for _, row := range rows {
providedName := row["name"]
if providedName == "" {
level.Debug(logger).Log(
"msg", "host reported scheduled query with empty name",
"host", host.Hostname,
)
continue
}
delimiter := row["delimiter"]
if delimiter == "" {
level.Debug(logger).Log(
"msg", "host reported scheduled query with empty delimiter",
"host", host.Hostname,
)
continue
}
// Split with a limit of 2 in case query name includes the
// delimiter. Not much we can do if pack name includes the
// delimiter.
trimmedName := strings.TrimPrefix(providedName, "pack"+delimiter)
parts := strings.SplitN(trimmedName, delimiter, 2)
if len(parts) != 2 {
level.Debug(logger).Log(
"msg", "could not split pack and query names",
"host", host.Hostname,
"name", providedName,
"delimiter", delimiter,
)
continue
}
packName, scheduledName := parts[0], parts[1]
stats := fleet.ScheduledQueryStats{
ScheduledQueryName: scheduledName,
PackName: packName,
AverageMemory: cast.ToInt(row["average_memory"]),
Denylisted: cast.ToBool(row["denylisted"]),
Executions: cast.ToInt(row["executions"]),
Interval: cast.ToInt(row["interval"]),
					// Convert the unix timestamp reported by osquery into a time.Time.
LastExecuted: time.Unix(cast.ToInt64(row["last_executed"]), 0).UTC(),
OutputSize: cast.ToInt(row["output_size"]),
SystemTime: cast.ToInt(row["system_time"]),
UserTime: cast.ToInt(row["user_time"]),
WallTime: cast.ToInt(row["wall_time"]),
}
packs[packName] = append(packs[packName], stats)
}
host.PackStats = []fleet.PackStats{}
for packName, stats := range packs {
host.PackStats = append(
host.PackStats,
fleet.PackStats{
PackName: packName,
QueryStats: stats,
},
)
}
return nil
},
},
"users": {
Query: `SELECT uid, username, type, groupname FROM users u JOIN groups g ON g.gid=u.gid;`,
IngestFunc: func(logger log.Logger, host *fleet.Host, rows []map[string]string) error {
var users []fleet.HostUser
for _, row := range rows {
uid, err := strconv.Atoi(row["uid"])
if err != nil {
return errors.Wrapf(err, "converting uid %s to int", row["uid"])
}
username := row["username"]
type_ := row["type"]
groupname := row["groupname"]
u := fleet.HostUser{
Uid: uint(uid),
Username: username,
Type: type_,
GroupName: groupname,
}
users = append(users, u)
}
host.Users = users
return nil
},
},
}
func ingestSoftware(logger log.Logger, host *fleet.Host, rows []map[string]string) error {
software := fleet.HostSoftware{Modified: true}
for _, row := range rows {
name := row["name"]
version := row["version"]
source := row["source"]
if name == "" {
level.Debug(logger).Log(
"msg", "host reported software with empty name",
"host", host.Hostname,
"version", version,
"source", source,
)
continue
}
if source == "" {
level.Debug(logger).Log(
"msg", "host reported software with empty name",
"host", host.Hostname,
"version", version,
"name", name,
)
continue
}
s := fleet.Software{Name: name, Version: version, Source: source}
software.Software = append(software.Software, s)
}
host.HostSoftware = software
return nil
}
// hostDetailQueries returns the map of queries that should be executed by
// osqueryd to fill in the host details
func (svc *Service) hostDetailQueries(host fleet.Host) (map[string]string, error) {
queries := make(map[string]string)
if host.DetailUpdatedAt.After(svc.clock.Now().Add(-svc.config.Osquery.DetailUpdateInterval)) && !host.RefetchRequested {
// No need to update already fresh details
return queries, nil
}
for name, query := range detailQueries {
if query.runForPlatform(host.Platform) {
if strings.HasPrefix(name, "software_") {
// Feature flag this because of as-yet-untested performance
// considerations.
if os.Getenv("FLEET_BETA_SOFTWARE_INVENTORY") == "" {
continue
}
}
queries[hostDetailQueryPrefix+name] = query.Query
}
}
// Get additional queries
config, err := svc.ds.AppConfig()
if err != nil {
return nil, osqueryError{message: "get additional queries: " + err.Error()}
}
if config.AdditionalQueries == nil {
// No additional queries set
return queries, nil
}
var additionalQueries map[string]string
if err := json.Unmarshal(*config.AdditionalQueries, &additionalQueries); err != nil {
return nil, osqueryError{message: "unmarshal additional queries: " + err.Error()}
}
for name, query := range additionalQueries {
queries[hostAdditionalQueryPrefix+name] = query
}
return queries, nil
}
func (svc *Service) GetDistributedQueries(ctx context.Context) (map[string]string, uint, error) {
// skipauth: Authorization is currently for user endpoints only.
svc.authz.SkipAuthorization(ctx)
host, ok := hostctx.FromContext(ctx)
if !ok {
return nil, 0, osqueryError{message: "internal error: missing host from request context"}
}
queries, err := svc.hostDetailQueries(host)
if err != nil {
return nil, 0, err
}
// Retrieve the label queries that should be updated
cutoff := svc.clock.Now().Add(-svc.config.Osquery.LabelUpdateInterval)
labelQueries, err := svc.ds.LabelQueriesForHost(&host, cutoff)
if err != nil {
return nil, 0, osqueryError{message: "retrieving label queries: " + err.Error()}
}
for name, query := range labelQueries {
queries[hostLabelQueryPrefix+name] = query
}
liveQueries, err := svc.liveQueryStore.QueriesForHost(host.ID)
if err != nil {
return nil, 0, osqueryError{message: "retrieve live queries: " + err.Error()}
}
for name, query := range liveQueries {
queries[hostDistributedQueryPrefix+name] = query
}
accelerate := uint(0)
if host.Hostname == "" || host.Platform == "" {
// Assume this host is just enrolling, and accelerate checkins
// (to allow for platform restricted labels to run quickly
// after platform is retrieved from details)
accelerate = 10
}
return queries, accelerate, nil
}
// ingestDetailQuery takes the results of a detail query and modifies the
// provided fleet.Host appropriately.
func (svc *Service) ingestDetailQuery(host *fleet.Host, name string, rows []map[string]string) error {
trimmedQuery := strings.TrimPrefix(name, hostDetailQueryPrefix)
query, ok := detailQueries[trimmedQuery]
if !ok {
return osqueryError{message: "unknown detail query " + trimmedQuery}
}
err := query.IngestFunc(svc.logger, host, rows)
if err != nil {
return osqueryError{
message: fmt.Sprintf("ingesting query %s: %s", name, err.Error()),
}
}
// Refetch is no longer needed after ingesting details.
host.RefetchRequested = false
return nil
}
// ingestLabelQuery records the results of label queries run by a host
func (svc *Service) ingestLabelQuery(host fleet.Host, query string, rows []map[string]string, results map[uint]bool) error {
trimmedQuery := strings.TrimPrefix(query, hostLabelQueryPrefix)
trimmedQueryNum, err := strconv.Atoi(emptyToZero(trimmedQuery))
if err != nil {
return errors.Wrap(err, "converting query from string to int")
}
// A label query matches if there is at least one result for that
// query. We must also store negative results.
results[uint(trimmedQueryNum)] = len(rows) > 0
return nil
}
// ingestDistributedQuery takes the results of a distributed query and modifies the
// provided fleet.Host appropriately.
func (svc *Service) ingestDistributedQuery(host fleet.Host, name string, rows []map[string]string, failed bool, errMsg string) error {
trimmedQuery := strings.TrimPrefix(name, hostDistributedQueryPrefix)
campaignID, err := strconv.Atoi(emptyToZero(trimmedQuery))
if err != nil {
return osqueryError{message: "unable to parse campaign ID: " + trimmedQuery}
}
// Write the results to the pubsub store
res := fleet.DistributedQueryResult{
DistributedQueryCampaignID: uint(campaignID),
Host: host,
Rows: rows,
}
if failed {
res.Error = &errMsg
}
err = svc.resultStore.WriteResult(res)
if err != nil {
nErr, ok := err.(pubsub.Error)
if !ok || !nErr.NoSubscriber() {
return osqueryError{message: "writing results: " + err.Error()}
}
// If there are no subscribers, the campaign is "orphaned"
// and should be closed so that we don't continue trying to
// execute that query when we can't write to any subscriber
campaign, err := svc.ds.DistributedQueryCampaign(uint(campaignID))
if err != nil {
if err := svc.liveQueryStore.StopQuery(strconv.Itoa(int(campaignID))); err != nil {
return osqueryError{message: "stop orphaned campaign after load failure: " + err.Error()}
}
return osqueryError{message: "loading orphaned campaign: " + err.Error()}
}
if campaign.CreatedAt.After(svc.clock.Now().Add(-5 * time.Second)) {
// Give the client 5 seconds to connect before considering the
// campaign orphaned
return osqueryError{message: "campaign waiting for listener (please retry)"}
}
if campaign.Status != fleet.QueryComplete {
campaign.Status = fleet.QueryComplete
if err := svc.ds.SaveDistributedQueryCampaign(campaign); err != nil {
return osqueryError{message: "closing orphaned campaign: " + err.Error()}
}
}
if err := svc.liveQueryStore.StopQuery(strconv.Itoa(int(campaignID))); err != nil {
return osqueryError{message: "stopping orphaned campaign: " + err.Error()}
}
// No need to record query completion in this case
return nil
}
err = svc.liveQueryStore.QueryCompletedByHost(strconv.Itoa(int(campaignID)), host.ID)
if err != nil {
return osqueryError{message: "record query completion: " + err.Error()}
}
return nil
}
func (svc *Service) SubmitDistributedQueryResults(ctx context.Context, results fleet.OsqueryDistributedQueryResults, statuses map[string]fleet.OsqueryStatus, messages map[string]string) error {
// skipauth: Authorization is currently for user endpoints only.
svc.authz.SkipAuthorization(ctx)
host, ok := hostctx.FromContext(ctx)
if !ok {
return osqueryError{message: "internal error: missing host from request context"}
}
// Check for label queries and if so, load host additional. If we don't do
// this, we will end up unintentionally dropping any existing host
// additional info.
for query := range results {
if strings.HasPrefix(query, hostLabelQueryPrefix) {
fullHost, err := svc.ds.Host(host.ID)
if err != nil {
return osqueryError{message: "internal error: load host additional: " + err.Error()}
}
host = *fullHost
break
}
}
var err error
detailUpdated := false // Whether detail or additional was updated
additionalResults := make(fleet.OsqueryDistributedQueryResults)
labelResults := map[uint]bool{}
for query, rows := range results {
switch {
case strings.HasPrefix(query, hostDetailQueryPrefix):
err = svc.ingestDetailQuery(&host, query, rows)
detailUpdated = true
case strings.HasPrefix(query, hostAdditionalQueryPrefix):
name := strings.TrimPrefix(query, hostAdditionalQueryPrefix)
additionalResults[name] = rows
detailUpdated = true
case strings.HasPrefix(query, hostLabelQueryPrefix):
err = svc.ingestLabelQuery(host, query, rows, labelResults)
case strings.HasPrefix(query, hostDistributedQueryPrefix):
// osquery docs say any nonzero (string) value for
// status indicates a query error
status, ok := statuses[query]
failed := (ok && status != fleet.StatusOK)
err = svc.ingestDistributedQuery(host, query, rows, failed, messages[query])
default:
err = osqueryError{message: "unknown query prefix: " + query}
}
if err != nil {
return osqueryError{message: "failed to ingest result: " + err.Error()}
}
}
if len(labelResults) > 0 {
host.Modified = true
host.LabelUpdatedAt = svc.clock.Now()
err = svc.ds.RecordLabelQueryExecutions(&host, labelResults, svc.clock.Now())
if err != nil {
return osqueryError{message: "failed to save labels: " + err.Error()}
}
}
if detailUpdated {
host.Modified = true
host.DetailUpdatedAt = svc.clock.Now()
additionalJSON, err := json.Marshal(additionalResults)
if err != nil {
return osqueryError{message: "failed to marshal additional: " + err.Error()}
}
additional := json.RawMessage(additionalJSON)
host.Additional = &additional
}
if host.Modified {
err = svc.ds.SaveHost(&host)
if err != nil {
return osqueryError{message: "failed to update host details: " + err.Error()}
}
}
return nil
}
|
[
"\"FLEET_BETA_SOFTWARE_INVENTORY\""
] |
[] |
[
"FLEET_BETA_SOFTWARE_INVENTORY"
] |
[]
|
["FLEET_BETA_SOFTWARE_INVENTORY"]
|
go
| 1 | 0 | |
example_app/aws_authorizer.py
|
"""
AWS API-Gateway Authorizer
==========================
This authorizer is designed to be attached to an AWS API-Gateway, as a
Lambda authorizer. It assumes that AWS Cognito is used to authenticate
a client (UI) and then API requests will pass a JSON Web Token to be
validated for authorization of API method calls. The initial designs
for authorization are very limited in scope.
This auth module is using a recent release of jwcrypto for several reasons:
- jwcrypto supports all JOSE features (see jwt.io libs for python)
- jwcrypto has well designed and documented APIs (python-jose does not)
- it can generate keys as well as other functions for JOSE
.. seealso::
- https://jwcrypto.readthedocs.io/en/latest/index.html
- https://auth0.com/docs/tokens/concepts/jwts
- https://jwt.io/
- https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-jwt-authorizer.html
- https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-verifying-a-jwt.html
- https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html
License
*******
This auth module is a derivative of various sources of JWT documentation and
source code samples that are covered by the Apache License, Version 2.0.
Copyright 2015-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use
this file except in compliance with the License. A copy of the License is
located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions and
limitations under the License.
"""
# WARNING: moto provides python-jose as a dev-dep, which is not part of
# the app-deps and should not be used in this auth module, that is,
# do not use imports like these:
# from jose import jwt
# from jose import jwk
import json
import os
import re
from typing import Dict
import jwcrypto
import jwcrypto.jwk
import jwcrypto.jwt
import requests
from dataclasses import dataclass
from example_app.logger import get_logger
LOGGER = get_logger(__name__)
API_ADMIN_EMAILS = [
email.strip() for email in os.getenv("API_ADMIN_EMAILS", "").split(",")
]
COGNITO_REGION = os.getenv("API_COGNITO_REGION", "us-west-2")
COGNITO_CLIENT_ID = os.getenv("API_COGNITO_CLIENT_ID")
COGNITO_POOL_ID = os.getenv("API_COGNITO_POOL_ID")
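# Editorial sketch (hypothetical values, not part of the original module): the
# authorizer expects configuration along these lines in the Lambda environment:
#   API_COGNITO_REGION=us-west-2
#   API_COGNITO_CLIENT_ID=1example23456789
#   API_COGNITO_POOL_ID=us-west-2_EXAMPLE
#   API_ADMIN_EMAILS=admin@example.com,ops@example.com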
@dataclass
class AuthError(Exception):
error: str
status_code: int
@dataclass
class CognitoPool:
id: str
client_id: str
region: str
_jwks: Dict = None
@property
def jwks_uri(self) -> str:
return "https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json".format(
self.region, self.id
)
@property
def jwks(self) -> Dict:
if self._jwks is None:
LOGGER.debug(self.jwks_uri)
response = requests.get(self.jwks_uri)
LOGGER.debug(response)
response.raise_for_status()
# use jwcrypto to parse the JWKS (it takes a json string)
jwks = jwcrypto.jwk.JWKSet.from_json(response.text)
self._jwks = json.loads(jwks.export())
LOGGER.debug(self._jwks)
return self._jwks
@staticmethod
def jwt_decode(jwt_token: str):
try:
jwt_headers, jwt_payload, jwt_signature = jwt_token.split(".")
if not isinstance(jwt_headers, str):
raise AuthError("Unauthorized - JWT is malformed", 401)
if not isinstance(jwt_payload, str):
raise AuthError("Unauthorized - JWT is malformed", 401)
if not isinstance(jwt_signature, str):
raise AuthError("Unauthorized - JWT is malformed", 401)
unverified_token = jwcrypto.jwt.JWT(jwt=jwt_token)
jwt_headers = unverified_token.token.jose_header
if not isinstance(jwt_headers, dict):
raise AuthError("Unauthorized - JWT has malformed headers", 401)
if not jwt_headers.get("alg"):
raise AuthError("Unauthorized - JWT-alg is not in headers", 401)
if not jwt_headers.get("kid"):
raise AuthError("Unauthorized - JWT-kid is not in headers", 401)
jwt_payload = unverified_token.token.objects["payload"].decode("utf-8")
jwt_payload = json.loads(jwt_payload)
if not isinstance(jwt_payload, dict):
raise AuthError("Unauthorized - JWT has malformed payload", 401)
if not jwt_payload.get("token_use") in ["id", "access"]:
raise AuthError("Unauthorized - JWT has malformed payload", 401)
return jwt_headers, jwt_payload, jwt_signature
except Exception as err:
LOGGER.error(err)
raise AuthError("Unauthorized - JWT is malformed", 401)
def jwt_public_key(self, jwt_token: str):
unverified_token = jwcrypto.jwt.JWT(jwt=jwt_token)
jwt_headers = unverified_token.token.jose_header
kid = jwt_headers.get("kid")
if kid is None:
raise AuthError("Unauthorized - JWT-kid is missing", 401)
LOGGER.debug(kid)
for pub_key in self.jwks.get("keys"):
if kid == pub_key.get("kid"):
LOGGER.info("JWT-kid has matching public-kid")
return pub_key
raise AuthError("Unauthorized - JWT-kid has no matching public-kid", 401)
def jwt_claims(self, jwt_token: str):
try:
public_key = self.jwt_public_key(jwt_token)
public_jwk = jwcrypto.jwk.JWK(**public_key)
verified_token = jwcrypto.jwt.JWT(
key=public_jwk, jwt=jwt_token, algs=[public_key["alg"]]
)
return json.loads(verified_token.claims)
except Exception as err:
LOGGER.error(err)
raise AuthError("Unauthorized - token failed to verify", 401)
COGNITO_POOL = CognitoPool(
region=COGNITO_REGION, client_id=COGNITO_CLIENT_ID, id=COGNITO_POOL_ID
)
if os.getenv("AWS_EXECUTION_ENV"):
# instead of re-downloading the public keys every time, memoize them only on cold start
# https://aws.amazon.com/blogs/compute/container-reuse-in-lambda/
# https://docs.aws.amazon.com/lambda/latest/dg/runtimes-context.html
assert COGNITO_POOL.jwks
@dataclass
class APIGateway:
aws_region: str
aws_account_id: str
api_gateway_arn: str
rest_api_id: str
rest_api_stage: str
@staticmethod
def from_method_arn(method_arn):
tmp = method_arn.split(":")
api_gateway_arn = tmp[5].split("/")
return APIGateway(
aws_region=tmp[3],
aws_account_id=tmp[4],
api_gateway_arn=tmp[5],
rest_api_id=api_gateway_arn[0],
rest_api_stage=api_gateway_arn[1],
)
def get_auth_policy(self, principal_id: str):
policy = AuthPolicy(principal_id, self.aws_account_id)
policy.restApiId = self.rest_api_id
policy.stage = self.rest_api_stage
policy.region = self.aws_region
return policy
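# Editorial sketch (hypothetical ARN values, not part of the original module):
# how a method ARN maps onto the APIGateway fields.
def _example_method_arn_parse() -> APIGateway:
    arn = "arn:aws:execute-api:us-west-2:123456789012:abc123defg/prod/GET/api/healthz"
    gateway = APIGateway.from_method_arn(arn)
    # gateway.aws_region == "us-west-2", gateway.aws_account_id == "123456789012",
    # gateway.rest_api_id == "abc123defg", gateway.rest_api_stage == "prod"
    return gateway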
def aws_auth_handler(event, context):
"""AWS Authorizer for JWT tokens provided by AWS Cognito
event should have this form:
{
"type": "TOKEN",
"authorizationToken": "{caller-supplied-token}",
"methodArn": "arn:aws:execute-api:{regionId}:{accountId}:{apiId}/{stage}/{httpVerb}/[{resource}/[{child-resources}]]"
}
.. seealso::
- https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html
"""
LOGGER.debug("event: %s", event)
LOGGER.debug("context: %s", context)
try:
# validate the incoming token
# and produce the principal user identifier associated with the token
# this could be accomplished in a number of ways:
# 1. Call out to OAuth provider
# 2. Decode a JWT token inline
# 3. Lookup in a self-managed DB
# TODO: try 2. Decode a JWT token inline
# https://docs.authlib.org/en/stable/jose/index.html
# https://aws.amazon.com/premiumsupport/knowledge-center/decode-verify-cognito-json-token/
# https://github.com/awslabs/aws-support-tools/tree/master/Cognito/decode-verify-jwt
# there are flask plugins for this, but the API-Gateway solution is different
# https://flask-jwt-extended.readthedocs.io/en/stable/basic_usage/
# https://auth0.com/docs/quickstart/backend/python
token = event.get("authorizationToken")
if token is None:
raise AuthError("Unauthorized - authorizationToken is missing", 401)
if token.startswith("Bearer"):
token = token.strip("Bearer").strip()
# TODO: handle a SigV4 token?
# 'authorizationToken': 'AWS4-HMAC-SHA256
# Credential=<secret_id>/20200529/us-west-2/execute-api/aws4_request,
# Signature=xyz'
claims = COGNITO_POOL.jwt_claims(token) # also validates JWT
issuer = claims.get("iss")
if not (COGNITO_POOL.region in issuer and COGNITO_POOL.id in issuer):
raise AuthError("Unauthorized - invalid issuer in JWT claims", 403)
if claims["token_use"] == "id":
audience = claims.get("aud")
if audience != COGNITO_POOL.client_id:
raise AuthError("Unauthorized - invalid client-id in JWT claims", 403)
elif claims["token_use"] == "access":
client_id = claims.get("client_id")
if client_id != COGNITO_POOL.client_id:
raise AuthError("Unauthorized - invalid client-id in JWT claims", 403)
else:
# token validation should check this already, so should not get here
raise AuthError("Unauthorized - invalid client-id in JWT claims", 403)
if claims["token_use"] == "id":
principle_id = claims.get("email")
if not principle_id:
raise AuthError(
"Unauthorized - invalid principle-id in JWT claims", 403
)
if not claims.get("email_verified"):
raise AuthError(
"Unauthorized - email is not verified in JWT claims", 403
)
elif claims["token_use"] == "access":
principle_id = claims.get("username")
if not principle_id:
raise AuthError(
"Unauthorized - invalid principle-id in JWT claims", 403
)
else:
# token validation should check this already, so should not get here
raise AuthError("Unauthorized - invalid principle-id in JWT claims", 403)
# if the token is valid, a policy must be generated which will allow or deny
# access to the client
# if access is denied, the client will receive a 403 Access Denied response
# if access is allowed, API Gateway will proceed with the backend
# integration configured on the method that was called
# this function must generate a policy that is associated with the
# recognized principal user identifier. depending on your use case, you
# might store policies in a DB, or generate them on the fly
# keep in mind, the policy is cached for 5 minutes by default (TTL is
# configurable in the authorizer) and will apply to subsequent calls to any
# method/resource in the RestApi made with the same token
        # the policy below allows all methods for a valid token and then denies selected methods for non-admins
LOGGER.info("Method ARN: %s", event["methodArn"])
api_gateway = APIGateway.from_method_arn(event.get("methodArn"))
policy = api_gateway.get_auth_policy(principle_id)
policy.allowAllMethods() # a valid signed JWT is sufficient
#
# TODO: use cognito-groups with an JWT-access token?
#
if principle_id not in API_ADMIN_EMAILS:
policy.denyMethod(HttpVerb.GET, "/api/healthz")
# TODO: restrict the policy by additional options:
# #: The API Gateway API id. By default this is set to '*'
# restApiId = "*"
# #: The region where the API is deployed. By default this is set to '*'
# region = "*"
# #: The name of the stage used in the policy. By default this is set to '*'
# stage = "*"
# Finally, build the policy
auth_response = policy.build()
# # Add additional key-value pairs associated with the authenticated principal
# # these are made available by API-GW like so: $context.authorizer.<key>
# # additional context is cached
# context = {"key": "value", "number": 1, "bool": True} # $context.authorizer.key -> value
# # context['arr'] = ['foo'] <- this is invalid, API-GW will not accept it
# # context['obj'] = {'foo':'bar'} <- also invalid
# auth_response["context"] = context
# TODO: use "usageIdentifierKey": "{api-key}" for API-key use plans, if any.
return auth_response
except AuthError as auth_error:
if auth_error.status_code == 403:
api_gateway = APIGateway.from_method_arn(event.get("methodArn"))
policy = api_gateway.get_auth_policy("nobody")
policy.denyAllMethods()
auth_response = policy.build()
auth_response["error"] = auth_error.error
return auth_response
# API-GW requires the message text to be only "Unauthorized" for a 401
raise Exception("Unauthorized")
class HttpVerb:
GET = "GET"
POST = "POST"
PUT = "PUT"
PATCH = "PATCH"
HEAD = "HEAD"
DELETE = "DELETE"
OPTIONS = "OPTIONS"
ALL = "*"
class AuthPolicy(object):
#: The AWS account id the policy will be generated for.
#: This is used to create the method ARNs.
awsAccountId = ""
#: The principal used for the policy, this should be a unique identifier for the end user.
principalId = ""
#: The policy version used for the evaluation. This should always be '2012-10-17'
version = "2012-10-17"
#: The regular expression used to validate resource paths for the policy
    pathRegex = r"^[/.a-zA-Z0-9-\*]+$"
#: These are the internal lists of allowed and denied methods. These are lists
#: of objects and each object has 2 properties: A resource ARN and a nullable
#: conditions statement. The build method processes these lists and generates
#: the appropriate statements for the final policy
allowMethods = []
denyMethods = []
#: The API Gateway API id. By default this is set to '*'
restApiId = "*"
#: The region where the API is deployed. By default this is set to '*'
region = "*"
#: The name of the stage used in the policy. By default this is set to '*'
stage = "*"
def __init__(self, principal, awsAccountId):
self.awsAccountId = awsAccountId
self.principalId = principal
self.allowMethods = []
self.denyMethods = []
def _addMethod(self, effect, verb, resource, conditions):
"""Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null."""
if verb != "*" and not hasattr(HttpVerb, verb):
raise NameError(
"Invalid HTTP verb " + verb + ". Allowed verbs in HttpVerb class"
)
resourcePattern = re.compile(self.pathRegex)
if not resourcePattern.match(resource):
raise NameError(
"Invalid resource path: "
+ resource
+ ". Path should match "
+ self.pathRegex
)
if resource[:1] == "/":
resource = resource[1:]
resourceArn = (
"arn:aws:execute-api:"
+ self.region
+ ":"
+ self.awsAccountId
+ ":"
+ self.restApiId
+ "/"
+ self.stage
+ "/"
+ verb
+ "/"
+ resource
)
if effect.lower() == "allow":
self.allowMethods.append(
{"resourceArn": resourceArn, "conditions": conditions}
)
elif effect.lower() == "deny":
self.denyMethods.append(
{"resourceArn": resourceArn, "conditions": conditions}
)
def _getEmptyStatement(self, effect):
"""Returns an empty statement object prepopulated with the correct action and the
desired effect."""
statement = {
"Action": "execute-api:Invoke",
"Effect": effect[:1].upper() + effect[1:].lower(),
"Resource": [],
}
return statement
def _getStatementForEffect(self, effect, methods):
"""This function loops over an array of objects containing a resourceArn and
conditions statement and generates the array of statements for the policy."""
statements = []
if len(methods) > 0:
statement = self._getEmptyStatement(effect)
for curMethod in methods:
if curMethod["conditions"] is None or len(curMethod["conditions"]) == 0:
statement["Resource"].append(curMethod["resourceArn"])
else:
conditionalStatement = self._getEmptyStatement(effect)
conditionalStatement["Resource"].append(curMethod["resourceArn"])
conditionalStatement["Condition"] = curMethod["conditions"]
statements.append(conditionalStatement)
statements.append(statement)
return statements
def allowAllMethods(self):
"""Adds a '*' allow to the policy to authorize access to all methods of an API"""
self._addMethod("Allow", HttpVerb.ALL, "*", [])
def denyAllMethods(self):
"""Adds a '*' allow to the policy to deny access to all methods of an API"""
self._addMethod("Deny", HttpVerb.ALL, "*", [])
def allowMethod(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods for the policy"""
self._addMethod("Allow", verb, resource, [])
def denyMethod(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods for the policy"""
self._addMethod("Deny", verb, resource, [])
def allowMethodWithConditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide
/reference_policies_elements.html#Condition"""
self._addMethod("Allow", verb, resource, conditions)
def denyMethodWithConditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide
/reference_policies_elements.html#Condition"""
self._addMethod("Deny", verb, resource, conditions)
def build(self):
"""Generates the policy document based on the internal lists of allowed and denied
conditions. This will generate a policy with two main statements for the effect:
one statement for Allow and one statement for Deny.
        Methods that include conditions will have their own statement in the policy."""
if (self.allowMethods is None or len(self.allowMethods) == 0) and (
self.denyMethods is None or len(self.denyMethods) == 0
):
raise NameError("No statements defined for the policy")
policy = {
"principalId": self.principalId,
"policyDocument": {"Version": self.version, "Statement": []},
}
policy["policyDocument"]["Statement"].extend(
self._getStatementForEffect("Allow", self.allowMethods)
)
policy["policyDocument"]["Statement"].extend(
self._getStatementForEffect("Deny", self.denyMethods)
)
return policy
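# Editorial sketch (hypothetical IDs, not part of the original module): building
# a policy that allows everything for a principal except one GET path, mirroring
# what aws_auth_handler does for non-admin users.
def _example_build_policy() -> dict:
    policy = AuthPolicy("user@example.com", "123456789012")
    policy.restApiId = "abc123defg"
    policy.region = "us-west-2"
    policy.stage = "prod"
    policy.allowAllMethods()
    policy.denyMethod(HttpVerb.GET, "/api/healthz")
    return policy.build()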
|
[] |
[] |
[
"AWS_EXECUTION_ENV",
"API_COGNITO_REGION",
"API_COGNITO_CLIENT_ID",
"API_ADMIN_EMAILS",
"API_COGNITO_POOL_ID"
] |
[]
|
["AWS_EXECUTION_ENV", "API_COGNITO_REGION", "API_COGNITO_CLIENT_ID", "API_ADMIN_EMAILS", "API_COGNITO_POOL_ID"]
|
python
| 5 | 0 | |
tensorflow/python/tpu/tpu_embedding_v2_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU Embeddings mid level API on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import os
from absl import flags
from absl.testing import parameterized
import numpy as np
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.tpu import tpu_embedding
from tensorflow.python.tpu import tpu_embedding_v2
from tensorflow.python.tpu import tpu_embedding_v2_utils
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training.tracking import util
from tensorflow.python.util import nest
FLAGS = flags.FLAGS
flags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')
flags.DEFINE_string('project', None, 'Name of GCP project with TPU.')
flags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')
flags.DEFINE_string('model_dir', os.environ.get('TEST_TMPDIR'),
'A temporary directory.')
class TPUEmbeddingCheckpointTest(parameterized.TestCase, test.TestCase):
def setUp(self):
super(TPUEmbeddingCheckpointTest, self).setUp()
self.resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project)
remote.connect_to_cluster(self.resolver)
tpu_strategy_util.initialize_tpu_system(self.resolver)
self.strategy = tpu_strategy.TPUStrategy(self.resolver)
self.num_rows = self.strategy.num_replicas_in_sync
# These tests use two mid level API objects, initialized with different
# values. These have the same sizes.
with self.strategy.scope():
self.first_mid_level_contents = np.ones((self.num_rows, 4))
self.first_mid_level_optimizer = tpu_embedding_v2_utils.SGD(
learning_rate=0.1)
self.first_mid_level = self.build_mid_level(
self.first_mid_level_contents, self.first_mid_level_optimizer)
self.second_mid_level_contents = np.ones((self.num_rows, 4)) * 2
self.second_mid_level_optimizer = tpu_embedding_v2_utils.SGD(
learning_rate=0.1)
self.second_mid_level = self.build_mid_level(
self.second_mid_level_contents, self.second_mid_level_optimizer,
initialize_tpu_embedding=False)
self.cpu_mid_level_optimizer = tpu_embedding_v2_utils.SGD(
learning_rate=0.1)
self.cpu_mid_level = self.build_mid_level(
self.second_mid_level_contents, self.cpu_mid_level_optimizer)
def tearDown(self):
tpu_strategy_util.shutdown_tpu_system(self.resolver)
super(TPUEmbeddingCheckpointTest, self).tearDown()
def test_checkpoint_save_retrieves(self):
# Ensure that the variables from the first model are loaded.
self.first_mid_level._load_variables()
self.assertAllClose(
self.first_mid_level_contents,
self.make_checkpoint_and_get_embedding('before_load',
self.first_mid_level),
msg='Checkpoint should contain values from the first api object.')
self.second_mid_level._load_variables()
# When we load the variables from the second mid level API object to the TPU
# we expect that checkpointing the first mid level API object will now
# retrieve the values from the TPU which are now different from the current
# variables in the first mid level.
self.assertAllClose(
self.second_mid_level_contents,
self.make_checkpoint_and_get_embedding('after_load',
self.first_mid_level),
msg='Checkpoint should contain values from the second api object.')
def test_checkpoint_restore_loads(self):
def get_values(mid):
return ops.convert_to_tensor(
mid._variables['table']['parameters'].variables[0])
self.first_mid_level._load_variables()
first_checkpoint = util.Checkpoint(model=self.first_mid_level)
first_checkpoint.save(_get_tmpdir('restore', 'save'))
# Checkpoint now has values from first_mid_level. See first assert in
# test_checkpoint_save_retrieves.
self.second_mid_level._load_variables()
self.assertAllClose(
self.second_mid_level_contents,
get_values(self.second_mid_level),
msg='Second mid level api should contain its initial values.',
)
# We restore the checkpoint of our first model into our second model.
# This should load the first mid level API object onto the TPU.
second_checkpoint = util.Checkpoint(model=self.second_mid_level)
second_checkpoint.restore(_get_tmpdir('restore', 'save-1'))
    # Call retrieve here as a way to check what the TPU contains.
# Calling the retrieve ops directly might make for a cleaner separation of
# test and module, though.
self.second_mid_level._retrieve_variables()
self.assertAllClose(
self.first_mid_level_contents,
get_values(self.second_mid_level),
msg='Second mid level api should have retrieved the first model values.'
)
def test_checkpoint_restore_before_variable_creation(self):
class TestModule(module.Module):
def __init__(self, initializer, rows):
self._initializer = initializer
self._rows = rows
def create_embedding(self):
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=self._rows, dim=4, initializer=self._initializer,
combiner='sum', name='table')
feature_config = (tpu_embedding_v2_utils.FeatureConfig(
table=table, name='feature'),)
optimizer = tpu_embedding_v2_utils.SGD()
self.tpu_embedding = tpu_embedding_v2.TPUEmbedding(
feature_config, self._rows, optimizer)
# We need to clear the already loaded config provided by the setUp method.
tpu_strategy_util.initialize_tpu_system(self.resolver)
with self.strategy.scope():
module1 = TestModule(init_ops_v2.Ones(),
self.strategy.num_replicas_in_sync * 2)
module1.create_embedding()
checkpoint = util.Checkpoint(test_module=module1)
checkpoint.save(_get_tmpdir('restore_before_create', 'save'))
tpu_strategy_util.initialize_tpu_system(self.resolver)
with self.strategy.scope():
module2 = TestModule(init_ops_v2.Zeros(),
self.strategy.num_replicas_in_sync * 2)
checkpoint = util.Checkpoint(test_module=module2)
checkpoint.restore(_get_tmpdir('restore_before_create', 'save-1'))
with self.strategy.scope():
module2.create_embedding()
def get_values(mid):
return mid._variables['table']['parameters'].variables[0].numpy()
self.assertAllClose(np.ones((self.strategy.num_replicas_in_sync * 2, 4)),
get_values(module2.tpu_embedding))
# Fetch the values from the TPU to check that they are the same.
module2.tpu_embedding._retrieve_variables()
self.assertAllClose(np.ones((self.strategy.num_replicas_in_sync * 2, 4)),
get_values(module2.tpu_embedding))
def build_mid_level(self, embedding_values, optimizer,
initialize_tpu_embedding=True):
"""Creates an embedding api object initialized to embedding_values."""
initializer = init_ops_v2.Constant(embedding_values)
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=self.num_rows, dim=4, initializer=initializer,
combiner='sum', name='table')
feature_config = (tpu_embedding_v2_utils.FeatureConfig(
table=table, name='feature'),)
# batch_size here does not matter as we aren't training in any of these
# tests.
return tpu_embedding_v2.TPUEmbedding(
feature_config, 64, optimizer,
initialize_tpu_embedding=initialize_tpu_embedding)
def make_checkpoint_and_get_embedding(self, name, model):
"""Saves model to checkpoint name, retrieves embedding variables."""
checkpoint = util.Checkpoint(model=model)
checkpoint.save(_get_tmpdir(name, 'save'))
# Get the name of the parameters variable which should be the only
# [self.num_rows, 4] shaped tensor in the checkpoint. Note that we do this
# as the key can change.
variables = checkpoint_utils.list_variables(_get_tmpdir(name))
variables = [name for name, size in variables if size == [self.num_rows, 4]]
if len(variables) != 1:
raise RuntimeError('Found {} copies of the parameter variable in the '
'checkpoint. Expected exactly one copy.'.format(
len(variables)))
return checkpoint_utils.load_variable(_get_tmpdir(name), variables[0])
def test_model_export_cpu(self):
self.first_mid_level._load_variables()
tpu_checkpoint = util.Checkpoint(model=self.first_mid_level)
tpu_checkpoint.save(_get_tmpdir('export_cpu', 'save'))
# We restore the checkpoint of our tpu mid level onto our cpu mid level.
cpu_checkpoint = util.Checkpoint(model=self.cpu_mid_level)
cpu_checkpoint.restore(_get_tmpdir('export_cpu', 'save-1'))
@def_function.function
def serve_tensors(features):
features = tpu_embedding_v2.cpu_embedding_lookup(
features, None, self.cpu_mid_level.embedding_tables,
self.cpu_mid_level._feature_config)
return features[0]
signatures = {
'serving_default':
serve_tensors.get_concrete_function(
(tensor_spec.TensorSpec(
shape=(2,), dtype=dtypes.int32, name='feature'),))}
save.save(self.cpu_mid_level,
export_dir=_get_tmpdir('export_cpu', 'exported_model'),
signatures=signatures)
imported = load.load(_get_tmpdir('export_cpu', 'exported_model'))
predict_fn = imported.signatures['serving_default']
input_feature_value = np.array([1, 0])
input_batch = (constant_op.constant(input_feature_value,
dtype=dtypes.int32),)
prediction = predict_fn(*input_batch)['output_0']
self.assertAllClose(prediction.numpy(),
self.first_mid_level_contents[input_feature_value])
@parameterized.parameters(tpu_embedding_v2_utils.SGD,
tpu_embedding_v2_utils.Adagrad,
tpu_embedding_v2_utils.Adam)
def test_check_checkpoint_variable_names_are_same_on_cpu_and_tpu(self,
optimizer):
# Reinitialize the TPU so that we can re-initialize the embeddings with the
# given optimizer.
tpu_strategy_util.initialize_tpu_system(self.resolver)
optimizer = optimizer(learning_rate=0.1)
with self.strategy.scope():
tpu_mid_level = self.build_mid_level(
self.first_mid_level_contents, optimizer)
tpu_checkpoint = util.Checkpoint(model=tpu_mid_level)
tpu_checkpoint.save(_get_tmpdir('save-tpu', 'save'))
tpu_variables = checkpoint_utils.list_variables(_get_tmpdir('save-tpu'))
cpu_mid_level = self.build_mid_level(
self.first_mid_level_contents, optimizer)
cpu_checkpoint = util.Checkpoint(model=cpu_mid_level)
cpu_checkpoint.save(_get_tmpdir('save-cpu', 'save'))
cpu_variables = checkpoint_utils.list_variables(_get_tmpdir('save-cpu'))
self.assertAllEqual(tpu_variables, cpu_variables)
class TPUEmbeddingTest(parameterized.TestCase, test.TestCase):
def setUp(self):
super(TPUEmbeddingTest, self).setUp()
self.embedding_values = np.array(list(range(32)), dtype=np.float64)
self.initializer = init_ops_v2.Constant(self.embedding_values)
# Embedding for video initialized to
# 0 1 2 3
# 4 5 6 7
# ...
self.table_video = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=8,
dim=4,
initializer=self.initializer,
combiner='sum',
name='video')
# Embedding for user initialized to
# 0 1
# 2 3
# 4 5
# 6 7
# ...
self.table_user = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=16,
dim=2,
initializer=self.initializer,
combiner='mean',
name='user')
self.feature_config = (
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='watched'),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='favorited'),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_user, name='friends'))
self.batch_size = 2
self.data_batch_size = 4
# One (global) batch of inputs
# sparse tensor for watched:
# row 0: 0
# row 1: 0, 1
# row 2: 0, 1
# row 3: 1
self.feature_watched_indices = [[0, 0], [1, 0], [1, 1],
[2, 0], [2, 1], [3, 0]]
self.feature_watched_values = [0, 0, 1, 0, 1, 1]
self.feature_watched_row_lengths = [1, 2, 2, 1]
# sparse tensor for favorited:
# row 0: 0, 1
# row 1: 1
# row 2: 0
# row 3: 0, 1
self.feature_favorited_indices = [[0, 0], [0, 1], [1, 0],
[2, 0], [3, 0], [3, 1]]
self.feature_favorited_values = [0, 1, 1, 0, 0, 1]
self.feature_favorited_row_lengths = [2, 1, 1, 2]
# sparse tensor for friends:
# row 0: 3
# row 1: 0, 1, 2
# row 2: 3
# row 3: 0, 1, 2
self.feature_friends_indices = [[0, 0], [1, 0], [1, 1], [1, 2],
[2, 0], [3, 0], [3, 1], [3, 2]]
self.feature_friends_values = [3, 0, 1, 2, 3, 0, 1, 2]
self.feature_friends_row_lengths = [1, 3, 1, 3]
self.resolver = None
def tearDown(self):
if self.resolver:
tpu_strategy_util.shutdown_tpu_system(self.resolver)
super(TPUEmbeddingTest, self).tearDown()
def test_tables_with_same_name(self):
with self.assertRaisesRegex(
ValueError, 'Multiple tables with name table found.'):
with self._get_strategy().scope():
tpu_embedding_v2.TPUEmbedding(
(tpu_embedding_v2_utils.FeatureConfig(
table=tpu_embedding_v2_utils.TableConfig(
name='table',
vocabulary_size=4,
dim=2,
initializer=self.initializer,),
name='watched'),
tpu_embedding_v2_utils.FeatureConfig(
table=tpu_embedding_v2_utils.TableConfig(
name='table',
vocabulary_size=4,
dim=2,
initializer=self.initializer),
name='favorited')),
self.batch_size,
tpu_embedding_v2_utils.SGD(learning_rate=0.1))
def test_unsupported_optimizer(self):
with self.assertRaisesRegex(
ValueError, 'is an unsupported optimizer class.'):
with self._get_strategy().scope():
tpu_embedding_v2.TPUEmbedding(
self.feature_config, self.batch_size,
tpu_embedding.AdagradParameters(learning_rate=0.1))
def test_pass_non_tensor_to_apply_gradients(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
@def_function.function
def test_apply():
mid_level_api.apply_gradients((1, 2, 3))
with self.assertRaisesRegex(ValueError, 'Expected Tensor.'):
strategy.run(test_apply)
def test_pass_different_structure_to_apply_gradients(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
@def_function.function
def test_apply():
# This should be a tuple as feature_config is a tuple of 3 configs.
mid_level_api.apply_gradients([1, 2, 3])
with self.assertRaisesRegex(
TypeError,
'The two structures don\'t have the same nested structure.'):
strategy.run(test_apply)
def test_pass_none_to_apply_gradients(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
dataset = self._create_sparse_dataset(strategy)
data = next(iter(strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False))))
@def_function.function
def embedding_and_set_gradients(data):
mid_level_api.enqueue(data)
def tpu_fn():
results = mid_level_api.dequeue()
mid_level_api.apply_gradients((None, None,
array_ops.ones_like(results[2])))
return results
return strategy.run(tpu_fn)
@def_function.function
def embedding_only(data):
mid_level_api.enqueue(data, training=False)
def tpu_fn():
return mid_level_api.dequeue()
return strategy.run(tpu_fn)
first = self._get_replica_numpy(
embedding_and_set_gradients(data), strategy, 0)
second = self._get_replica_numpy(embedding_only(data), strategy, 0)
# First two features should be the same as None gradient was applied.
# Third feature had gradient of 1 passed in from each core.
# Each core received the same ids and returned the following batch:
# [ row 3, row 0 + row 1 + row 2 ]
# so gradient update was (learning rate = 0.1):
# row 0: -1/3*0.1
# row 1: -1/3*0.1
# row 2: -1/3*0.1
# row 3: -1*0.1
# There is a factor of num_replicas because each replica gave an update.
num_replicas = strategy.num_replicas_in_sync
update = ([[0.0]], [[0.0]],
[[0.1 * num_replicas], [0.1 / 3 * num_replicas]])
golden = tuple([feature-np.array(up) for feature, up in zip(first, update)])
self.assertAllClose(golden, second)
def _get_strategy(self):
self.resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project)
remote.connect_to_cluster(self.resolver)
tpu_strategy_util.initialize_tpu_system(self.resolver)
return tpu_strategy.TPUStrategy(self.resolver)
def test_dequeue_on_cpu(self):
mid_level_api = self._create_mid_level()
with self.assertRaises(RuntimeError):
mid_level_api.dequeue()
def test_enqueue_on_cpu(self):
mid_level_api = self._create_mid_level()
features = {
'watched': sparse_tensor.SparseTensor(
indices=self.feature_watched_indices,
values=self.feature_watched_values,
dense_shape=[2, 2])}
with self.assertRaises(RuntimeError):
mid_level_api.enqueue(features)
def test_apply_gradients_on_cpu(self):
mid_level_api = self._create_mid_level()
with self.assertRaises(RuntimeError):
mid_level_api.apply_gradients(None)
def test_get_embedding_tables_on_cpu(self):
mid_level_api = self._create_mid_level()
self.assertEqual(
set(mid_level_api.embedding_tables.keys()),
set([self.table_video, self.table_user]))
def test_get_embedding_tables_on_tpu(self):
with self._get_strategy().scope():
mid_level_api = self._create_mid_level()
with self.assertRaises(RuntimeError):
mid_level_api.embedding_tables()
def test_enqueue_weight_for_dense_tensor(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
input_fn = self._create_dense_input_fn(strategy, include_weights=True)
dist = strategy.experimental_distribute_datasets_from_function(
input_fn,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False))
dist_iter = iter(dist)
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
features, weights = next(dist_iter)
mid_level_api.enqueue(features, weights=weights, training=False)
return strategy.run(step)
with self.assertRaisesRegex(ValueError, 'Weight specified for dense input'):
test_fn()
def test_enqueue_wrong_weight_type_for_sparse_tensor(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy)
ragged = self._create_ragged_dataset(strategy, include_weights=True)
sparse_iter = iter(strategy.experimental_distribute_dataset(
sparse,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
ragged_iter = iter(strategy.experimental_distribute_dataset(
ragged,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
features = next(sparse_iter)
_, weights = next(ragged_iter)
mid_level_api.enqueue(features, weights=weights, training=False)
return strategy.run(step)
with self.assertRaisesRegex(
ValueError, 'which does not match type input which is SparseTensor.'):
test_fn()
def test_enqueue_wrong_weight_type_for_ragged_tensor(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy, include_weights=True)
ragged = self._create_ragged_dataset(strategy)
sparse_iter = iter(strategy.experimental_distribute_dataset(
sparse,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
ragged_iter = iter(strategy.experimental_distribute_dataset(
ragged,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
_, weights = next(sparse_iter)
features = next(ragged_iter)
mid_level_api.enqueue(features, weights=weights, training=False)
return strategy.run(step)
with self.assertRaisesRegex(
ValueError, 'which does not match type input which is RaggedTensor.'):
test_fn()
def test_enqueue_sparse_and_ragged(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy)
ragged = self._create_ragged_dataset(strategy)
sparse_iter = iter(strategy.experimental_distribute_dataset(
sparse,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
ragged_iter = iter(strategy.experimental_distribute_dataset(
ragged,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
sparse_features = next(sparse_iter)
ragged_features = next(ragged_iter)
features = (sparse_features[0], ragged_features[1], sparse_features[2])
mid_level_api.enqueue(features, training=False)
return strategy.run(step)
with self.assertRaisesRegex(
ValueError, 'Found both SparseTensors and RaggedTensors'):
test_fn()
def test_enqueue_incorrect_structure_for_features(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy)
sparse_iter = iter(strategy.experimental_distribute_dataset(
sparse,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
features = next(sparse_iter)
features = (features[0],)
mid_level_api.enqueue(features, training=False)
return strategy.run(step)
# The error here is raised from nest.assert_same_structure
with self.assertRaises(ValueError):
test_fn()
def test_enqueue_incorrect_structure_for_weights(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy, include_weights=True)
sparse_iter = iter(strategy.experimental_distribute_dataset(
sparse,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
features, weights = next(sparse_iter)
weights = (weights[0],)
mid_level_api.enqueue(features, weights=weights, training=False)
return strategy.run(step)
# The error here is raised from nest.assert_same_structure
with self.assertRaises(ValueError):
test_fn()
def test_enqueue_ragged_tensor(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy)
ragged = self._create_ragged_dataset(strategy)
sparse_iter = iter(strategy.experimental_distribute_dataset(
sparse,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
ragged_iter = iter(strategy.experimental_distribute_dataset(
ragged,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
@def_function.function
def test_fn():
def get_activations():
return mid_level_api.dequeue()
sparse_features = next(sparse_iter)
ragged_features = next(ragged_iter)
mid_level_api.enqueue(sparse_features, training=False)
sparse_activations = strategy.run(get_activations)
mid_level_api.enqueue(ragged_features, training=False)
ragged_activations = strategy.run(get_activations)
return sparse_activations, ragged_activations
sparse_activations, ragged_activations = test_fn()
# Extract per core numpy arrays and check that both sparse and ragged have
# the same results.
sparse0 = self._get_replica_numpy(sparse_activations, strategy, 0)
ragged0 = self._get_replica_numpy(ragged_activations, strategy, 0)
self.assertAllClose(sparse0, ragged0)
def test_enqueue_cpu_tensor(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
input_fn = self._create_dense_input_fn(strategy)
sparse_iter = iter(strategy.experimental_distribute_datasets_from_function(
input_fn))
@def_function.function
def test_fn():
def get_activations():
return mid_level_api.dequeue()
features = next(sparse_iter)
mid_level_api.enqueue(features, training=False)
activations = strategy.run(get_activations)
return activations
with self.assertRaisesRegex(ValueError, 'which is on a TPU input device'):
test_fn()
@parameterized.parameters([True, False])
def test_enqueue_cpu_tensor_with_outside_compilation(self, use_mlir):
if use_mlir:
config.enable_mlir_bridge()
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
input_fn = self._create_dense_input_fn(strategy)
sparse_iter = iter(strategy.experimental_distribute_datasets_from_function(
input_fn))
@def_function.function
def test_fn():
def get_activations(features):
mid_level_api.enqueue(features, training=False)
return mid_level_api.dequeue()
activations = strategy.run(get_activations, args=(next(sparse_iter),))
return activations
with self.assertRaisesRegex(ValueError, 'which is on a TPU input device'):
test_fn()
@parameterized.parameters(True, False)
def test_enqueue_with_weights(self, ragged):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
weight = 0.5
if ragged:
dataset = self._create_ragged_dataset(strategy, include_weights=True,
weight=weight)
else:
dataset = self._create_sparse_dataset(strategy, include_weights=True,
weight=weight)
dataset_iter = iter(strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
@def_function.function
def enqueue_and_get(features, weights):
def get_activations():
return mid_level_api.dequeue()
mid_level_api.enqueue(features, weights=weights, training=False)
return strategy.run(get_activations)
features, weights = next(dataset_iter)
# Replace the weight for the second feature with None to test that case.
weights = (weights[0], None, weights[2])
no_weights_activations = enqueue_and_get(features, weights=None)
weights_activations = enqueue_and_get(features, weights=weights)
# Extract per core numpy arrays.
no_weights0 = self._get_replica_numpy(no_weights_activations, strategy, 0)
weights0 = self._get_replica_numpy(weights_activations, strategy, 0)
# videos table has sum combiner and users table has mean combiner.
# i.e. the users table lookups aren't affected by the weights, as all the
# weights are the same.
# Tuple entries 0 and 1 are the watched and favorited features from the videos
# table and entry 2 is the friends feature from the users table.
# Note that None was passed as a weight for entry 1 so weight should have no
# effect.
weight = (0.5, 1.0, 1.0)
golden = tuple([no_weight * w for no_weight, w in zip(no_weights0, weight)])
self.assertAllClose(golden, weights0)
@parameterized.parameters([True, False])
def test_enqueue_with_outside_compilation(self, use_mlir):
if use_mlir:
config.enable_mlir_bridge()
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
dataset = self._create_sparse_dataset(strategy)
dataset_iter = iter(strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
@def_function.function
def enqueue_with_outside_compilation(data):
def get_activations(features):
mid_level_api.enqueue(features, training=False)
return mid_level_api.dequeue()
return strategy.run(get_activations, args=(data,))
@def_function.function
def enqueue_without_outside_compilation(data):
def get_activations():
return mid_level_api.dequeue()
mid_level_api.enqueue(data, training=False)
return strategy.run(get_activations)
features = next(dataset_iter)
activations_oc = enqueue_with_outside_compilation(features)
activations = enqueue_without_outside_compilation(features)
# Extract per core numpy arrays.
activations_oc0 = self._get_replica_numpy(activations_oc, strategy, 0)
activations0 = self._get_replica_numpy(activations, strategy, 0)
self.assertAllClose(activations_oc0, activations0)
@parameterized.parameters(True, False)
def test_enqueue_with_outside_compilation_in_control_flow(self, use_mlir):
if use_mlir:
config.enable_mlir_bridge()
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
dataset = self._create_sparse_dataset(strategy)
dataset_iter = iter(strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
# This is one way to force the enqueue in some control flow. @tf.functions
# aren't inlined in the calling tf.function. An alternative would be to
# place the enqueue in a switch_v2 or something similar.
@def_function.function
def enqueue_fn(features):
mid_level_api.enqueue(features, training=False)
@def_function.function
def enqueue_with_outside_compilation():
def get_activations(features):
enqueue_fn(features)
return mid_level_api.dequeue()
return strategy.run(get_activations, args=(next(dataset_iter),))
with self.assertRaisesRegex(
RuntimeError,
'does not match graph which contains TPUReplicateContext'):
enqueue_with_outside_compilation()
def test_enqueue_with_outside_compilation_non_direct_input(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
dataset = self._create_sparse_dataset(strategy)
dataset_iter = iter(strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
@def_function.function
def enqueue_with_outside_compilation():
def get_activations(features):
# This inserts a mul operation on the TPU to trigger the direct input
# error.
features = (features[0]*2, features[1]*2, features[2]*2)
mid_level_api.enqueue(features, training=False)
return mid_level_api.dequeue()
return strategy.run(get_activations, args=(next(dataset_iter),))
with self.assertRaisesRegex(
ValueError, 'which does not have the `_tpu_input_identity` attr'):
enqueue_with_outside_compilation()
def test_enqueue_with_outside_compilation_auto_mode(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
dataset = self._create_sparse_dataset(strategy)
dataset_iter = iter(strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False)))
@def_function.function
def enqueue_with_no_gradient_apply(data):
def get_activations(features):
# Note the lack of setting training=False, so training defaults to true
# here even though we don't call apply_gradients.
# We detect the correct mode based on which ops exist that share the
# same 'name'.
mid_level_api.enqueue(features, name='call1')
return mid_level_api.dequeue(name='call1')
return strategy.run(get_activations, args=(data,))
@def_function.function
def enqueue_with_gradient_apply(data):
def get_activations(features):
mid_level_api.enqueue(features, name='call2')
activations = mid_level_api.dequeue(name='call2')
# Apply an all ones gradient
gradients = nest.map_structure(array_ops.ones_like, activations)
mid_level_api.apply_gradients(gradients, name='call2')
return activations
return strategy.run(get_activations, args=(data,))
data = next(dataset_iter)
before_gradient_apply = enqueue_with_gradient_apply(data)
after_gradient_apply = enqueue_with_no_gradient_apply(data)
before_gradient_apply0 = self._get_replica_numpy(before_gradient_apply,
strategy, 0)
after_gradient_apply0 = self._get_replica_numpy(after_gradient_apply,
strategy, 0)
num_replicas = strategy.num_replicas_in_sync
# We are passing a gradient of 1 for all lookups, optimizer is SGD with a
# learning rate of 0.1. Feature 0 and 1 are looked up with a sum combiner
# with the following ids:
# Feature 0: [0, 0, 1], [0, 1, 1], ... repeated over num_replicas
# Feature 1: [0, 1, 1], [0, 0, 1], ... repeated over num_replicas
# i.e. Row 0 and 1 were looked up 3*num_replicas times over all cores and as
# the gradient is 1, the accumulated gradient is 3*num_replicas for each
# position in row 0 and 1 in table.
#
# See comments in test_pass_none_to_apply_gradients for the update to
# Feature 2 and its table.
# The *2 in the next tests are because those rows have 2 lookups vs
# the 1 lookup in the other row.
update = ([[0.3 * num_replicas], [0.3 * num_replicas * 2]],
[[0.3 * num_replicas * 2], [0.3 * num_replicas]],
[[0.1 * num_replicas], [0.1 / 3 * num_replicas]])
golden = tuple([before - np.array(up) for before, up in
zip(before_gradient_apply0, update)])
self.assertAllClose(golden, after_gradient_apply0)
def _create_strategy_and_mid_level(self, optimizer_name):
strategy = self._get_strategy()
with strategy.scope():
if optimizer_name == 'sgd':
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
elif optimizer_name == 'adagrad':
optimizer = tpu_embedding_v2_utils.Adagrad(learning_rate=0.1)
elif optimizer_name == 'adam':
optimizer = tpu_embedding_v2_utils.Adam(learning_rate=0.1)
else:
raise ValueError('optimizer is not recognized: {}'.format(optimizer_name))
mid_level_api = self._create_mid_level(optimizer=optimizer)
return strategy, mid_level_api, optimizer
@parameterized.parameters(
*itertools.product(
['sgd', 'adagrad', 'adam'],
[True, False]))
def test_embedding(self, optimizer_name, training):
strategy, mid_level_api, optimizer = (
self._create_strategy_and_mid_level(optimizer_name))
dataset = self._create_sparse_dataset(strategy)
dist = strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False))
dist_iter = iter(dist)
@def_function.function
def test_fn():
def step():
"""Create and run computation that returns the embedding activations."""
if not training:
activations = mid_level_api.dequeue()
total_loss = _get_total_loss_tensor(activations)
ret_val = [total_loss] + list(activations)
return ret_val
else:
with backprop.GradientTape() as tape:
activations = mid_level_api.dequeue()
tape.watch(activations)
total_loss = _get_total_loss_tensor(activations)
loss_per_replica = total_loss / strategy.num_replicas_in_sync
gradients = tape.gradient(loss_per_replica, activations)
mid_level_api.apply_gradients(gradients)
ret_val = [total_loss] + list(activations)
return ret_val
mid_level_api.enqueue(next(dist_iter), training=training)
result = strategy.run(step)
return result
# Run model.
shard_out_val = test_fn()
# Retrieve TPU weights to CPU.
mid_level_api._retrieve_variables()
# Compute sparse tensors for global batch.
input_data = next(iter(self._create_sparse_dataset(strategy)))
# Check results.
self._check_results(strategy, shard_out_val, training, input_data,
mid_level_api._variables,
optimizer)
def _create_mid_level(self, optimizer=None):
# Create `TPUEmbedding` object.
if optimizer is None:
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
num_replicas = (
distribution_strategy_context.get_strategy().num_replicas_in_sync)
return tpu_embedding_v2.TPUEmbedding(
feature_config=self.feature_config,
batch_size=self.batch_size * num_replicas,
optimizer=optimizer)
def _create_sparse_dataset(self, strategy, include_weights=False, weight=0.5):
# Create dataset for enqueue operation
sparse_features = (
sparse_tensor.SparseTensor(
indices=self.feature_watched_indices,
values=self.feature_watched_values,
dense_shape=[self.data_batch_size, 2]),
sparse_tensor.SparseTensor(
indices=self.feature_favorited_indices,
values=self.feature_favorited_values,
dense_shape=[self.data_batch_size, 2]),
sparse_tensor.SparseTensor(
indices=self.feature_friends_indices,
values=self.feature_friends_values,
dense_shape=[self.data_batch_size, 3]))
if include_weights:
weights = []
for sparse in sparse_features:
values = (
array_ops.ones_like(sparse.values, dtype=dtypes.float32) * weight)
weights.append(sparse_tensor.SparseTensor(
indices=sparse.indices,
values=values,
dense_shape=sparse.dense_shape))
sparse_features = (sparse_features, tuple(weights))
dataset = dataset_ops.DatasetV2.from_tensors(sparse_features)
# Data is batched to self.data_batch_size, rebatch to global batch size.
return dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
def _create_ragged_dataset(self, strategy, include_weights=False, weight=0.5):
# Create dataset for enqueue operation
ragged_features = (
ragged_tensor.RaggedTensor.from_row_lengths(
row_lengths=self.feature_watched_row_lengths,
values=self.feature_watched_values),
ragged_tensor.RaggedTensor.from_row_lengths(
row_lengths=self.feature_favorited_row_lengths,
values=self.feature_favorited_values),
ragged_tensor.RaggedTensor.from_row_lengths(
row_lengths=self.feature_friends_row_lengths,
values=self.feature_friends_values))
if include_weights:
weights = []
for ragged in ragged_features:
weights.append(ragged.with_values(
array_ops.ones_like(ragged.values, dtype=dtypes.float32) * weight))
ragged_features = (ragged_features, tuple(weights))
dataset = dataset_ops.DatasetV2.from_tensors(ragged_features)
# Data is batched to self.data_batch_size, rebatch to global batch size.
return dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
def _create_dense_input_fn(self, strategy, include_weights=False, weight=0.5):
def input_fn(ctx):
del ctx
features = (
constant_op.constant(self.feature_watched_values[-2:],
dtype=dtypes.int32),
constant_op.constant(self.feature_favorited_values[-2:],
dtype=dtypes.int32),
constant_op.constant(self.feature_friends_values[-2:],
dtype=dtypes.int32))
if include_weights:
weights = [array_ops.ones_like(t, dtype=dtypes.float32) * weight
for t in features]
features = (features, tuple(weights))
return dataset_ops.DatasetV2.from_tensors(features).repeat()
return input_fn
def _check_results(self, strategy, shard_out_val, training, input_data,
table_to_variable, optimizer):
num_replicas = strategy.num_replicas_in_sync
# Unpack the values `strategy.run()` returns.
loss = _unpack(strategy, shard_out_val[0])
activation_watched = _unpack(strategy, shard_out_val[1])
activation_favorited = _unpack(strategy, shard_out_val[2])
activation_friends = _unpack(strategy, shard_out_val[3])
# Core 0:
# Calculate the values of embedding activations.
activation_watched_gold0 = np.array([[0, 1, 2, 3], [4, 6, 8, 10]])
activation_favorited_gold0 = np.array([[4, 6, 8, 10], [4, 5, 6, 7]])
# Second row of `activation_friends_gold0` is the mean of the following.
# row 0: 0 1
# row 1: 2 3
# row 2: 4 5
activation_friends_gold0 = np.array([[6, 7], [2, 3]])
loss_gold0 = _compute_loss(activation_watched_gold0,
activation_favorited_gold0,
activation_friends_gold0)
# Add on values from other cores:
# Activations for watched are an alternating sequence of
# activation_watched_gold0 and activation_favorited_gold0.
# For favorited it is the same but in the opposite order.
activation_watched_gold = np.concatenate(
(np.concatenate((np.expand_dims(activation_watched_gold0, axis=0),) *
(num_replicas // 2)),
np.concatenate((np.expand_dims(activation_favorited_gold0, axis=0),) *
(num_replicas // 2))),
axis=1).reshape([self.batch_size * num_replicas, 4])
activation_favorited_gold = np.concatenate(
(activation_watched_gold[self.batch_size:,],
activation_watched_gold[0:self.batch_size,]))
activation_friends_gold = np.concatenate(
(activation_friends_gold0,) * num_replicas)
loss_gold = [loss_gold0] * num_replicas
# Test values.
self.assertAllClose(activation_watched_gold, activation_watched)
self.assertAllClose(activation_favorited_gold, activation_favorited)
self.assertAllClose(activation_friends_gold, activation_friends)
self.assertAllClose(loss_gold, loss)
embedding_table_video_before = np.copy(
np.reshape(self.embedding_values, [8, 4]))
embedding_table_user_before = np.copy(
np.reshape(self.embedding_values, [16, 2]))
global_batch_size = self.batch_size * num_replicas
if training:
gradient_wrt_watched_gold = (2 * activation_watched_gold /
global_batch_size)
gradient_wrt_favorited_gold = (2 * activation_favorited_gold /
global_batch_size)
gradient_wrt_friends_gold = (2 * activation_friends_gold /
global_batch_size)
# Calculate gradients wrt embedding tables.
gradients_wrt_user = (
_compute_gradients_wrt_embedding_table(
global_batch_size, gradient_wrt_friends_gold,
embedding_table_user_before, input_data[2].indices.numpy(),
input_data[2].values.numpy(), self.table_user.combiner))
gradients_wrt_video = (
_compute_gradients_wrt_embedding_table(
global_batch_size, gradient_wrt_favorited_gold,
embedding_table_video_before, input_data[1].indices.numpy(),
input_data[1].values.numpy(), self.table_video.combiner) +
_compute_gradients_wrt_embedding_table(
global_batch_size, gradient_wrt_watched_gold,
embedding_table_video_before, input_data[0].indices.numpy(),
input_data[0].values.numpy(), self.table_video.combiner))
self._check_embedding_and_slot_variables(embedding_table_user_before,
gradients_wrt_user,
embedding_table_video_before,
gradients_wrt_video,
optimizer,
table_to_variable)
def _check_embedding_and_slot_variables(self, embedding_table_user_before,
gradients_wrt_user,
embedding_table_video_before,
gradients_wrt_video,
optimizer,
table_to_variable):
if isinstance(optimizer, tpu_embedding_v2_utils.SGD):
check_fn = self._check_embedding_and_slot_variables_for_sgd
elif isinstance(optimizer, tpu_embedding_v2_utils.Adagrad):
check_fn = self._check_embedding_and_slot_variables_for_adagrad
elif isinstance(optimizer, tpu_embedding_v2_utils.Adam):
check_fn = self._check_embedding_and_slot_variables_for_adam
else:
raise ValueError('optimizer is not recognized: {}'.format(type(optimizer)))
check_fn(embedding_table_user_before, gradients_wrt_user,
optimizer, table_to_variable[self.table_user.name])
check_fn(embedding_table_video_before, gradients_wrt_video,
optimizer, table_to_variable[self.table_video.name])
def _check_embedding_and_slot_variables_for_sgd(self, embedding_table_before,
gradients,
optimizer,
variables):
embedding_table = np.copy(embedding_table_before)
embedding_table -= optimizer.learning_rate * np.sum(gradients, axis=0)
self.assertAllClose(_get_variable(variables['parameters']).numpy(),
embedding_table)
def _check_embedding_and_slot_variables_for_adagrad(self,
embedding_table_before,
gradients,
optimizer,
variable):
embedding_table = np.copy(embedding_table_before)
accumulator = (
optimizer.initial_accumulator_value + np.sum(gradients, axis=0)**2)
embedding_table -= (
optimizer.learning_rate * np.sum(gradients, axis=0) /
np.sqrt(accumulator))
self.assertAllClose(_get_variable(variable['parameters']).numpy(),
embedding_table)
self.assertAllClose(_get_variable(variable['accumulators']).numpy(),
accumulator)
def _check_embedding_and_slot_variables_for_adam(self, embedding_table_before,
gradients,
optimizer,
variable):
embedding_table = np.copy(embedding_table_before)
g = np.sum(gradients, axis=0)
v = g**2 * (1 - optimizer.beta_2)
m = g * (1 - optimizer.beta_1)
epsilon = optimizer.epsilon
# TPU Embeddings don't have the LR decay factor for Adam.
lr_modifier = 1
embedding_table -= (
m * optimizer.learning_rate * lr_modifier / (np.sqrt(v) + epsilon))
self.assertAllClose(_get_variable(variable['parameters']).numpy(),
embedding_table, rtol=1e-4)
self.assertAllClose(_get_variable(variable['momenta']).numpy(),
m, rtol=1e-4)
self.assertAllClose(_get_variable(variable['velocities']).numpy(),
v, rtol=1e-4)
def _get_replica_numpy(self, structured, strategy, replica_id):
def select_replica(x):
x = strategy.experimental_local_results(x)
if len(x) == 1:
return x[0].numpy()
return x[replica_id].numpy()
return nest.map_structure(select_replica, structured)
def test_dense_lookup(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
input_fn = self._create_dense_input_fn(strategy)
dist = strategy.experimental_distribute_datasets_from_function(
input_fn,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False))
dist_iter = iter(dist)
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
mid_level_api.enqueue(next(dist_iter), training=False)
return strategy.run(step)
# Run model.
shard0 = self._get_replica_numpy(test_fn(), strategy, 0)
# embedding_values is a linear list, so we reshape to match the correct
# shape of the corresponding table before performing the lookup.
numpy_videos = np.reshape(self.embedding_values, (8, 4))
numpy_users = np.reshape(self.embedding_values, (16, 2))
golden = ((numpy_videos[self.feature_watched_values[-2:]],
numpy_videos[self.feature_favorited_values[-2:]],
numpy_users[self.feature_friends_values[-2:]]))
self.assertAllClose(shard0, golden)
def test_variable_learning_rate(self):
num_steps = 10
num_steps_float = float(num_steps)
starting_lr = 1.0
ending_lr = 0.5
strategy = self._get_strategy()
num_replicas = strategy.num_replicas_in_sync
# Create model with Keras.
with strategy.scope():
step_counter = tf_variables.Variable(0.0, dtypes.float32)
def lr_function():
return gen_math_ops.maximum(
ending_lr,
starting_lr + ((ending_lr - starting_lr) * step_counter) /
num_steps_float)
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=lr_function)
table_config = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=num_replicas,
dim=4,
initializer=init_ops_v2.Constant(np.zeros((num_replicas, 4))),
combiner='sum', name='table')
mid_level_api = tpu_embedding_v2.TPUEmbedding(
feature_config={
'feature': tpu_embedding_v2_utils.FeatureConfig(
table=table_config, name='feature')},
batch_size=num_replicas,
optimizer=optimizer)
feature = {'feature': constant_op.constant([0], dtype=dtypes.int32)}
def input_fn(ctx):
del ctx
return dataset_ops.DatasetV2.from_tensors(feature).repeat()
dist = strategy.experimental_distribute_datasets_from_function(
input_fn,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False))
dist_iter = iter(dist)
@def_function.function
def test_fn():
def step():
with backprop.GradientTape() as tape:
activations = mid_level_api.dequeue()
tape.watch(activations)
result = math_ops.reduce_sum(activations['feature'])
loss = result / num_replicas
grads = tape.gradient(loss, activations)
mid_level_api.apply_gradients(grads)
return activations['feature']
mid_level_api.enqueue(next(dist_iter), training=True)
return strategy.run(step)
# Run model.
results = []
for _ in range(num_steps):
result = test_fn()
results.append(_unpack(strategy, result))
step_counter.assign_add(1.0)
# The table rows are table_config.dim elements wide, the per-replica batch
# size is 1, and every step looks up id 0.
# Loss for the gradient is the sum of the entries divided by the number of
# replicas. Thus the per-replica gradient is 1/#replicas for row 0 and there
# are no other updates. The reduced gradient is therefore 1.
# Learning rate schedule over num_steps steps:
# 1.0 0.95 0.9 0.85 0.8 ...
# Since we use SGD and the gradient is one, each element of the first row of
# the table follows the negative partial sums of the learning rates:
# 0, -1.0, -1.95, -2.85, ...
learning_rates = [starting_lr - (starting_lr - ending_lr) / num_steps * j
for j in range(num_steps)]
cumsum = [sum(learning_rates[0:j]) for j in range(num_steps)]
goldens = [[[-cumsum[i]] * table_config.dim] * num_replicas
for i in range(10)]
self.assertAllClose(results, goldens)
@parameterized.parameters([True, False])
def test_optimizer_with_slot_creation_fn(self, use_tpu):
def slot_creation_fn(table, slot_names):
slots = {}
for slot in slot_names:
slots[slot] = tf_variables.Variable(
name='{}_{}'.format(table.name, slot),
initial_value=functools.partial(
init_ops_v2.Zeros(), shape=table.shape, dtype=dtypes.float32),
trainable=False)
return slots
optimizer = tpu_embedding_v2_utils.Adagrad(
learning_rate=0.1,
slot_variable_creation_fn=slot_creation_fn)
if use_tpu:
strategy = self._get_strategy()
else:
strategy = distribution_strategy_context.get_strategy()
num_replicas = strategy.num_replicas_in_sync
with strategy.scope():
mid_level = tpu_embedding_v2.TPUEmbedding(
feature_config=self.feature_config,
batch_size=self.batch_size * num_replicas,
optimizer=optimizer)
video_accumulator = mid_level._variables['video']['accumulators']
user_accumulator = mid_level._variables['user']['accumulators']
if use_tpu:
# To check the table contents (ensure that it is zero rather than the
# normal initial accumulator value specified in the optimizer config),
# we need to select the underlying table variable on TPU.
# We only have one shard on Forge.
video_accumulator = video_accumulator.variables[0]
user_accumulator = user_accumulator.variables[0]
self.assertAllClose(video_accumulator.numpy(),
np.zeros((self.table_video.vocabulary_size,
self.table_video.dim)))
self.assertAllClose(user_accumulator.numpy(),
np.zeros((self.table_user.vocabulary_size,
self.table_user.dim)))
def test_optimizer_with_slot_creation_fn_non_partial(self):
def slot_creation_fn(table, slot_names):
slots = {}
for slot in slot_names:
# Note that we don't pass functools.partial here, so on TPU we can't
# extract the shape. We expect the error below.
slots[slot] = tf_variables.Variable(
name='{}_{}'.format(table.name, slot),
initial_value=init_ops_v2.Zeros()(shape=table.shape,
dtype=dtypes.float32),
trainable=False)
return slots
optimizer = tpu_embedding_v2_utils.Adagrad(
learning_rate=0.1,
slot_variable_creation_fn=slot_creation_fn)
strategy = self._get_strategy()
num_replicas = strategy.num_replicas_in_sync
with strategy.scope():
with self.assertRaisesRegex(ValueError,
'Unable to extract initializer function'):
tpu_embedding_v2.TPUEmbedding(
feature_config=self.feature_config,
batch_size=self.batch_size*num_replicas,
optimizer=optimizer)
def test_sequence_embeddings(self):
feature_config = (
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='watched',
max_sequence_length=2),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='favorited',
max_sequence_length=2),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_user, name='friends',
max_sequence_length=3))
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
strategy = self._get_strategy()
num_replicas = strategy.num_replicas_in_sync
with strategy.scope():
mid_level = tpu_embedding_v2.TPUEmbedding(
feature_config=feature_config,
batch_size=self.batch_size * num_replicas,
optimizer=optimizer)
dataset = self._create_sparse_dataset(strategy)
data = next(iter(strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False))))
@def_function.function
def embedding_and_set_gradients(data):
def tpu_fn():
activations = mid_level.dequeue()
mid_level.apply_gradients(nest.map_structure(array_ops.ones_like,
activations))
return activations
mid_level.enqueue(data)
return strategy.run(tpu_fn)
@def_function.function
def embedding_only(data):
def tpu_fn():
return mid_level.dequeue()
mid_level.enqueue(data)
return strategy.run(tpu_fn)
# Only check core 0.
before_update = self._get_replica_numpy(
embedding_and_set_gradients(data), strategy, 0)
after_update = self._get_replica_numpy(embedding_only(data), strategy, 0)
# For videos table, row 0 and row 1 are looked up 3*num_replicas times as
# they occur 3 times per replica (considering the features 0 and 1 which are
# both looked up in the videos table).
# Feature 0 has ids [0, 0, 1], [0, 1, 1], ... repeated over num_replicas
# Feature 1 has ids [0, 1, 1], [0, 0, 1], ... repeated over num_replicas
# This means that both rows 0 and 1 get a -0.1*3*num_replicas update
# For users table, each row is looked up twice:
# Feature 2 has ids [3, 0, 1, 2], .. repeated over num_replicas
# This means that we get a -0.1*num_replicas update to the third feature.
# In general this means that after the update, if we lookup feature 0 and 1
# the values will be 0.3*num_replicas lower per entry and for feature 2 they
# will be 0.1*num_replicas lower.
# The one issue is that these lookups contain padding values.
# For core 0, we get the first 2 elements of the 4 element batch.
# For feature 0, the indices are [[0, 0], [1, 0], [1, 1]] with max sequence
# length of 2, which means that [0, 1] will be 0s.
# For feature 1, the indices are [[0, 0], [0, 1], [1, 0]] with max sequence
# length of 2, which means that [1, 1] will be 0s.
# For feature 2, the indices are [[0, 0], [1, 0], [1, 1], [1, 2]] with max
# sequence length of 3, which means that [0, 1], [0, 2] will be 0s.
# The following masks represent that so that we only apply the above updates
# to the non-padding rows:
masks = (
np.array([[[1], [0]], [[1], [1]]]),
np.array([[[1], [1]], [[1], [0]]]),
np.array([[[1], [0], [0]], [[1], [1], [1]]]))
per_row_update = (0.3 * num_replicas,
0.3 * num_replicas,
0.1 * num_replicas)
golden = tuple([before - update * mask for before, update, mask in
zip(before_update, per_row_update, masks)])
self.assertAllClose(golden, after_update)
def _compute_gradients_wrt_embedding_table(batch_size,
gradient_wrt_activation,
embedding_table,
feature_indices,
feature_values,
combiner,
max_sequence_length=0):
"""Compute gradients wrt embedding_table.
Args:
batch_size: `int`, batch size.
gradient_wrt_activation: `np.array` with shape `batch_size` by
embedding `dimension`.
embedding_table: `np.array` with shape `vocabulary_size` by embedding
`dimension`.
feature_indices: `indices` as used to construct `SparseTensor`.
feature_values: `values` as used to construct `SparseTensor`.
combiner: `String`, 'mean' or 'sum'.
max_sequence_length: If non-zero, a sequence feature with the given length.
Returns:
Gradients wrt `embedding_table`, an `np.array` with shape
`batch_size` by `vocabulary_size` by
embedding `dimension`.
Raises:
ValueError: if `combiner` is not one of 'mean' or 'sum'.
"""
if combiner not in ('mean', 'sum'):
raise ValueError('`combiner` must be mean or sum; got {}.'.format(combiner))
grads = []
for i in range(batch_size):
grad = np.zeros_like(embedding_table)
count = 0
for (batch_i, seq_index), vocabulary_id in zip(feature_indices,
feature_values):
if batch_i == i:
count += 1
if max_sequence_length > 0:
if seq_index < max_sequence_length:
grad[vocabulary_id, :] += gradient_wrt_activation[i, seq_index, :]
else:
grad[vocabulary_id, :] += gradient_wrt_activation[i, :]
if combiner == 'mean' and not max_sequence_length:
grad = grad / count
grads.append(grad)
return np.stack(grads)
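# Gathers the per-replica outputs of a strategy.run call onto the host and
# concatenates them along the batch dimension into a single numpy array.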
def _unpack(strategy, per_replica_output):
per_replica_output = strategy.experimental_local_results(per_replica_output)
per_replica_output = array_ops.concat(per_replica_output, axis=0).numpy()
return per_replica_output
def _get_total_loss_tensor(activations):
losses = []
for activation in activations:
losses.append(
math_ops.reduce_mean(
math_ops.reduce_sum(
gen_math_ops.squared_difference(activation, 0), 1)))
total_loss = array_ops.expand_dims_v2(sum(losses), 0)
return total_loss
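# Numpy counterpart of _get_total_loss_tensor: mean over the batch of the
# per-example sum of squared activations, summed across the three features.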
def _compute_loss(activation_watched, activation_favorited, activation_friends):
watched_loss = np.mean(np.sum(activation_watched**2, axis=1))
if len(activation_favorited.shape) == 2:
favorited_loss = np.mean(np.sum(activation_favorited**2, axis=1))
else:
favorited_loss = np.mean(np.sum(activation_favorited**2, axis=(1, 2)))
if len(activation_friends.shape) == 2:
friends_loss = np.mean(np.sum(activation_friends**2, axis=1))
else:
friends_loss = np.mean(np.sum(activation_friends**2, axis=(1, 2)))
loss = watched_loss + favorited_loss + friends_loss
return loss
def _get_tmpdir(name, subdir=''):
segments = [FLAGS.model_dir, name] + ([subdir] if subdir else [])
return os.path.join(*segments)
def _get_variable(variable):
if isinstance(variable, tpu_embedding_v2.TPUShardedVariable):
return variable.variables[0]
return variable
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
[] |
[] |
[
"TEST_TMPDIR"
] |
[]
|
["TEST_TMPDIR"]
|
python
| 1 | 0 | |
Cafeteria/wsgi.py
|
"""
WSGI config for Cafeteria project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Cafeteria.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
server/mail/mail_test.go
|
package mail
import (
"os"
"testing"
"github.com/fleetdm/fleet/server/kolide"
"github.com/fleetdm/fleet/server/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type mockMailer struct{}
func (m *mockMailer) SendEmail(e kolide.Email) error {
return nil
}
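// getMailer returns the real SMTP-backed mail service when the MAIL_TEST
// environment variable is set, and a no-op mock mailer otherwise.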
func getMailer() kolide.MailService {
if os.Getenv("MAIL_TEST") == "" {
return &mockMailer{}
}
return NewService()
}
var testFunctions = [...]func(*testing.T, kolide.MailService){
testSMTPPlainAuth,
testSMTPSkipVerify,
testSMTPNoAuth,
testMailTest,
}
func TestMail(t *testing.T) {
for _, f := range testFunctions {
r := getMailer()
t.Run(test.FunctionName(f), func(t *testing.T) {
f(t, r)
})
}
}
func testSMTPPlainAuth(t *testing.T, mailer kolide.MailService) {
mail := kolide.Email{
Subject: "smtp plain auth",
To: []string{"[email protected]"},
Config: &kolide.AppConfig{
SMTPConfigured: true,
SMTPAuthenticationType: kolide.AuthTypeUserNamePassword,
SMTPAuthenticationMethod: kolide.AuthMethodPlain,
SMTPUserName: "bob",
SMTPPassword: "secret",
SMTPEnableTLS: true,
SMTPVerifySSLCerts: true,
SMTPEnableStartTLS: true,
SMTPPort: 1025,
SMTPServer: "localhost",
SMTPSenderAddress: "[email protected]",
},
Mailer: &SMTPTestMailer{
BaseURL: "https://localhost:8080",
},
}
err := mailer.SendEmail(mail)
assert.Nil(t, err)
}
func testSMTPSkipVerify(t *testing.T, mailer kolide.MailService) {
mail := kolide.Email{
Subject: "skip verify",
To: []string{"[email protected]"},
Config: &kolide.AppConfig{
SMTPConfigured: true,
SMTPAuthenticationType: kolide.AuthTypeUserNamePassword,
SMTPAuthenticationMethod: kolide.AuthMethodPlain,
SMTPUserName: "bob",
SMTPPassword: "secret",
SMTPEnableTLS: true,
SMTPVerifySSLCerts: false,
SMTPEnableStartTLS: true,
SMTPPort: 1025,
SMTPServer: "localhost",
SMTPSenderAddress: "[email protected]",
},
Mailer: &SMTPTestMailer{
BaseURL: "https://localhost:8080",
},
}
err := mailer.SendEmail(mail)
assert.Nil(t, err)
}
func testSMTPNoAuth(t *testing.T, mailer kolide.MailService) {
mail := kolide.Email{
Subject: "no auth",
To: []string{"[email protected]"},
Config: &kolide.AppConfig{
SMTPConfigured: true,
SMTPAuthenticationType: kolide.AuthTypeNone,
SMTPEnableTLS: true,
SMTPVerifySSLCerts: true,
SMTPPort: 1025,
SMTPServer: "localhost",
SMTPSenderAddress: "[email protected]",
},
Mailer: &SMTPTestMailer{
BaseURL: "https://localhost:8080",
},
}
err := mailer.SendEmail(mail)
assert.Nil(t, err)
}
func testMailTest(t *testing.T, mailer kolide.MailService) {
mail := kolide.Email{
Subject: "test tester",
To: []string{"[email protected]"},
Config: &kolide.AppConfig{
SMTPConfigured: true,
SMTPAuthenticationType: kolide.AuthTypeNone,
SMTPEnableTLS: true,
SMTPVerifySSLCerts: true,
SMTPPort: 1025,
SMTPServer: "localhost",
SMTPSenderAddress: "[email protected]",
},
Mailer: &SMTPTestMailer{
BaseURL: "https://localhost:8080",
},
}
err := Test(mailer, mail)
assert.Nil(t, err)
}
func TestTemplateProcessor(t *testing.T) {
mailer := PasswordResetMailer{
BaseURL: "https://localhost.com:8080",
Token: "12345",
}
out, err := mailer.Message()
require.Nil(t, err)
assert.NotNil(t, out)
}
|
[
"\"MAIL_TEST\""
] |
[] |
[
"MAIL_TEST"
] |
[]
|
["MAIL_TEST"]
|
go
| 1 | 0 | |
jupyterhub_singleuser_profiles/api/api.py
|
import connexion
import os
import sys
import json
import logging
from jupyterhub_singleuser_profiles.profiles import SingleuserProfiles
from jupyterhub_singleuser_profiles import version as singleuser_profiles_version
from functools import wraps
from urllib.parse import quote
from flask import Flask
from flask import redirect
from flask import request
from flask import Response
from jupyterhub.services.auth import HubAuth
custom_notebook_namespace = os.environ.get('NOTEBOOK_NAMESPACE')
if not custom_notebook_namespace:
custom_notebook_namespace = None
_PROFILES = SingleuserProfiles(notebook_namespace=custom_notebook_namespace, verify_ssl=False)
_PROFILES.load_profiles()
_LOGGER = logging.getLogger(__name__)
prefix = os.environ.get('JUPYTERHUB_SERVICE_PREFIX', '/')
auth = HubAuth(api_token=os.environ['JUPYTERHUB_API_TOKEN'], cache_max_age=60)
app = Flask(__name__)
def authenticated(f):
"""Decorator for authenticating with the Hub"""
@wraps(f)
def decorated(*args, **kwargs):
cookie = request.cookies.get(auth.cookie_name)
token = request.headers.get(auth.auth_header_name)
for_user = connexion.request.headers.get('For-User')
if cookie:
user = auth.user_for_cookie(cookie, use_cache=True)
if not user:
user = auth.user_for_cookie(cookie, use_cache=False)
elif token:
user = auth.user_for_token(token, use_cache=True)
if not user:
user = auth.user_for_token(token, use_cache=False)
else:
user = None
if user:
if for_user and user.get('admin'):
user['name'] = for_user
user['admin'] = False
return f(user=user, *args, **kwargs)
else:
# redirect to login url on failed auth
login_url = os.environ.get('JUPYTERHUB_LOGIN_URL')
if not login_url:
login_url = auth.login_url
return redirect(login_url + '?next=%s' % quote(request.path))
return decorated
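# Redirect any plain HTTP request to its HTTPS equivalent before it is handled.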
@app.before_request
def before_request():
if request.is_secure:
return
url = request.url.replace("http://", "https://", 1)
code = 301
return redirect(url, code=code)
@authenticated
def whoami(user):
return Response(
json.dumps(user, indent=1, sort_keys=True), mimetype='application/json'
)
@authenticated
def get_instance(*args, **kwargs):
return _PROFILES.get_instance()
@authenticated
def get_user_cm(user):
cm = _PROFILES.user.get(user['name'])
return cm
@authenticated
def get_ui_config(user):
_PROFILES.load_profiles()
cm = _PROFILES.get_ui_configuration()
return cm
@authenticated
def update_user_cm(user, body):
_PROFILES.user.update(user['name'], data=body)
return _PROFILES.user.get(user['name'])
@authenticated
def get_sizes(pure_json=False, *args, **kwargs):
_PROFILES.load_profiles()
sizes_json = _PROFILES.get_sizes()
if sizes_json == {}:
return 'Not Found', 404
if pure_json:
return sizes_json
response = []
for size in sizes_json:
response.append(size['name'])
return response
@authenticated
def get_images(*args, **kwargs):
_PROFILES.load_profiles()
images = _PROFILES.images.get()
return images
@authenticated
def get_default_image(*args, **kwargs):
_PROFILES.load_profiles()
default_image = _PROFILES.images.get_default()
return default_image
@authenticated
def get_size_by_name(size_name, *args, **kwargs):
_PROFILES.load_profiles()
size_dict = _PROFILES.get_size(size_name)
if size_dict == {}:
return 'Not Found', 404
return size_dict
app = connexion.App(__name__, specification_dir='.', options={'swagger_ui':True})
app.add_api('swagger.yaml')
def main():
app.run(port=8181)
|
[] |
[] |
[
"JUPYTERHUB_API_TOKEN",
"JUPYTERHUB_SERVICE_PREFIX",
"JUPYTERHUB_LOGIN_URL",
"NOTEBOOK_NAMESPACE"
] |
[]
|
["JUPYTERHUB_API_TOKEN", "JUPYTERHUB_SERVICE_PREFIX", "JUPYTERHUB_LOGIN_URL", "NOTEBOOK_NAMESPACE"]
|
python
| 4 | 0 | |
pkg/sampleset/utils/ctrl_utils.go
|
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"time"
"github.com/dustin/go-humanize"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
)
// HasDeletionTimestamp checks whether an object has a deletion timestamp, i.e. whether it needs to be deleted.
func HasDeletionTimestamp(obj *metav1.ObjectMeta) bool {
return !obj.GetDeletionTimestamp().IsZero()
}
// HasFinalizer checks whether the object carries the given finalizer.
func HasFinalizer(obj *metav1.ObjectMeta, finalizer string) bool {
return ContainsString(obj.GetFinalizers(), finalizer)
}
func RemoveFinalizer(obj *metav1.ObjectMeta, finalizer string) {
finalizers := RemoveString(obj.Finalizers, finalizer)
obj.Finalizers = finalizers
}
func NoRequeue() (ctrl.Result, error) {
return ctrl.Result{}, nil
}
func RequeueWithError(err error) (ctrl.Result, error) {
return ctrl.Result{}, err
}
func RequeueAfter(requeueAfter time.Duration) (ctrl.Result, error) {
return ctrl.Result{RequeueAfter: requeueAfter}, nil
}
// ContainsString determines whether the string slice contains a specific string;
// it returns true if the string is present and false otherwise.
func ContainsString(slice []string, s string) bool {
for _, item := range slice {
if item == s {
return true
}
}
return false
}
func RemoveString(slice []string, s string) (result []string) {
for _, item := range slice {
if item == s {
continue
}
result = append(result, item)
}
return
}
func NoZeroOptionToMap(optionMap map[string]reflect.Value, i interface{}) {
elem := reflect.ValueOf(i).Elem()
for i := 0; i < elem.NumField(); i++ {
value := elem.Field(i)
if value.IsZero() {
continue
}
field := elem.Type().Field(i)
tag := field.Tag.Get("json")
option := strings.Split(tag, ",")[0]
optionMap[option] = value
}
}
func NoZeroOptionToArgs(options interface{}) []string {
var args []string
elem := reflect.ValueOf(options).Elem()
for i := 0; i < elem.NumField(); i++ {
v := elem.Field(i)
if v.IsZero() {
continue
}
field := elem.Type().Field(i)
tag := field.Tag.Get("json")
opt := strings.Split(tag, ",")[0]
switch v.Kind() {
case reflect.Bool:
args = append(args, fmt.Sprintf("--%s", opt))
case reflect.Slice: // []string
for j := 0; j < v.Len(); j++ {
args = append(args, fmt.Sprintf(`--%s=%v`, opt, v.Index(j)))
}
default:
args = append(args, fmt.Sprintf("--%s=%v", opt, v))
}
}
return args
}
func DiskUsageOfPaths(timeout time.Duration, paths ...string) (string, error) {
filePaths := strings.Join(paths, " ")
arg := "du -scb --exclude \"./.*\" " + filePaths
ctx, cancel := context.WithTimeout(context.Background(), timeout*time.Second)
defer cancel()
var stdout, stderr bytes.Buffer
cmd := exec.CommandContext(ctx, "bash", "-c", arg)
cmd.Stdout, cmd.Stderr = &stdout, &stderr
if err := cmd.Run(); err != nil {
return "", fmt.Errorf("cmd:%s, error: %s", cmd.String(), stderr.String())
}
lines := strings.Split(strings.TrimSpace(stdout.String()), "\n")
if len(lines) == 0 {
return "", fmt.Errorf("cmd:%s, output:%s", cmd.String(), stdout.String())
}
total := strings.TrimSpace(lines[len(lines)-1])
if !strings.Contains(total, "total") {
return "", fmt.Errorf("cmd:%s, output:%s", cmd.String(), stdout.String())
}
totalSlice := strings.FieldsFunc(total, func(r rune) bool { return r == ' ' || r == '\t' })
if len(totalSlice) == 0 {
return "", fmt.Errorf("cmd:%s, output:%s", cmd.String(), stdout.String())
}
totalSizeStr := strings.TrimSpace(totalSlice[0])
_, err := strconv.ParseUint(totalSizeStr, 10, 64)
if err != nil {
return "", fmt.Errorf("cmd:%s, parseUint error: %s", cmd.String(), err.Error())
}
return totalSizeStr, nil
}
func FileNumberOfPaths(timeout time.Duration, paths ...string) (string, error) {
filePaths := strings.Join(paths, " ")
arg := "ls -lR " + filePaths + "| grep \"^-\" | wc -l"
ctx, cancel := context.WithTimeout(context.Background(), timeout*time.Second)
defer cancel()
var stdout, stderr bytes.Buffer
cmd := exec.CommandContext(ctx, "bash", "-c", arg)
cmd.Stdout, cmd.Stderr = &stdout, &stderr
if err := cmd.Run(); err != nil {
return "", fmt.Errorf("cmd:%s, error: %s", cmd.String(), stderr.String())
}
fileNum, err := strconv.ParseInt(strings.TrimSpace(stdout.String()), 10, 64)
if err != nil {
return "", fmt.Errorf("cmd:%s, parseUint error: %s", cmd.String(), err)
}
return humanize.Comma(fileNum), nil
}
func JuiceFileNumberOfPath(timeout time.Duration, path ...string) (string, error) {
ctx, cancel := context.WithTimeout(context.Background(), timeout*time.Second)
defer cancel()
args := []string{"info"}
args = append(args, path...)
var stdout, stderr bytes.Buffer
cmd := exec.CommandContext(ctx, "juicefs", args...)
cmd.Stdout, cmd.Stderr = &stdout, &stderr
if err := cmd.Run(); err != nil {
return "", fmt.Errorf("cmd:%s, error: %s", cmd.String(), stderr.String())
}
lines := strings.Split(strings.TrimSpace(stdout.String()), "\n")
if len(lines) == 0 {
return "", fmt.Errorf("cmd:%s, output:%s", cmd.String(), stdout.String())
}
var fileLines []string
for _, line := range lines {
if strings.Contains(line, "files") {
fileLines = append(fileLines, line)
}
}
var fileNumTotal int64
for _, fileLine := range fileLines {
if fileLine == "" || len(strings.Split(fileLine, ":")) != 2 {
return "", fmt.Errorf("cmd:%s, fileLine:%s", cmd.String(), fileLine)
}
fileNumStr := strings.TrimSpace(strings.Split(fileLine, ":")[1])
fileNum, err := strconv.ParseInt(fileNumStr, 10, 64)
if err != nil {
return "", fmt.Errorf("cmd:%s, parseInt error: %s", cmd.String(), err)
}
fileNumTotal += fileNum
}
return humanize.Comma(fileNumTotal), nil
}
func DiskSpaceOfPaths(timeout time.Duration, paths ...string) (map[string]string, error) {
args := []string{"--output=fstype,size,used,avail", "-BK"}
args = append(args, paths...)
ctx, cancel := context.WithTimeout(context.Background(), timeout*time.Second)
defer cancel()
var stdout, stderr bytes.Buffer
cmd := exec.CommandContext(ctx, "df", args...)
cmd.Stdout, cmd.Stderr = &stdout, &stderr
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("cmd:%s, error: %s", cmd.String(), stderr.String())
}
lines := strings.Split(strings.TrimSpace(stdout.String()), "\n")
if len(lines) <= 1 {
return nil, fmt.Errorf("cmd:%s, output:%s", cmd.String(), stdout.String())
}
infoMap := make(map[string][]string)
for _, line := range lines[1:] {
infoStr := strings.TrimSpace(line)
infoList := strings.FieldsFunc(infoStr, func(r rune) bool { return r == ' ' || r == '\t' })
if len(infoList) != 4 {
return nil, fmt.Errorf("cmd:%s, output:%s", cmd.String(), stdout.String())
}
fsType := strings.TrimSpace(infoList[0])
infoMap[fsType] = infoList
}
var diskSizeTotal, diskUsedTotal, diskAvailTotal uint64
for _, infoList := range infoMap {
diskSizeStr := strings.TrimSuffix(strings.TrimSpace(infoList[1]), "K")
diskSize, err := strconv.ParseUint(diskSizeStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("cmd:%s, output:%s", cmd.String(), stdout.String())
}
diskSizeTotal += diskSize
diskUsedStr := strings.TrimSuffix(strings.TrimSpace(infoList[2]), "K")
diskUsed, err := strconv.ParseUint(diskUsedStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("cmd:%s, output:%s", cmd.String(), stdout.String())
}
diskUsedTotal += diskUsed
diskAvailStr := strings.TrimSuffix(strings.TrimSpace(infoList[3]), "K")
diskAvail, err := strconv.ParseUint(diskAvailStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("cmd:%s, output:%s", cmd.String(), stdout.String())
}
diskAvailTotal += diskAvail
}
diskStatus := map[string]string{
"diskSize": strconv.FormatUint(diskSizeTotal, 10),
"diskUsed": strconv.FormatUint(diskUsedTotal, 10),
"diskAvail": strconv.FormatUint(diskAvailTotal, 10),
}
return diskStatus, nil
}
func WaitFileCreatedWithTimeout(ctx context.Context, path string, duration time.Duration) bool {
ticker := time.NewTicker(duration)
defer ticker.Stop()
// Buffered so the watcher goroutine can exit even if the caller has already
// timed out and no longer receives from the channel.
done := make(chan bool, 1)
go func() {
for _, err := os.Stat(path); err != nil; _, err = os.Stat(path) {
if os.IsNotExist(err) {
time.Sleep(500 * time.Millisecond)
continue
}
break
}
done <- true
}()
for {
select {
case <-done:
return true
case <-ctx.Done():
return false
case <-ticker.C:
return false
}
}
}
func GetRuntimeImage() (string, error) {
image := os.Getenv("RUNTIME_IMAGE")
if image == "" {
return "", errors.New("RUNTIME_IMAGE is not in environment variable")
}
return image, nil
}
func GetHostName() (string, error) {
hostName := os.Getenv("HOSTNAME")
if hostName == "" {
return "", errors.New("HOSTNAME is not in environment variable")
}
return hostName, nil
}
|
[
"\"RUNTIME_IMAGE\"",
"\"HOSTNAME\""
] |
[] |
[
"HOSTNAME",
"RUNTIME_IMAGE"
] |
[]
|
["HOSTNAME", "RUNTIME_IMAGE"]
|
go
| 2 | 0 | |
pkg/settings/settings.go
|
package settings
import (
"encoding/json"
"io/ioutil"
"log"
"os"
)
type Account struct {
Username string `json:"username"`
Password string `json:"password"`
}
type Service struct {
DefaultTLSPort int `json:"default_tls_port"`
SecretTLSPort int `json:"secret_tls_port"`
APIPort int `json:"api_port"`
TrustedLocations []string `json:"trusted_locations"`
}
// Settings stores all configuration values; it is kept as a package-level singleton.
var Settings Setting
type Setting struct {
Account Account `json:"account"`
Service Service `json:"service"`
Certificate CertJSON `json:"certificates"`
ConfigFile string `json:"-"`
CertificateDir string `json:"-"`
DatabasePath string `json:"-"`
// CertificateConfig CertificateConfiguration `json:"-"`
}
func (s *Setting) Parse() {
s.getKvCertDir()
s.getKvConfig()
s.getKvDatabasePath()
s.parseJSON()
}
func (s *Setting) parseJSON() {
// var schema *setting.CertJSON
log.Printf("Reading config file %s", s.ConfigFile)
contentBytes, err := ioutil.ReadFile(s.ConfigFile)
if err != nil {
	log.Printf("could not read config file %s: %v", s.ConfigFile, err)
}
if err := json.Unmarshal(contentBytes, s); err != nil {
	log.Printf("could not parse config file %s: %v", s.ConfigFile, err)
}
// log.Println(s)
}
func (s *Setting) getKvCertDir() {
log.Print("reading env cert dir")
s.CertificateDir = os.Getenv("KV_CERT_DIR")
log.Printf("got settings.CertificateDir = %s", s.CertificateDir)
}
func (s *Setting) getKvConfig() {
log.Print("reading env config")
s.ConfigFile = os.Getenv("KV_CONFIG_FILE")
log.Printf("got settings.ConfigFile = %s", s.ConfigFile)
}
func (s *Setting) getKvDatabasePath() {
log.Print("reading env config")
s.DatabasePath = os.Getenv("KV_DB_PATH")
log.Printf("got settings.DatabasePath = %s", s.DatabasePath)
}
func init() {
log.SetPrefix("settings: ")
// Settings.Parse()
}
|
[
"\"KV_CERT_DIR\"",
"\"KV_CONFIG_FILE\"",
"\"KV_DB_PATH\""
] |
[] |
[
"KV_CERT_DIR",
"KV_DB_PATH",
"KV_CONFIG_FILE"
] |
[]
|
["KV_CERT_DIR", "KV_DB_PATH", "KV_CONFIG_FILE"]
|
go
| 3 | 0 | |
go/fsd-be/source/api/users.go
|
package api
import (
"context"
"log"
"net/http"
"os"
"github.com/gin-gonic/gin"
"github.com/joho/godotenv"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
func AllUsers(c *gin.Context) {
if err := godotenv.Load(); err != nil {
log.Println("No .env file found")
}
mongodbUri := os.Getenv("MONGODB_URI")
client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(mongodbUri))
if err != nil {
panic(err)
}
defer func() {
if err = client.Disconnect(context.TODO()); err != nil {
panic(err)
}
}()
collection := client.Database("fsd_dev").Collection("users")
filter := bson.D{}
cursor, err := collection.Find(context.TODO(), filter)
if err != nil {
panic(err)
}
var results []bson.M
if err = cursor.All(context.TODO(), &results); err != nil {
panic(err)
}
c.IndentedJSON(http.StatusOK, map[string]interface{}{"value": results})
}
func CreateUser(c *gin.Context) {
if err := godotenv.Load(); err != nil {
log.Println("No .env file found")
}
mongodbUri := os.Getenv("MONGODB_URI")
client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(mongodbUri))
if err != nil {
panic(err)
}
defer func() {
if err = client.Disconnect(context.TODO()); err != nil {
panic(err)
}
}()
collection := client.Database("fsd_dev").Collection("users")
var document interface{}
if err := c.BindJSON(&document); err != nil {
return
}
result, err := collection.InsertOne(context.TODO(), document)
if err != nil {
	panic(err)
}
c.IndentedJSON(http.StatusCreated, map[string]interface{}{"value": result.InsertedID})
}
func ReadUser(c *gin.Context) {
if err := godotenv.Load(); err != nil {
log.Println("No .env file found")
}
mongodbUri := os.Getenv("MONGODB_URI")
client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(mongodbUri))
if err != nil {
panic(err)
}
defer func() {
if err = client.Disconnect(context.TODO()); err != nil {
panic(err)
}
}()
var result bson.M
collection := client.Database("fsd_dev").Collection("users")
objectId, err := primitive.ObjectIDFromHex(c.Param("id"))
if err != nil {
panic("Invalid id")
}
filter := bson.D{{Key: "_id", Value: objectId}}
err = collection.FindOne(context.TODO(), filter).Decode(&result)
if err != nil {
if err == mongo.ErrNoDocuments {
c.IndentedJSON(http.StatusOK, map[string]interface{}{"value": []interface{}{}})
return
}
panic(err)
}
c.IndentedJSON(http.StatusOK, map[string]interface{}{"value": result})
}
func UpdateUser(c *gin.Context) {
if err := godotenv.Load(); err != nil {
log.Println("No .env file found")
}
mongodbUri := os.Getenv("MONGODB_URI")
client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(mongodbUri))
if err != nil {
panic(err)
}
defer func() {
if err = client.Disconnect(context.TODO()); err != nil {
panic(err)
}
}()
collection := client.Database("fsd_dev").Collection("users")
objectId, err := primitive.ObjectIDFromHex(c.Param("id"))
if err != nil {
panic("Invalid id")
}
var data map[string]interface{}
if err := c.BindJSON(&data); err != nil {
return
}
var temp bson.D
for k, v := range data {
temp = append(temp, bson.E{Key: k, Value: v})
}
filter := bson.D{{Key: "_id", Value: objectId}}
update := bson.D{{Key: "$set", Value: temp}}
options := options.Update().SetUpsert(true)
result, err := collection.UpdateOne(context.TODO(), filter, update, options)
if err != nil {
panic(err)
}
c.IndentedJSON(http.StatusOK, map[string]interface{}{"value": result.ModifiedCount})
}
func DeleteUser(c *gin.Context) {
if err := godotenv.Load(); err != nil {
log.Println("No .env file found")
}
mongodbUri := os.Getenv("MONGODB_URI")
client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(mongodbUri))
if err != nil {
panic(err)
}
defer func() {
if err = client.Disconnect(context.TODO()); err != nil {
panic(err)
}
}()
collection := client.Database("fsd_dev").Collection("users")
objectId, err := primitive.ObjectIDFromHex(c.Param("id"))
if err != nil {
panic("Invalid id")
}
filter := bson.D{{Key: "_id", Value: objectId}}
result, err := collection.DeleteOne(context.TODO(), filter)
if err != nil {
panic(err)
}
c.IndentedJSON(http.StatusOK, map[string]interface{}{"value": result.DeletedCount})
}
|
[
"\"MONGODB_URI\"",
"\"MONGODB_URI\"",
"\"MONGODB_URI\"",
"\"MONGODB_URI\"",
"\"MONGODB_URI\""
] |
[] |
[
"MONGODB_URI"
] |
[]
|
["MONGODB_URI"]
|
go
| 1 | 0 | |
WebSocket_Activity/wsgi.py
|
"""
WSGI config for WebSocket_Activity project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "WebSocket_Activity.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
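WebSocket_Activity/wsgi.py only exposes the application callable; a WSGI server has to drive it. A minimal local-testing sketch using only the standard library, assuming it is run from the project root so that WebSocket_Activity.settings is importable (production deployments would use a real WSGI server such as gunicorn instead):

from wsgiref.simple_server import make_server

from WebSocket_Activity.wsgi import application

# Serve the Django WSGI callable on localhost for quick manual testing only.
with make_server("127.0.0.1", 8000, application) as httpd:
    httpd.serve_forever()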
lib/galaxy_test/base/populators.py
|
import contextlib
import json
import os
import random
import string
import unittest
from collections import namedtuple
from functools import wraps
from operator import itemgetter
try:
from nose.tools import nottest
except ImportError:
def nottest(x):
return x
import requests
import yaml
from gxformat2 import (
convert_and_import_workflow,
ImporterGalaxyInterface,
)
from gxformat2._yaml import ordered_load
from pkg_resources import resource_string
from six import StringIO
from galaxy.tool_util.client.staging import InteractorStaging
from galaxy.tool_util.verify.test_data import TestDataResolver
from galaxy.tool_util.verify.wait import (
TimeoutAssertionError,
wait_on as tool_util_wait_on,
)
from galaxy.util import unicodify
from . import api_asserts
# Simple workflow that takes an input and calls the cat wrapper on it.
workflow_str = unicodify(resource_string(__name__, "data/test_workflow_1.ga"))
# Simple workflow that takes an input and filters with random lines twice in a
# row - first grabbing 8 lines at random and then 6.
workflow_random_x2_str = unicodify(resource_string(__name__, "data/test_workflow_2.ga"))
DEFAULT_TIMEOUT = 60 # Secs to wait for state to turn ok
SKIP_FLAKEY_TESTS_ON_ERROR = os.environ.get("GALAXY_TEST_SKIP_FLAKEY_TESTS_ON_ERROR", None)
def flakey(method):
@wraps(method)
def wrapped_method(test_case, *args, **kwargs):
try:
method(test_case, *args, **kwargs)
except unittest.SkipTest:
raise
except Exception:
if SKIP_FLAKEY_TESTS_ON_ERROR:
raise unittest.SkipTest("Error encountered during test marked as @flakey.")
else:
raise
return wrapped_method
def skip_without_tool(tool_id):
"""Decorate an API test method as requiring a specific tool.
Have test framework skip the test case if the tool is unavailable.
"""
def method_wrapper(method):
def get_tool_ids(api_test_case):
index = api_test_case.galaxy_interactor.get("tools", data=dict(in_panel=False))
tools = index.json()
# In panels by default, so flatten out sections...
tool_ids = [itemgetter("id")(_) for _ in tools]
return tool_ids
@wraps(method)
def wrapped_method(api_test_case, *args, **kwargs):
_raise_skip_if(tool_id not in get_tool_ids(api_test_case))
return method(api_test_case, *args, **kwargs)
return wrapped_method
return method_wrapper
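# Illustrative sketch (hypothetical example, not used by the framework): applying
# the decorator to a plain function shows its shape - when the wrapped callable is
# invoked with an API test case, it raises SkipTest unless the "cat1" tool is
# installed on the target Galaxy.
@skip_without_tool("cat1")
def _example_cat1_test(api_test_case):
    """Placeholder test body; only reached when the 'cat1' tool is available."""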
def skip_without_datatype(extension):
"""Decorate an API test method as requiring a specific datatype.
Have test framework skip the test case if the datatype is unavailable.
"""
def has_datatype(api_test_case):
index_response = api_test_case.galaxy_interactor.get("datatypes")
assert index_response.status_code == 200, "Failed to fetch datatypes for target Galaxy."
datatypes = index_response.json()
assert isinstance(datatypes, list)
return extension in datatypes
def method_wrapper(method):
@wraps(method)
def wrapped_method(api_test_case, *args, **kwargs):
_raise_skip_if(not has_datatype(api_test_case))
method(api_test_case, *args, **kwargs)
return wrapped_method
return method_wrapper
def skip_if_site_down(url):
def site_down():
try:
response = requests.get(url, timeout=10)
return response.status_code != 200
except Exception:
# An unreachable site counts as down so that dependent tests are skipped.
return True
def method_wrapper(method):
@wraps(method)
def wrapped_method(api_test_case, *args, **kwargs):
_raise_skip_if(site_down(), "Test depends on [%s] being up and it appears to be down." % url)
method(api_test_case, *args, **kwargs)
return wrapped_method
return method_wrapper
skip_if_toolshed_down = skip_if_site_down("https://toolshed.g2.bx.psu.edu")
skip_if_github_down = skip_if_site_down("https://github.com/")
def summarize_instance_history_on_error(method):
@wraps(method)
def wrapped_method(api_test_case, *args, **kwds):
try:
method(api_test_case, *args, **kwds)
except Exception:
api_test_case.dataset_populator._summarize_history(api_test_case.history_id)
raise
return wrapped_method
@nottest
def uses_test_history(**test_history_kwd):
"""Can override require_new and cancel_executions using kwds to decorator.
"""
def method_wrapper(method):
@wraps(method)
def wrapped_method(api_test_case, *args, **kwds):
with api_test_case.dataset_populator.test_history(**test_history_kwd) as history_id:
method(api_test_case, history_id, *args, **kwds)
return wrapped_method
return method_wrapper
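# Illustrative sketch (hypothetical): the decorator opens a managed test history
# and passes its id as the second positional argument of the wrapped test method.
@uses_test_history(require_new=False)
def _example_history_test(api_test_case, history_id):
    """Placeholder body; ``history_id`` is supplied by the decorator."""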
def _raise_skip_if(check, *args):
if check:
from nose.plugins.skip import SkipTest
raise SkipTest(*args)
# Deprecated mixin, use dataset populator instead.
# TODO: Rework existing tests to target DatasetPopulator in a setup method instead.
class TestsDatasets:
def _new_dataset(self, history_id, content='TestData123', **kwds):
return DatasetPopulator(self.galaxy_interactor).new_dataset(history_id, content=content, **kwds)
def _wait_for_history(self, history_id, assert_ok=False):
return DatasetPopulator(self.galaxy_interactor).wait_for_history(history_id, assert_ok=assert_ok)
def _new_history(self, **kwds):
return DatasetPopulator(self.galaxy_interactor).new_history(**kwds)
def _upload_payload(self, history_id, content, **kwds):
return DatasetPopulator(self.galaxy_interactor).upload_payload(history_id, content, **kwds)
def _run_tool_payload(self, tool_id, inputs, history_id, **kwds):
return DatasetPopulator(self.galaxy_interactor).run_tool_payload(tool_id, inputs, history_id, **kwds)
class BaseDatasetPopulator:
""" Abstract description of API operations optimized for testing
Galaxy - implementations must implement _get, _post and _delete.
"""
def new_dataset(self, history_id, content=None, wait=False, **kwds):
run_response = self.new_dataset_request(history_id, content=content, wait=wait, **kwds)
assert run_response.status_code == 200, "Failed to create new dataset with response: %s" % run_response.content
return run_response.json()["outputs"][0]
def new_dataset_request(self, history_id, content=None, wait=False, **kwds):
if content is None and "ftp_files" not in kwds:
content = "TestData123"
payload = self.upload_payload(history_id, content=content, **kwds)
run_response = self.tools_post(payload)
if wait:
self.wait_for_tool_run(history_id, run_response, assert_ok=kwds.get('assert_ok', True))
return run_response
def fetch(self, payload, assert_ok=True, timeout=DEFAULT_TIMEOUT):
tool_response = self._post("tools/fetch", data=payload)
if assert_ok:
job = self.check_run(tool_response)
self.wait_for_job(job["id"], timeout=timeout)
job = tool_response.json()["jobs"][0]
details = self.get_job_details(job["id"]).json()
assert details["state"] == "ok", details
return tool_response
def wait_for_tool_run(self, history_id, run_response, timeout=DEFAULT_TIMEOUT, assert_ok=True):
job = self.check_run(run_response)
self.wait_for_job(job["id"], timeout=timeout)
self.wait_for_history(history_id, assert_ok=assert_ok, timeout=timeout)
return run_response
def check_run(self, run_response):
run = run_response.json()
assert run_response.status_code == 200, run
job = run["jobs"][0]
return job
def wait_for_history(self, history_id, assert_ok=False, timeout=DEFAULT_TIMEOUT):
try:
return wait_on_state(lambda: self._get("histories/%s" % history_id), desc="history state", assert_ok=assert_ok, timeout=timeout)
except AssertionError:
self._summarize_history(history_id)
raise
def wait_for_history_jobs(self, history_id, assert_ok=False, timeout=DEFAULT_TIMEOUT):
def has_active_jobs():
active_jobs = self.active_history_jobs(history_id)
if len(active_jobs) == 0:
return True
else:
return None
try:
wait_on(has_active_jobs, "active jobs", timeout=timeout)
except TimeoutAssertionError as e:
jobs = self.history_jobs(history_id)
message = "Failed waiting on active jobs to complete, current jobs are [{}]. {}".format(jobs, e)
raise TimeoutAssertionError(message)
if assert_ok:
return self.wait_for_history(history_id, assert_ok=True, timeout=timeout)
def wait_for_job(self, job_id, assert_ok=False, timeout=DEFAULT_TIMEOUT):
return wait_on_state(lambda: self.get_job_details(job_id), desc="job state", assert_ok=assert_ok, timeout=timeout)
def get_job_details(self, job_id, full=False):
return self._get("jobs/{}?full={}".format(job_id, full))
def cancel_history_jobs(self, history_id, wait=True):
active_jobs = self.active_history_jobs(history_id)
for active_job in active_jobs:
self.cancel_job(active_job["id"])
def history_jobs(self, history_id):
query_params = {"history_id": history_id, "order_by": "create_time"}
jobs_response = self._get("jobs", query_params)
assert jobs_response.status_code == 200
return jobs_response.json()
def active_history_jobs(self, history_id):
all_history_jobs = self.history_jobs(history_id)
active_jobs = [j for j in all_history_jobs if j["state"] in ["new", "upload", "waiting", "queued", "running"]]
return active_jobs
def cancel_job(self, job_id):
return self._delete("jobs/%s" % job_id)
def delete_dataset(self, history_id, content_id):
delete_response = self._delete("histories/{}/contents/{}".format(history_id, content_id))
return delete_response
def create_tool_from_path(self, tool_path):
tool_directory = os.path.dirname(os.path.abspath(tool_path))
payload = dict(
src="from_path",
path=tool_path,
tool_directory=tool_directory,
)
return self._create_tool_raw(payload)
def create_tool(self, representation, tool_directory=None):
if isinstance(representation, dict):
representation = json.dumps(representation)
payload = dict(
representation=representation,
tool_directory=tool_directory,
)
return self._create_tool_raw(payload)
def _create_tool_raw(self, payload):
try:
create_response = self._post("dynamic_tools", data=payload, admin=True)
except TypeError:
create_response = self._post("dynamic_tools", data=payload)
assert create_response.status_code == 200, create_response.text
return create_response.json()
def list_dynamic_tools(self):
list_response = self._get("dynamic_tools", admin=True)
assert list_response.status_code == 200, list_response
return list_response.json()
def show_dynamic_tool(self, uuid):
show_response = self._get("dynamic_tools/%s" % uuid, admin=True)
assert show_response.status_code == 200, show_response
return show_response.json()
def deactivate_dynamic_tool(self, uuid):
delete_response = self._delete("dynamic_tools/%s" % uuid, admin=True)
return delete_response.json()
def _summarize_history(self, history_id):
pass
@contextlib.contextmanager
def test_history(self, **kwds):
cleanup = "GALAXY_TEST_NO_CLEANUP" not in os.environ
def wrap_up():
cancel_executions = kwds.get("cancel_executions", True)
if cleanup and cancel_executions:
self.cancel_history_jobs(history_id)
require_new = kwds.get("require_new", True)
try:
history_id = None
if not require_new:
history_id = kwds.get("GALAXY_TEST_HISTORY_ID", None)
history_id = history_id or self.new_history()
yield history_id
wrap_up()
except Exception:
self._summarize_history(history_id)
wrap_up()
raise
def new_history(self, **kwds):
name = kwds.get("name", "API Test History")
create_history_response = self._post("histories", data=dict(name=name))
assert "id" in create_history_response.json(), create_history_response.text
history_id = create_history_response.json()["id"]
return history_id
def upload_payload(self, history_id, content=None, **kwds):
name = kwds.get("name", "Test_Dataset")
dbkey = kwds.get("dbkey", "?")
file_type = kwds.get("file_type", 'txt')
upload_params = {
'files_0|NAME': name,
'dbkey': dbkey,
'file_type': file_type,
}
if dbkey is None:
del upload_params["dbkey"]
if content is None:
upload_params["files_0|ftp_files"] = kwds.get("ftp_files")
elif hasattr(content, 'read'):
upload_params["files_0|file_data"] = content
else:
upload_params['files_0|url_paste'] = content
if "to_posix_lines" in kwds:
upload_params["files_0|to_posix_lines"] = kwds["to_posix_lines"]
if "space_to_tab" in kwds:
upload_params["files_0|space_to_tab"] = kwds["space_to_tab"]
if "auto_decompress" in kwds:
upload_params["files_0|auto_decompress"] = kwds["auto_decompress"]
upload_params.update(kwds.get("extra_inputs", {}))
return self.run_tool_payload(
tool_id='upload1',
inputs=upload_params,
history_id=history_id,
upload_type='upload_dataset'
)
def get_remote_files(self, target="ftp"):
return self._get("remote_files", data={"target": target}).json()
def run_tool_payload(self, tool_id, inputs, history_id, **kwds):
# Remove files_%d|file_data parameters from inputs dict and attach
# as __files dictionary.
for key, value in list(inputs.items()):
if key.startswith("files_") and key.endswith("|file_data"):
if "__files" not in kwds:
kwds["__files"] = {}
kwds["__files"][key] = value
del inputs[key]
return dict(
tool_id=tool_id,
inputs=json.dumps(inputs),
history_id=history_id,
**kwds
)
def run_tool(self, tool_id, inputs, history_id, assert_ok=True, **kwds):
payload = self.run_tool_payload(tool_id, inputs, history_id, **kwds)
tool_response = self.tools_post(payload)
if assert_ok:
api_asserts.assert_status_code_is(tool_response, 200)
return tool_response.json()
else:
return tool_response
def tools_post(self, payload, url="tools"):
tool_response = self._post(url, data=payload)
return tool_response
def get_history_dataset_content(self, history_id, wait=True, filename=None, type='text', raw=False, **kwds):
dataset_id = self.__history_content_id(history_id, wait=wait, **kwds)
data = {}
if filename:
data["filename"] = filename
if raw:
data['raw'] = True
display_response = self._get_contents_request(history_id, "/%s/display" % dataset_id, data=data)
assert display_response.status_code == 200, display_response.text
if type == 'text':
return display_response.text
else:
return display_response.content
def get_history_dataset_details(self, history_id, **kwds):
dataset_id = self.__history_content_id(history_id, **kwds)
details_response = self._get_contents_request(history_id, "/datasets/%s" % dataset_id)
assert details_response.status_code == 200
return details_response.json()
def get_history_dataset_extra_files(self, history_id, **kwds):
dataset_id = self.__history_content_id(history_id, **kwds)
details_response = self._get_contents_request(history_id, "/%s/extra_files" % dataset_id)
assert details_response.status_code == 200, details_response.content
return details_response.json()
def get_history_collection_details(self, history_id, **kwds):
hdca_id = self.__history_content_id(history_id, **kwds)
details_response = self._get_contents_request(history_id, "/dataset_collections/%s" % hdca_id)
assert details_response.status_code == 200, details_response.content
return details_response.json()
def run_collection_creates_list(self, history_id, hdca_id):
inputs = {
"input1": {"src": "hdca", "id": hdca_id},
}
self.wait_for_history(history_id, assert_ok=True)
return self.run_tool("collection_creates_list", inputs, history_id)
def run_exit_code_from_file(self, history_id, hdca_id):
exit_code_inputs = {
"input": {'batch': True, 'values': [{"src": "hdca", "id": hdca_id}]},
}
response = self.run_tool("exit_code_from_file", exit_code_inputs, history_id, assert_ok=False).json()
self.wait_for_history(history_id, assert_ok=False)
return response
def __history_content_id(self, history_id, wait=True, **kwds):
if wait:
assert_ok = kwds.get("assert_ok", True)
self.wait_for_history(history_id, assert_ok=assert_ok)
# kwds may contain a 'dataset' object response or a 'dataset_id';
# otherwise the last dataset in the history is fetched.
if "dataset_id" in kwds:
history_content_id = kwds["dataset_id"]
elif "content_id" in kwds:
history_content_id = kwds["content_id"]
elif "dataset" in kwds:
history_content_id = kwds["dataset"]["id"]
else:
hid = kwds.get("hid", None) # If not hid, just grab last dataset
history_contents = self._get_contents_request(history_id).json()
if hid:
history_content_id = None
for history_item in history_contents:
if history_item["hid"] == hid:
history_content_id = history_item["id"]
if history_content_id is None:
raise Exception("Could not find content with HID [{}] in [{}]".format(hid, history_contents))
else:
# No hid specified - just grab most recent element.
history_content_id = history_contents[-1]["id"]
return history_content_id
def _get_contents_request(self, history_id, suffix="", data=None):
if data is None:
data = {}
url = "histories/%s/contents" % history_id
if suffix:
url = "{}{}".format(url, suffix)
return self._get(url, data=data)
def ds_entry(self, history_content):
src = 'hda'
if 'history_content_type' in history_content and history_content['history_content_type'] == "dataset_collection":
src = 'hdca'
return dict(src=src, id=history_content["id"])
def get_roles(self):
roles_response = self.galaxy_interactor.get("roles", admin=True)
assert roles_response.status_code == 200
return roles_response.json()
def user_email(self):
users_response = self.galaxy_interactor.get("users")
users = users_response.json()
assert len(users) == 1
return users[0]["email"]
def user_id(self):
users_response = self.galaxy_interactor.get("users")
users = users_response.json()
assert len(users) == 1
return users[0]["id"]
def user_private_role_id(self):
user_email = self.user_email()
roles = self.get_roles()
users_roles = [r for r in roles if r["name"] == user_email]
assert len(users_roles) == 1
return users_roles[0]["id"]
def create_role(self, user_ids, description=None):
payload = {
"name": self.get_random_name(prefix="testpop"),
"description": description or "Test Role",
"user_ids": json.dumps(user_ids),
}
role_response = self.galaxy_interactor.post("roles", data=payload, admin=True)
assert role_response.status_code == 200
return role_response.json()[0]
def make_private(self, history_id, dataset_id):
role_id = self.user_private_role_id()
# Give manage permission to the user.
payload = {
"access": json.dumps([role_id]),
"manage": json.dumps([role_id]),
}
url = "histories/{}/contents/{}/permissions".format(history_id, dataset_id)
update_response = self.galaxy_interactor._put(url, payload, admin=True)
assert update_response.status_code == 200, update_response.content
return update_response.json()
def validate_dataset(self, history_id, dataset_id):
url = "histories/{}/contents/{}/validate".format(history_id, dataset_id)
update_response = self.galaxy_interactor._put(url, {})
assert update_response.status_code == 200, update_response.content
return update_response.json()
def validate_dataset_and_wait(self, history_id, dataset_id):
self.validate_dataset(history_id, dataset_id)
def validated():
metadata = self.get_history_dataset_details(history_id, dataset_id=dataset_id)
validated_state = metadata['validated_state']
if validated_state == 'unknown':
return
else:
return validated_state
return wait_on(
validated,
"dataset validation"
)
def export_url(self, history_id, data, check_download=True):
url = "histories/%s/exports" % history_id
put_response = self._put(url, data)
api_asserts.assert_status_code_is(put_response, 202)
def export_ready_response():
put_response = self._put(url)
if put_response.status_code == 202:
return None
return put_response
put_response = wait_on(export_ready_response, desc="export ready")
api_asserts.assert_status_code_is(put_response, 200)
response = put_response.json()
api_asserts.assert_has_keys(response, "download_url")
download_url = response["download_url"]
if check_download:
self.get_export_url(download_url)
return download_url
def get_export_url(self, export_url):
full_download_url = "{}?key={}".format(export_url, self._api_key)
download_response = self._get(full_download_url)
api_asserts.assert_status_code_is(download_response, 200)
return download_response
def import_history(self, import_data):
files = {}
archive_file = import_data.pop("archive_file", None)
if archive_file:
files["archive_file"] = archive_file
import_response = self._post("histories", data=import_data, files=files)
api_asserts.assert_status_code_is(import_response, 200)
def import_history_and_wait_for_name(self, import_data, history_name):
def history_names():
return {h["name"]: h for h in self.get_histories()}
import_name = "imported from archive: %s" % history_name
assert import_name not in history_names()
self.import_history(import_data)
def has_history_with_name():
histories = history_names()
return histories.get(import_name, None)
imported_history = wait_on(has_history_with_name, desc="import history")
imported_history_id = imported_history["id"]
self.wait_for_history(imported_history_id)
return imported_history_id
def rename_history(self, history_id, new_name):
update_url = "histories/%s" % history_id
put_response = self._put(update_url, {"name": new_name})
return put_response
def get_histories(self):
history_index_response = self._get("histories")
api_asserts.assert_status_code_is(history_index_response, 200)
return history_index_response.json()
def wait_on_history_length(self, history_id, wait_on_history_length):
def history_has_length():
history_length = self.history_length(history_id)
return None if history_length != wait_on_history_length else True
wait_on(history_has_length, desc="import history population")
def history_length(self, history_id):
contents_response = self._get("histories/%s/contents" % history_id)
api_asserts.assert_status_code_is(contents_response, 200)
contents = contents_response.json()
return len(contents)
def reimport_history(self, history_id, history_name, wait_on_history_length, export_kwds, url, api_key):
# Export the history.
download_path = self.export_url(history_id, export_kwds, check_download=True)
# Create download for history
full_download_url = "{}{}?key={}".format(url, download_path, api_key)
import_data = dict(archive_source=full_download_url, archive_type="url")
imported_history_id = self.import_history_and_wait_for_name(import_data, history_name)
if wait_on_history_length:
self.wait_on_history_length(imported_history_id, wait_on_history_length)
return imported_history_id
def get_random_name(self, prefix=None, suffix=None, len=10):
# stolen from navigates_galaxy.py
return '{}{}{}'.format(
prefix or '',
''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(len)),
suffix or '',
)
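# Illustrative sketch (hypothetical helper, not called by the framework): the
# typical round trip a test performs with a dataset populator - create a history,
# upload a dataset, then read its content back once the upload job is ok.
def _example_dataset_roundtrip(dataset_populator):
    history_id = dataset_populator.new_history()
    hda = dataset_populator.new_dataset(history_id, content="TestData123", wait=True)
    return dataset_populator.get_history_dataset_content(history_id, dataset_id=hda["id"])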
class DatasetPopulator(BaseDatasetPopulator):
def __init__(self, galaxy_interactor):
self.galaxy_interactor = galaxy_interactor
@property
def _api_key(self):
return self.galaxy_interactor.api_key
def _post(self, route, data=None, files=None, admin=False):
return self.galaxy_interactor.post(route, data, files=files, admin=admin)
def _put(self, route, data=None):
return self.galaxy_interactor.put(route, data)
def _get(self, route, data=None, admin=False):
if data is None:
data = {}
return self.galaxy_interactor.get(route, data=data, admin=admin)
def _delete(self, route, data=None, admin=False):
if data is None:
data = {}
return self.galaxy_interactor.delete(route, data=data, admin=admin)
def _summarize_history(self, history_id):
self.galaxy_interactor._summarize_history(history_id)
def wait_for_dataset(self, history_id, dataset_id, assert_ok=False, timeout=DEFAULT_TIMEOUT):
return wait_on_state(lambda: self._get("histories/{}/contents/{}".format(history_id, dataset_id)), desc="dataset state", assert_ok=assert_ok, timeout=timeout)
class BaseWorkflowPopulator:
def load_workflow(self, name, content=workflow_str, add_pja=False):
workflow = json.loads(content)
workflow["name"] = name
if add_pja:
tool_step = workflow["steps"]["2"]
tool_step["post_job_actions"]["RenameDatasetActionout_file1"] = dict(
action_type="RenameDatasetAction",
output_name="out_file1",
action_arguments=dict(newname="foo ${replaceme}"),
)
return workflow
def load_random_x2_workflow(self, name):
return self.load_workflow(name, content=workflow_random_x2_str)
def load_workflow_from_resource(self, name, filename=None):
if filename is None:
filename = "data/%s.ga" % name
content = unicodify(resource_string(__name__, filename))
return self.load_workflow(name, content=content)
def simple_workflow(self, name, **create_kwds):
workflow = self.load_workflow(name)
return self.create_workflow(workflow, **create_kwds)
def import_workflow_from_path(self, from_path):
data = dict(
from_path=from_path
)
import_response = self._post("workflows", data=data)
api_asserts.assert_status_code_is(import_response, 200)
return import_response.json()["id"]
def create_workflow(self, workflow, **create_kwds):
upload_response = self.create_workflow_response(workflow, **create_kwds)
uploaded_workflow_id = upload_response.json()["id"]
return uploaded_workflow_id
def create_workflow_response(self, workflow, **create_kwds):
data = dict(
workflow=json.dumps(workflow),
**create_kwds
)
upload_response = self._post("workflows/upload", data=data)
return upload_response
def upload_yaml_workflow(self, has_yaml, **kwds):
round_trip_conversion = kwds.get("round_trip_format_conversion", False)
client_convert = kwds.pop("client_convert", not round_trip_conversion)
kwds["convert"] = client_convert
workflow = convert_and_import_workflow(has_yaml, galaxy_interface=self, **kwds)
workflow_id = workflow["id"]
if round_trip_conversion:
workflow_yaml_wrapped = self.download_workflow(workflow_id, style="format2_wrapped_yaml")
assert "yaml_content" in workflow_yaml_wrapped, workflow_yaml_wrapped
round_trip_converted_content = workflow_yaml_wrapped["yaml_content"]
workflow_id = self.upload_yaml_workflow(round_trip_converted_content, client_convert=False, round_trip_conversion=False)
return workflow_id
def wait_for_invocation(self, workflow_id, invocation_id, timeout=DEFAULT_TIMEOUT):
url = "workflows/{}/usage/{}".format(workflow_id, invocation_id)
return wait_on_state(lambda: self._get(url), desc="workflow invocation state", timeout=timeout)
def history_invocations(self, history_id):
history_invocations_response = self._get("invocations", {"history_id": history_id})
api_asserts.assert_status_code_is(history_invocations_response, 200)
return history_invocations_response.json()
def wait_for_history_workflows(self, history_id, assert_ok=True, timeout=DEFAULT_TIMEOUT, expected_invocation_count=None):
if expected_invocation_count is not None:
def invocation_count():
invocations = self.history_invocations(history_id)
if len(invocations) == expected_invocation_count:
return True
wait_on(invocation_count, "%s history invocations" % expected_invocation_count)
for invocation in self.history_invocations(history_id):
workflow_id = invocation["workflow_id"]
invocation_id = invocation["id"]
self.wait_for_workflow(workflow_id, invocation_id, history_id, timeout=timeout, assert_ok=assert_ok)
def wait_for_workflow(self, workflow_id, invocation_id, history_id, assert_ok=True, timeout=DEFAULT_TIMEOUT):
""" Wait for a workflow invocation to completely schedule and then history
to be complete. """
self.wait_for_invocation(workflow_id, invocation_id, timeout=timeout)
self.dataset_populator.wait_for_history_jobs(history_id, assert_ok=assert_ok, timeout=timeout)
def invoke_workflow_raw(self, workflow_id, request):
url = "workflows/%s/usage" % (workflow_id)
invocation_response = self._post(url, data=request)
return invocation_response
def invoke_workflow(self, history_id, workflow_id, inputs=None, request=None, assert_ok=True):
if inputs is None:
inputs = {}
if request is None:
request = {}
request["history"] = "hist_id=%s" % history_id,
if inputs:
request["inputs"] = json.dumps(inputs)
request["inputs_by"] = 'step_index'
invocation_response = self.invoke_workflow_raw(workflow_id, request)
if assert_ok:
api_asserts.assert_status_code_is(invocation_response, 200)
invocation_id = invocation_response.json()["id"]
return invocation_id
else:
return invocation_response
def workflow_report_json(self, workflow_id, invocation_id):
response = self._get("workflows/{}/invocations/{}/report".format(workflow_id, invocation_id))
api_asserts.assert_status_code_is(response, 200)
return response.json()
def download_workflow(self, workflow_id, style=None):
params = {}
if style is not None:
params["style"] = style
response = self._get("workflows/%s/download" % workflow_id, data=params)
api_asserts.assert_status_code_is(response, 200)
if style != "format2":
return response.json()
else:
return ordered_load(response.text)
def update_workflow(self, workflow_id, workflow_object):
data = dict(
workflow=workflow_object
)
raw_url = 'workflows/%s' % workflow_id
put_response = self.galaxy_interactor._put(raw_url, data=json.dumps(data))
return put_response
@contextlib.contextmanager
def export_for_update(self, workflow_id):
workflow_object = self.download_workflow(workflow_id)
yield workflow_object
self.update_workflow(workflow_id, workflow_object)
def run_workflow(self, has_workflow, test_data=None, history_id=None, wait=True, source_type=None, jobs_descriptions=None, expected_response=200, assert_ok=True, client_convert=None, round_trip_format_conversion=False, raw_yaml=False):
"""High-level wrapper around workflow API, etc. to invoke format 2 workflows."""
workflow_populator = self
if client_convert is None:
client_convert = not round_trip_format_conversion
workflow_id = workflow_populator.upload_yaml_workflow(has_workflow, source_type=source_type, client_convert=client_convert, round_trip_format_conversion=round_trip_format_conversion, raw_yaml=raw_yaml)
if test_data is None:
if jobs_descriptions is None:
assert source_type != "path"
jobs_descriptions = yaml.safe_load(has_workflow)
test_data = jobs_descriptions.get("test_data", {})
if not isinstance(test_data, dict):
test_data = yaml.safe_load(test_data)
parameters = test_data.pop('step_parameters', {})
replacement_parameters = test_data.pop("replacement_parameters", {})
inputs, label_map, has_uploads = load_data_dict(history_id, test_data, self.dataset_populator, self.dataset_collection_populator)
workflow_request = dict(
history="hist_id=%s" % history_id,
workflow_id=workflow_id,
)
workflow_request["inputs"] = json.dumps(label_map)
workflow_request["inputs_by"] = 'name'
if parameters:
workflow_request["parameters"] = json.dumps(parameters)
workflow_request["parameters_normalized"] = True
if replacement_parameters:
workflow_request["replacement_params"] = json.dumps(replacement_parameters)
if has_uploads:
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
invocation_response = workflow_populator.invoke_workflow_raw(workflow_id, workflow_request)
api_asserts.assert_status_code_is(invocation_response, expected_response)
invocation = invocation_response.json()
if expected_response != 200:
assert not assert_ok
return invocation
invocation_id = invocation.get('id')
if invocation_id:
# Wait for workflow to become fully scheduled and then for all jobs
# complete.
if wait:
workflow_populator.wait_for_workflow(workflow_id, invocation_id, history_id, assert_ok=assert_ok)
jobs = self.dataset_populator.history_jobs(history_id)
return RunJobsSummary(
history_id=history_id,
workflow_id=workflow_id,
invocation_id=invocation_id,
inputs=inputs,
jobs=jobs,
invocation=invocation,
workflow_request=workflow_request
)
def dump_workflow(self, workflow_id, style=None):
raw_workflow = self.download_workflow(workflow_id, style=style)
if style == "format2_wrapped_yaml":
print(raw_workflow["yaml_content"])
else:
print(json.dumps(raw_workflow, sort_keys=True, indent=2))
RunJobsSummary = namedtuple('RunJobsSummary', ['history_id', 'workflow_id', 'invocation_id', 'inputs', 'jobs', 'invocation', 'workflow_request'])
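# Illustrative sketch (hypothetical, not used by the framework): uploading a small
# Format 2 workflow through the populator above. The YAML follows the common
# gxformat2 test shorthand; the workflow content and helper name are placeholders.
_EXAMPLE_FORMAT2_WORKFLOW = """
class: GalaxyWorkflow
inputs:
  input1: data
steps:
  cat:
    tool_id: cat1
    in:
      input1: input1
"""

def _example_upload_format2_workflow(workflow_populator):
    # Returns the id of the newly imported workflow.
    return workflow_populator.upload_yaml_workflow(_EXAMPLE_FORMAT2_WORKFLOW)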
class WorkflowPopulator(BaseWorkflowPopulator, ImporterGalaxyInterface):
def __init__(self, galaxy_interactor):
self.galaxy_interactor = galaxy_interactor
self.dataset_populator = DatasetPopulator(galaxy_interactor)
self.dataset_collection_populator = DatasetCollectionPopulator(galaxy_interactor)
def _post(self, route, data=None, admin=False):
if data is None:
data = {}
return self.galaxy_interactor.post(route, data, admin=admin)
def _get(self, route, data=None):
if data is None:
data = {}
return self.galaxy_interactor.get(route, data=data)
# Required for the ImporterGalaxyInterface interface - so we can recursively import
# nested workflows.
def import_workflow(self, workflow, **kwds):
workflow_str = json.dumps(workflow, indent=4)
data = {
'workflow': workflow_str,
}
data.update(**kwds)
upload_response = self._post("workflows", data=data)
assert upload_response.status_code == 200, upload_response.content
return upload_response.json()
def import_tool(self, tool):
""" Import a workflow via POST /api/workflows or
comparable interface into Galaxy.
"""
upload_response = self._import_tool_response(tool)
assert upload_response.status_code == 200, upload_response
return upload_response.json()
def _import_tool_response(self, tool):
tool_str = json.dumps(tool, indent=4)
data = {
'representation': tool_str
}
upload_response = self._post("dynamic_tools", data=data, admin=True)
return upload_response
class LibraryPopulator:
def __init__(self, galaxy_interactor):
self.galaxy_interactor = galaxy_interactor
self.dataset_populator = DatasetPopulator(galaxy_interactor)
def get_libraries(self):
get_response = self.galaxy_interactor.get("libraries")
return get_response.json()
def new_private_library(self, name):
library = self.new_library(name)
library_id = library["id"]
role_id = self.user_private_role_id()
self.set_permissions(library_id, role_id)
return library
def new_library(self, name):
data = dict(name=name)
create_response = self.galaxy_interactor.post("libraries", data=data, admin=True)
return create_response.json()
def set_permissions(self, library_id, role_id=None):
if role_id:
perm_list = json.dumps(role_id)
else:
perm_list = json.dumps([])
permissions = dict(
LIBRARY_ACCESS_in=perm_list,
LIBRARY_MODIFY_in=perm_list,
LIBRARY_ADD_in=perm_list,
LIBRARY_MANAGE_in=perm_list,
)
response = self.galaxy_interactor.post("libraries/%s/permissions" % library_id, data=permissions, admin=True)
api_asserts.assert_status_code_is(response, 200)
def user_email(self):
# deprecated - use DatasetPopulator
return self.dataset_populator.user_email()
def user_private_role_id(self):
# deprecated - use DatasetPopulator
return self.dataset_populator.user_private_role_id()
def create_dataset_request(self, library, **kwds):
upload_option = kwds.get("upload_option", "upload_file")
create_data = {
"folder_id": kwds.get("folder_id", library["root_folder_id"]),
"create_type": "file",
"files_0|NAME": kwds.get("name", "NewFile"),
"upload_option": upload_option,
"file_type": kwds.get("file_type", "auto"),
"db_key": kwds.get("db_key", "?"),
}
if kwds.get("link_data"):
create_data["link_data_only"] = "link_to_files"
if upload_option == "upload_file":
files = {
"files_0|file_data": kwds.get("file", StringIO(kwds.get("contents", "TestData"))),
}
elif upload_option == "upload_paths":
create_data["filesystem_paths"] = kwds["paths"]
files = {}
elif upload_option == "upload_directory":
create_data["server_dir"] = kwds["server_dir"]
files = {}
return create_data, files
def new_library_dataset(self, name, **create_dataset_kwds):
library = self.new_private_library(name)
payload, files = self.create_dataset_request(library, **create_dataset_kwds)
dataset = self.raw_library_contents_create(library["id"], payload, files=files).json()[0]
return self.wait_on_library_dataset(library, dataset)
def wait_on_library_dataset(self, library, dataset):
def show():
return self.galaxy_interactor.get("libraries/{}/contents/{}".format(library["id"], dataset["id"]))
wait_on_state(show, assert_ok=True, timeout=DEFAULT_TIMEOUT)
return show().json()
def raw_library_contents_create(self, library_id, payload, files=None):
if files is None:
files = {}
url_rel = "libraries/%s/contents" % library_id
return self.galaxy_interactor.post(url_rel, payload, files=files)
def show_ldda(self, library_id, library_dataset_id):
return self.galaxy_interactor.get("libraries/{}/contents/{}".format(library_id, library_dataset_id))
def new_library_dataset_in_private_library(self, library_name="private_dataset", wait=True):
library = self.new_private_library(library_name)
payload, files = self.create_dataset_request(library, file_type="txt", contents="create_test")
create_response = self.galaxy_interactor.post("libraries/%s/contents" % library["id"], payload, files=files)
api_asserts.assert_status_code_is(create_response, 200)
library_datasets = create_response.json()
assert len(library_datasets) == 1
library_dataset = library_datasets[0]
if wait:
def show():
return self.show_ldda(library["id"], library_dataset["id"])
wait_on_state(show, assert_ok=True)
library_dataset = show().json()
return library, library_dataset
def get_library_contents_with_path(self, library_id, path):
all_contents_response = self.galaxy_interactor.get("libraries/%s/contents" % library_id)
api_asserts.assert_status_code_is(all_contents_response, 200)
all_contents = all_contents_response.json()
matching = [c for c in all_contents if c["name"] == path]
if len(matching) == 0:
raise Exception("Failed to find library contents with path [{}], contents are {}".format(path, all_contents))
get_response = self.galaxy_interactor.get(matching[0]["url"])
api_asserts.assert_status_code_is(get_response, 200)
return get_response.json()
def setup_fetch_to_folder(self, test_name):
history_id = self.dataset_populator.new_history()
library = self.new_private_library(test_name)
folder_id = library["root_folder_id"][1:]
destination = {"type": "library_folder", "library_folder_id": folder_id}
return history_id, library, destination
class BaseDatasetCollectionPopulator:
def create_list_from_pairs(self, history_id, pairs, name="Dataset Collection from pairs"):
return self.create_nested_collection(history_id=history_id,
collection=pairs,
collection_type='list:paired',
name=name)
def nested_collection_identifiers(self, history_id, collection_type):
rank_types = list(reversed(collection_type.split(":")))
assert len(rank_types) > 0
rank_type_0 = rank_types[0]
if rank_type_0 == "list":
identifiers = self.list_identifiers(history_id)
else:
identifiers = self.pair_identifiers(history_id)
nested_collection_type = rank_type_0
for i, rank_type in enumerate(reversed(rank_types[1:])):
name = "test_level_%d" % (i + 1) if rank_type == "list" else "paired"
identifiers = [dict(
src="new_collection",
name=name,
collection_type=nested_collection_type,
element_identifiers=identifiers,
)]
nested_collection_type = "{}:{}".format(rank_type, nested_collection_type)
return identifiers
def create_nested_collection(self, history_id, collection_type, name=None, collection=None, element_identifiers=None):
"""Create a nested collection either from collection or using collection_type)."""
assert collection_type is not None
name = name or "Test %s" % collection_type
if collection is not None:
assert element_identifiers is None
element_identifiers = []
for i, pair in enumerate(collection):
element_identifiers.append(dict(
name="test%d" % i,
src="hdca",
id=pair
))
if element_identifiers is None:
element_identifiers = self.nested_collection_identifiers(history_id, collection_type)
payload = dict(
instance_type="history",
history_id=history_id,
element_identifiers=json.dumps(element_identifiers),
collection_type=collection_type,
name=name,
)
return self.__create(payload)
def create_list_of_pairs_in_history(self, history_id, **kwds):
return self.upload_collection(history_id, "list:paired", elements=[
{
"name": "test0",
"elements": [
{"src": "pasted", "paste_content": "TestData123", "name": "forward"},
{"src": "pasted", "paste_content": "TestData123", "name": "reverse"},
]
}
])
def create_list_of_list_in_history(self, history_id, **kwds):
# create_nested_collection generates a nested collection directly from datasets;
# this function instead builds one up recursively from history HDCAs.
collection_type = kwds.pop('collection_type', 'list:list')
collection_types = collection_type.split(':')
list = self.create_list_in_history(history_id, **kwds).json()['id']
current_collection_type = 'list'
for collection_type in collection_types[1:]:
current_collection_type = "{}:{}".format(current_collection_type, collection_type)
response = self.create_nested_collection(history_id=history_id,
collection_type=current_collection_type,
name=current_collection_type,
collection=[list])
list = response.json()['id']
return response
def create_pair_in_history(self, history_id, **kwds):
payload = self.create_pair_payload(
history_id,
instance_type="history",
**kwds
)
return self.__create(payload)
def create_list_in_history(self, history_id, **kwds):
payload = self.create_list_payload(
history_id,
instance_type="history",
**kwds
)
return self.__create(payload)
def upload_collection(self, history_id, collection_type, elements, **kwds):
payload = self.__create_payload_fetch(history_id, collection_type, contents=elements, **kwds)
return self.__create(payload)
def create_list_payload(self, history_id, **kwds):
return self.__create_payload(history_id, identifiers_func=self.list_identifiers, collection_type="list", **kwds)
def create_pair_payload(self, history_id, **kwds):
return self.__create_payload(history_id, identifiers_func=self.pair_identifiers, collection_type="paired", **kwds)
def __create_payload(self, *args, **kwds):
direct_upload = kwds.pop("direct_upload", False)
if direct_upload:
return self.__create_payload_fetch(*args, **kwds)
else:
return self.__create_payload_collection(*args, **kwds)
def __create_payload_fetch(self, history_id, collection_type, **kwds):
files = []
contents = None
if "contents" in kwds:
contents = kwds["contents"]
del kwds["contents"]
elements = []
if contents is None:
if collection_type == "paired":
contents = [("forward", "TestData123"), ("reverse", "TestData123")]
elif collection_type == "list":
contents = ["TestData123", "TestData123", "TestData123"]
else:
raise Exception("Unknown collection_type %s" % collection_type)
if isinstance(contents, list):
for i, contents_level in enumerate(contents):
# If given a full collection definition pass as is.
if isinstance(contents_level, dict):
elements.append(contents_level)
continue
element = {"src": "pasted", "ext": "txt"}
# Else older style list of contents or element ID and contents,
# convert to fetch API.
if isinstance(contents_level, tuple):
# (element_identifier, contents)
element_identifier = contents_level[0]
dataset_contents = contents_level[1]
else:
dataset_contents = contents_level
if collection_type == "list":
element_identifier = "data%d" % i
elif collection_type == "paired" and i == 0:
element_identifier = "forward"
else:
element_identifier = "reverse"
element["name"] = element_identifier
element["paste_content"] = dataset_contents
elements.append(element)
name = kwds.get("name", "Test Dataset Collection")
files_request_part = {}
for i, content in enumerate(files):
files_request_part["files_%d|file_data" % i] = StringIO(content)
targets = [{
"destination": {"type": "hdca"},
"elements": elements,
"collection_type": collection_type,
"name": name,
}]
payload = dict(
history_id=history_id,
targets=json.dumps(targets),
__files=files_request_part,
)
return payload
def wait_for_fetched_collection(self, fetch_response):
self.dataset_populator.wait_for_job(fetch_response["jobs"][0]["id"], assert_ok=True)
initial_dataset_collection = fetch_response["outputs"][0]
dataset_collection = self.dataset_populator.get_history_collection_details(initial_dataset_collection["history_id"], hid=initial_dataset_collection["hid"])
return dataset_collection
def __create_payload_collection(self, history_id, identifiers_func, collection_type, **kwds):
contents = None
if "contents" in kwds:
contents = kwds["contents"]
del kwds["contents"]
if "element_identifiers" not in kwds:
kwds["element_identifiers"] = json.dumps(identifiers_func(history_id, contents=contents))
if "name" not in kwds:
kwds["name"] = "Test Dataset Collection"
payload = dict(
history_id=history_id,
collection_type=collection_type,
**kwds
)
return payload
def pair_identifiers(self, history_id, contents=None):
hda1, hda2 = self.__datasets(history_id, count=2, contents=contents)
element_identifiers = [
dict(name="forward", src="hda", id=hda1["id"]),
dict(name="reverse", src="hda", id=hda2["id"]),
]
return element_identifiers
def list_identifiers(self, history_id, contents=None):
count = 3 if contents is None else len(contents)
# Contents can be a list of strings (with name auto-assigned here) or a list of
# 2-tuples of form (name, dataset_content).
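        # For example (illustrative values): contents=["acgt", "ttga"] gets auto-assigned
        # names, while contents=[("sample1", "acgt"), ("sample2", "ttga")] names each element.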
if contents and isinstance(contents[0], tuple):
hdas = self.__datasets(history_id, count=count, contents=[c[1] for c in contents])
def hda_to_identifier(i, hda):
return dict(name=contents[i][0], src="hda", id=hda["id"])
else:
hdas = self.__datasets(history_id, count=count, contents=contents)
def hda_to_identifier(i, hda):
return dict(name="data%d" % (i + 1), src="hda", id=hda["id"])
element_identifiers = [hda_to_identifier(i, hda) for (i, hda) in enumerate(hdas)]
return element_identifiers
def __create(self, payload):
        # Create a collection - either from existing datasets using the collection creation API
        # or from direct uploads with the fetch API. Dispatch on the "targets" keyword in the
        # payload to decide which to use.
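        # For example, a fetch-style payload (built by __create_payload_fetch) carries
        # "targets" and "__files", while a legacy payload (built by __create_payload_collection)
        # carries "element_identifiers" instead.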
if "targets" not in payload:
return self._create_collection(payload)
else:
return self.dataset_populator.fetch(payload)
def __datasets(self, history_id, count, contents=None):
datasets = []
for i in range(count):
new_kwds = {}
if contents:
new_kwds["content"] = contents[i]
datasets.append(self.dataset_populator.new_dataset(history_id, **new_kwds))
return datasets
def wait_for_dataset_collection(self, create_payload, assert_ok=False, timeout=DEFAULT_TIMEOUT):
for element in create_payload["elements"]:
if element['element_type'] == 'hda':
self.dataset_populator.wait_for_dataset(history_id=element['object']['history_id'],
dataset_id=element['object']['id'],
assert_ok=assert_ok,
timeout=timeout)
elif element['element_type'] == 'dataset_collection':
self.wait_for_dataset_collection(element['object'], assert_ok=assert_ok, timeout=timeout)
class DatasetCollectionPopulator(BaseDatasetCollectionPopulator):
def __init__(self, galaxy_interactor):
self.galaxy_interactor = galaxy_interactor
self.dataset_populator = DatasetPopulator(galaxy_interactor)
def _create_collection(self, payload):
create_response = self.galaxy_interactor.post("dataset_collections", data=payload)
return create_response
def load_data_dict(history_id, test_data, dataset_populator, dataset_collection_populator):
"""Load a dictionary as inputs to a workflow (test data focused)."""
def open_test_data(test_dict, mode="rb"):
test_data_resolver = TestDataResolver()
filename = test_data_resolver.get_filename(test_dict["value"])
return open(filename, mode)
def read_test_data(test_dict):
return open_test_data(test_dict, mode="r").read()
inputs = {}
label_map = {}
has_uploads = False
for key, value in test_data.items():
is_dict = isinstance(value, dict)
if is_dict and ("elements" in value or value.get("type", None) in ["list:paired", "list", "paired"]):
elements_data = value.get("elements", [])
elements = []
for element_data in elements_data:
# Adapt differences between test_data dict and fetch API description.
if "name" not in element_data:
identifier = element_data["identifier"]
element_data["name"] = identifier
input_type = element_data.get("type", "raw")
content = None
if input_type == "File":
content = read_test_data(element_data)
else:
content = element_data["content"]
if content is not None:
element_data["src"] = "pasted"
element_data["paste_content"] = content
elements.append(element_data)
# TODO: make this collection_type
collection_type = value["type"]
new_collection_kwds = {}
if "name" in value:
new_collection_kwds["name"] = value["name"]
if collection_type == "list:paired":
fetch_response = dataset_collection_populator.create_list_of_pairs_in_history(history_id, contents=elements, **new_collection_kwds).json()
elif collection_type == "list":
fetch_response = dataset_collection_populator.create_list_in_history(history_id, contents=elements, direct_upload=True, **new_collection_kwds).json()
else:
fetch_response = dataset_collection_populator.create_pair_in_history(history_id, contents=elements or None, direct_upload=True, **new_collection_kwds).json()
hdca_output = fetch_response["outputs"][0]
hdca = dataset_populator.ds_entry(hdca_output)
hdca["hid"] = hdca_output["hid"]
label_map[key] = hdca
inputs[key] = hdca
has_uploads = True
elif is_dict and "type" in value:
input_type = value["type"]
if input_type == "File":
content = open_test_data(value)
new_dataset_kwds = {
"content": content
}
if "name" in value:
new_dataset_kwds["name"] = value["name"]
if "file_type" in value:
new_dataset_kwds["file_type"] = value["file_type"]
hda = dataset_populator.new_dataset(history_id, **new_dataset_kwds)
label_map[key] = dataset_populator.ds_entry(hda)
has_uploads = True
elif input_type == "raw":
label_map[key] = value["value"]
inputs[key] = value["value"]
elif not is_dict:
has_uploads = True
hda = dataset_populator.new_dataset(history_id, content=value)
label_map[key] = dataset_populator.ds_entry(hda)
inputs[key] = hda
else:
raise ValueError("Invalid test_data def %s" % test_data)
return inputs, label_map, has_uploads
def stage_inputs(galaxy_interactor, history_id, job, use_path_paste=True, use_fetch_api=True, to_posix_lines=True):
"""Alternative to load_data_dict that uses production-style workflow inputs."""
inputs, datasets = InteractorStaging(galaxy_interactor, use_fetch_api=use_fetch_api).stage(
"workflow", history_id=history_id, job=job, use_path_paste=use_path_paste, to_posix_lines=to_posix_lines
)
return inputs, datasets
def stage_rules_example(galaxy_interactor, history_id, example):
"""Wrapper around stage_inputs for staging collections defined by rules spec DSL."""
input_dict = example["test_data"].copy()
input_dict["collection_type"] = input_dict.pop("type")
input_dict["class"] = "Collection"
inputs, _ = stage_inputs(galaxy_interactor, history_id=history_id, job={"input": input_dict})
return inputs
def wait_on_state(state_func, desc="state", skip_states=None, assert_ok=False, timeout=DEFAULT_TIMEOUT):
def get_state():
response = state_func()
assert response.status_code == 200, "Failed to fetch state update while waiting. [%s]" % response.content
state = response.json()["state"]
if state in skip_states:
return None
else:
if assert_ok:
assert state == "ok", "Final state - %s - not okay." % state
return state
if skip_states is None:
skip_states = ["running", "queued", "new", "ready"]
try:
return wait_on(get_state, desc=desc, timeout=timeout)
except TimeoutAssertionError as e:
response = state_func()
raise TimeoutAssertionError("{} Current response containing state [{}].".format(e, response.json()))
class GiPostGetMixin:
"""Mixin for adapting Galaxy testing populators helpers to bioblend."""
@property
def _api_key(self):
return self._gi.key
def _api_url(self):
return self._gi.url
def _get(self, route, data=None):
if data is None:
data = {}
return self._gi.make_get_request(self._url(route), data=data)
def _post(self, route, data=None):
if data is None:
data = {}
data = data.copy()
data['key'] = self._gi.key
return requests.post(self._url(route), data=data)
def _put(self, route, data=None):
if data is None:
data = {}
data = data.copy()
data['key'] = self._gi.key
return requests.put(self._url(route), data=data)
def _delete(self, route, data=None):
if data is None:
data = {}
data = data.copy()
data['key'] = self._gi.key
return requests.delete(self._url(route), data=data)
def _url(self, route):
if route.startswith("/api/"):
route = route[len("/api/"):]
return self._api_url() + "/" + route
class GiDatasetPopulator(BaseDatasetPopulator, GiPostGetMixin):
"""Implementation of BaseDatasetPopulator backed by bioblend."""
def __init__(self, gi):
"""Construct a dataset populator from a bioblend GalaxyInstance."""
self._gi = gi
class GiDatasetCollectionPopulator(BaseDatasetCollectionPopulator, GiPostGetMixin):
"""Implementation of BaseDatasetCollectionPopulator backed by bioblend."""
def __init__(self, gi):
"""Construct a dataset collection populator from a bioblend GalaxyInstance."""
self._gi = gi
self.dataset_populator = GiDatasetPopulator(gi)
self.dataset_collection_populator = GiDatasetCollectionPopulator(gi)
def _create_collection(self, payload):
create_response = self._post("dataset_collections", data=payload)
return create_response
class GiWorkflowPopulator(BaseWorkflowPopulator, GiPostGetMixin):
"""Implementation of BaseWorkflowPopulator backed by bioblend."""
def __init__(self, gi):
"""Construct a workflow populator from a bioblend GalaxyInstance."""
self._gi = gi
self.dataset_populator = GiDatasetPopulator(gi)
def wait_on(function, desc, timeout=DEFAULT_TIMEOUT):
return tool_util_wait_on(function, desc, timeout)
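# Illustrative usage sketch (not part of the module): wiring the bioblend-backed populators
# above to a running Galaxy server. The URL, API key and history_id are placeholders.
#
#     from bioblend.galaxy import GalaxyInstance
#
#     gi = GalaxyInstance("https://galaxy.example.org", key="<api-key>")
#     dataset_populator = GiDatasetPopulator(gi)
#     collection_populator = GiDatasetCollectionPopulator(gi)
#     # history_id must refer to an existing history on that server.
#     create_response = collection_populator.create_pair_in_history(history_id)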
|
[] |
[] |
[
"GALAXY_TEST_SKIP_FLAKEY_TESTS_ON_ERROR"
] |
[]
|
["GALAXY_TEST_SKIP_FLAKEY_TESTS_ON_ERROR"]
|
python
| 1 | 0 | |
module2-sql-for-analysis/insert_titanic.py
|
import psycopg2
import sqlite3
import os
from dotenv import load_dotenv
import pandas as pd
from sqlalchemy import create_engine
load_dotenv()
DB_NAME = os.getenv("DB_NAME")
DB_USER = os.getenv("DB_USER")
DB_PW = os.getenv("DB_PW")
DB_HOST = os.getenv("DB_HOST")
DB_URL = os.getenv("DB_URL")
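# Load the Titanic CSV and copy it into Postgres via SQLAlchemy (fails if the table already exists).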
df = pd.read_csv('titanic.csv')
alchemyEngine = create_engine(DB_URL)
postgreSQLConnection = alchemyEngine.connect()
df.to_sql('titanic_test', postgreSQLConnection, if_exists='fail')
postgreSQLConnection.close()
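# Reconnect with psycopg2 and count how many passengers died vs. survived in the new table.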
pg_conn = psycopg2.connect(
dbname=DB_NAME, user=DB_USER,
password=DB_PW, host=DB_HOST
)
pg_curs = pg_conn.cursor()
query = """SELECT
SUM(CASE when "Survived" = 0 THEN 1 else 0 END) as dead,
SUM(CASE when "Survived" = 1 THEN 1 else 0 END) as alive
FROM
titanic_test"""
pg_curs.execute(query)
result = pg_curs.fetchall()
print("Passengers dead: ", result[0][0])
print("Passengers survived: ", result[0][1])
pg_curs.close()
pg_conn.close()
|
[] |
[] |
[
"DB_HOST",
"DB_PW",
"DB_NAME",
"DB_USER",
"DB_URL"
] |
[]
|
["DB_HOST", "DB_PW", "DB_NAME", "DB_USER", "DB_URL"]
|
python
| 5 | 0 | |
userbot/__init__.py
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot initialization. """
import os
from sys import version_info
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from dotenv import load_dotenv
from requests import get
from telethon import TelegramClient
from telethon.sessions import StringSession
load_dotenv("config.env")
# Bot Logs setup:
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
if version_info < (3, 8):
    LOGS.info("You MUST have a python version of at least 3.8. "
              "Multiple features depend on this. Bot quitting.")
quit(1)
# Check if the config was edited by using the already used variable.
# Basically, it's the 'virginity check' for the config file ;)
CONFIG_CHECK = os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________", None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
# Telegram App KEY and HASH
API_KEY = os.environ.get("API_KEY", None)
API_HASH = os.environ.get("API_HASH", None)
# Userbot Session String
STRING_SESSION = os.environ.get("STRING_SESSION", None)
# Logging channel/group ID configuration.
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID", "0"))
# Userbot logging feature switch.
BOTLOG = sb(os.environ.get("BOTLOG", "False"))
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER", "False"))
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "False"))
# Heroku Credentials for updater.
HEROKU_MEMEZ = sb(os.environ.get("HEROKU_MEMEZ", "False"))
HEROKU_APP_NAME = os.environ.get("HEROKU_APPNAME", None)
HEROKU_API_KEY = os.environ.get("HEROKU_APIKEY", None)
# Github Credentials for updater and Gitupload.
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME", None)
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
# Genius lyrics API token; get this value from https://genius.com/developers
GENIUS = os.environ.get("GENIUS_API_TOKEN", None)
# Custom (forked) repo URL for updater.
UPSTREAM_REPO_URL = os.environ.get(
"UPSTREAM_REPO_URL",
"https://github.com/keselekpermen69/userbutt.git")
# Console verbose logging
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL", None)
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None)
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None)
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY", None)
# Lydia API
LYDIA_API_KEY = os.environ.get("LYDIA_API_KEY", None)
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False"))
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False"))
# Youtube API key
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
# Telegraph
TELEGRAPH_SHORT_NAME = os.environ.get("TELEGRAPH_SHORT_NAME", None)
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", None)
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY", ""))
TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1))
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True"))
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX", None)
DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None)
# Stickerchat Module
QUOTES_API_TOKEN = os.environ.get("QUOTES_API_TOKEN", None)
LASTFM_API = os.environ.get("LASTFM_API", None)
LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None)
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None)
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None)
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN) if LASTFM_PASSWORD_PLAIN else None
if LASTFM_API and LASTFM_SECRET and LASTFM_USERNAME and LASTFM_PASS:
lastfm = LastFMNetwork(api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS)
else:
lastfm = None
# Google Drive Module
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None)
GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None)
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY",
"./downloads")
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists('bin'):
os.mkdir('bin')
binaries = {
"https://raw.githubusercontent.com/adekmaulana/megadown/master/megadown":
"bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py":
"bin/cmrudl"
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# 'bot' variable
if STRING_SESSION:
# pylint: disable=invalid-name
bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH)
else:
# pylint: disable=invalid-name
bot = TelegramClient("userbot", API_KEY, API_HASH)
async def check_botlog_chatid():
if not BOTLOG_CHATID and LOGSPAMMER:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the private error log storage to work."
)
quit(1)
elif not BOTLOG_CHATID and BOTLOG:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the userbot logging feature to work."
)
quit(1)
elif not BOTLOG or not LOGSPAMMER:
return
entity = await bot.get_entity(BOTLOG_CHATID)
if entity.default_banned_rights.send_messages:
LOGS.info(
"Your account doesn't have rights to send messages to BOTLOG_CHATID "
"group. Check if you typed the Chat ID correctly.")
quit(1)
with bot:
try:
bot.loop.run_until_complete(check_botlog_chatid())
    except Exception:
LOGS.info(
"BOTLOG_CHATID environment variable isn't a "
"valid entity. Check your environment variables/config.env file.")
quit(1)
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
CMD_HELP = {}
ISAFK = False
AFKREASON = None
|
[] |
[] |
[
"GOOGLE_CHROME_BIN",
"G_DRIVE_CLIENT_SECRET",
"LYDIA_API_KEY",
"COUNTRY",
"LASTFM_API",
"ANTI_SPAMBOT_SHOUT",
"UPSTREAM_REPO_URL",
"OCR_SPACE_API_KEY",
"HEROKU_APPNAME",
"BIO_PREFIX",
"LOGSPAMMER",
"GENIUS_API_TOKEN",
"TZ_NUMBER",
"LASTFM_PASSWORD",
"TELEGRAPH_SHORT_NAME",
"DATABASE_URL",
"GDRIVE_FOLDER_ID",
"___________PLOX_______REMOVE_____THIS_____LINE__________",
"GIT_REPO_NAME",
"HEROKU_APIKEY",
"CHROME_DRIVER",
"YOUTUBE_API_KEY",
"HEROKU_MEMEZ",
"LASTFM_USERNAME",
"G_DRIVE_CLIENT_ID",
"API_KEY",
"PM_AUTO_BAN",
"DEFAULT_BIO",
"ANTI_SPAMBOT",
"OPEN_WEATHER_MAP_APPID",
"LASTFM_SECRET",
"G_DRIVE_AUTH_TOKEN_DATA",
"WEATHER_DEFCITY",
"STRING_SESSION",
"QUOTES_API_TOKEN",
"CONSOLE_LOGGER_VERBOSE",
"GITHUB_ACCESS_TOKEN",
"ALIVE_NAME",
"BOTLOG_CHATID",
"TMP_DOWNLOAD_DIRECTORY",
"CLEAN_WELCOME",
"REM_BG_API_KEY",
"BOTLOG",
"API_HASH"
] |
[]
|
["GOOGLE_CHROME_BIN", "G_DRIVE_CLIENT_SECRET", "LYDIA_API_KEY", "COUNTRY", "LASTFM_API", "ANTI_SPAMBOT_SHOUT", "UPSTREAM_REPO_URL", "OCR_SPACE_API_KEY", "HEROKU_APPNAME", "BIO_PREFIX", "LOGSPAMMER", "GENIUS_API_TOKEN", "TZ_NUMBER", "LASTFM_PASSWORD", "TELEGRAPH_SHORT_NAME", "DATABASE_URL", "GDRIVE_FOLDER_ID", "___________PLOX_______REMOVE_____THIS_____LINE__________", "GIT_REPO_NAME", "HEROKU_APIKEY", "CHROME_DRIVER", "YOUTUBE_API_KEY", "HEROKU_MEMEZ", "LASTFM_USERNAME", "G_DRIVE_CLIENT_ID", "API_KEY", "PM_AUTO_BAN", "DEFAULT_BIO", "ANTI_SPAMBOT", "OPEN_WEATHER_MAP_APPID", "LASTFM_SECRET", "G_DRIVE_AUTH_TOKEN_DATA", "WEATHER_DEFCITY", "STRING_SESSION", "QUOTES_API_TOKEN", "CONSOLE_LOGGER_VERBOSE", "GITHUB_ACCESS_TOKEN", "ALIVE_NAME", "BOTLOG_CHATID", "TMP_DOWNLOAD_DIRECTORY", "CLEAN_WELCOME", "REM_BG_API_KEY", "BOTLOG", "API_HASH"]
|
python
| 44 | 0 | |
src/azure-cli/azure/cli/command_modules/network/custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import Counter, OrderedDict
from msrestazure.tools import parse_resource_id, is_valid_resource_id, resource_id
from knack.log import get_logger
from azure.mgmt.trafficmanager.models import MonitorProtocol, ProfileStatus
# pylint: disable=no-self-use,no-member,too-many-lines,unused-argument
from azure.cli.core.commands import cached_get, cached_put, upsert_to_collection, get_property
from azure.cli.core.commands.client_factory import get_subscription_id, get_mgmt_service_client
from azure.cli.core.util import CLIError, sdk_no_wait, find_child_item, find_child_collection
from azure.cli.core.azclierror import InvalidArgumentValueError, RequiredArgumentMissingError, \
UnrecognizedArgumentError, ResourceNotFoundError, CLIInternalError
from azure.cli.core.profiles import ResourceType, supported_api_version
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.command_modules.network.zone_file.parse_zone_file import parse_zone_file
from azure.cli.command_modules.network.zone_file.make_zone_file import make_zone_file
import threading
import time
import platform
import subprocess
logger = get_logger(__name__)
# region Utility methods
def _log_pprint_template(template):
import json
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
def _get_default_name(balancer, property_name, option_name):
return _get_default_value(balancer, property_name, option_name, True)
def _get_default_id(balancer, property_name, option_name):
return _get_default_value(balancer, property_name, option_name, False)
def _get_default_value(balancer, property_name, option_name, return_name):
values = [x.id for x in getattr(balancer, property_name)]
if len(values) > 1:
raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' "
"explicitly.".format(option_name, ', '.join(values)))
if not values:
raise CLIError("No existing values found for '{0}'. Create one first and try "
"again.".format(option_name))
return values[0].rsplit('/', 1)[1] if return_name else values[0]
# endregion
# region Generic list commands
def _generic_list(cli_ctx, operation_name, resource_group_name):
ncf = network_client_factory(cli_ctx)
operation_group = getattr(ncf, operation_name)
if resource_group_name:
return operation_group.list(resource_group_name)
return operation_group.list_all()
def list_vnet(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'virtual_networks', resource_group_name)
def list_express_route_circuits(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'express_route_circuits', resource_group_name)
def create_express_route_auth(cmd, resource_group_name, circuit_name, authorization_name):
ExpressRouteCircuitAuthorization = cmd.get_models('ExpressRouteCircuitAuthorization')
client = network_client_factory(cmd.cli_ctx).express_route_circuit_authorizations
return client.begin_create_or_update(resource_group_name,
circuit_name,
authorization_name,
ExpressRouteCircuitAuthorization())
def list_lbs(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'load_balancers', resource_group_name)
def list_nics(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_interfaces', resource_group_name)
def list_nsgs(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_security_groups', resource_group_name)
def list_nsg_rules(cmd, resource_group_name, network_security_group_name, include_default=False):
client = network_client_factory(cmd.cli_ctx).network_security_groups
nsg = client.get(resource_group_name, network_security_group_name)
rules = nsg.security_rules
if include_default:
rules = rules + nsg.default_security_rules
return rules
def list_custom_ip_prefixes(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'custom_ip_prefixes', resource_group_name)
def list_public_ips(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'public_ip_addresses', resource_group_name)
def list_public_ip_prefixes(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'public_ip_prefixes', resource_group_name)
def list_route_tables(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'route_tables', resource_group_name)
def list_application_gateways(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'application_gateways', resource_group_name)
def list_network_watchers(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_watchers', resource_group_name)
# endregion
# region ApplicationGateways
# pylint: disable=too-many-locals
def _is_v2_sku(sku):
return 'v2' in sku
# pylint: disable=too-many-statements
def create_application_gateway(cmd, application_gateway_name, resource_group_name, location=None,
tags=None, no_wait=False, capacity=2,
cert_data=None, cert_password=None, key_vault_secret_id=None,
frontend_port=None, http_settings_cookie_based_affinity='disabled',
http_settings_port=80, http_settings_protocol='Http',
routing_rule_type='Basic', servers=None,
sku=None,
private_ip_address=None, public_ip_address=None,
public_ip_address_allocation=None,
subnet='default', subnet_address_prefix='10.0.0.0/24',
virtual_network_name=None, vnet_address_prefix='10.0.0.0/16',
public_ip_address_type=None, subnet_type=None, validate=False,
connection_draining_timeout=0, enable_http2=None, min_capacity=None, zones=None,
custom_error_pages=None, firewall_policy=None, max_capacity=None,
user_assigned_identity=None,
enable_private_link=False,
private_link_ip_address=None,
private_link_subnet='PrivateLinkDefaultSubnet',
private_link_subnet_prefix='10.0.1.0/24',
private_link_primary=None,
trusted_client_cert=None,
ssl_profile=None,
ssl_profile_id=None,
ssl_cert_name=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_application_gateway_resource, build_public_ip_resource, build_vnet_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
tags = tags or {}
sku_tier = sku.split('_', 1)[0] if not _is_v2_sku(sku) else sku
http_listener_protocol = 'https' if (cert_data or key_vault_secret_id) else 'http'
private_ip_allocation = 'Static' if private_ip_address else 'Dynamic'
virtual_network_name = virtual_network_name or '{}Vnet'.format(application_gateway_name)
# Build up the ARM template
master_template = ArmTemplateBuilder()
ag_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
subnet_id = subnet if is_valid_resource_id(subnet) else None
private_ip_allocation = IPAllocationMethod.static.value if private_ip_address \
else IPAllocationMethod.dynamic.value
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if subnet_type == 'new':
ag_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(virtual_network_name))
vnet = build_vnet_resource(
cmd, virtual_network_name, location, tags, vnet_address_prefix, subnet,
subnet_address_prefix,
enable_private_link=enable_private_link,
private_link_subnet=private_link_subnet,
private_link_subnet_prefix=private_link_subnet_prefix)
master_template.add_resource(vnet)
subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template,
virtual_network_name, subnet)
if public_ip_address_type == 'new':
ag_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
public_ip_sku = None
if _is_v2_sku(sku):
public_ip_sku = 'Standard'
public_ip_address_allocation = 'Static'
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
None, public_ip_sku, None))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
private_link_subnet_id = None
private_link_name = 'PrivateLinkDefaultConfiguration'
private_link_ip_allocation_method = 'Dynamic'
if enable_private_link:
private_link_subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template,
virtual_network_name,
private_link_subnet)
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
app_gateway_resource = build_application_gateway_resource(
cmd, application_gateway_name, location, tags, sku, sku_tier, capacity, servers, frontend_port,
private_ip_address, private_ip_allocation, cert_data, cert_password, key_vault_secret_id,
http_settings_cookie_based_affinity, http_settings_protocol, http_settings_port,
http_listener_protocol, routing_rule_type, public_ip_id, subnet_id,
connection_draining_timeout, enable_http2, min_capacity, zones, custom_error_pages,
firewall_policy, max_capacity, user_assigned_identity,
enable_private_link, private_link_name,
private_link_ip_address, private_link_ip_allocation_method, private_link_primary,
private_link_subnet_id, trusted_client_cert, ssl_profile, ssl_profile_id, ssl_cert_name)
app_gateway_resource['dependsOn'] = ag_dependencies
master_template.add_variable(
'appGwID',
"[resourceId('Microsoft.Network/applicationGateways', '{}')]".format(
application_gateway_name))
master_template.add_resource(app_gateway_resource)
master_template.add_output('applicationGateway', application_gateway_name, output_type='object')
if cert_password:
master_template.add_secure_parameter('certPassword', cert_password)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'ag_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def update_application_gateway(cmd, instance, sku=None, capacity=None, tags=None, enable_http2=None, min_capacity=None,
custom_error_pages=None, max_capacity=None):
if sku is not None:
instance.sku.tier = sku.split('_', 1)[0] if not _is_v2_sku(sku) else sku
try:
if min_capacity is not None:
instance.autoscale_configuration.min_capacity = min_capacity
if max_capacity is not None:
instance.autoscale_configuration.max_capacity = max_capacity
except AttributeError:
instance.autoscale_configuration = {
'min_capacity': min_capacity,
'max_capacity': max_capacity
}
with cmd.update_context(instance) as c:
c.set_param('sku.name', sku)
c.set_param('sku.capacity', capacity)
c.set_param('tags', tags)
c.set_param('enable_http2', enable_http2)
c.set_param('custom_error_configurations', custom_error_pages)
return instance
def create_ag_authentication_certificate(cmd, resource_group_name, application_gateway_name, item_name,
cert_data, no_wait=False):
AuthCert = cmd.get_models('ApplicationGatewayAuthenticationCertificate')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_cert = AuthCert(data=cert_data, name=item_name)
upsert_to_collection(ag, 'authentication_certificates', new_cert, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def update_ag_authentication_certificate(instance, parent, item_name, cert_data):
instance.data = cert_data
return parent
def create_ag_backend_address_pool(cmd, resource_group_name, application_gateway_name, item_name,
servers=None, no_wait=False):
ApplicationGatewayBackendAddressPool = cmd.get_models('ApplicationGatewayBackendAddressPool')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_pool = ApplicationGatewayBackendAddressPool(name=item_name, backend_addresses=servers)
upsert_to_collection(ag, 'backend_address_pools', new_pool, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_backend_address_pool(instance, parent, item_name, servers=None):
if servers is not None:
instance.backend_addresses = servers
return parent
def create_ag_frontend_ip_configuration(cmd, resource_group_name, application_gateway_name, item_name,
public_ip_address=None, subnet=None,
virtual_network_name=None, private_ip_address=None,
private_ip_address_allocation=None, no_wait=False):
ApplicationGatewayFrontendIPConfiguration, SubResource = cmd.get_models(
'ApplicationGatewayFrontendIPConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if public_ip_address:
new_config = ApplicationGatewayFrontendIPConfiguration(
name=item_name,
public_ip_address=SubResource(id=public_ip_address))
else:
new_config = ApplicationGatewayFrontendIPConfiguration(
name=item_name,
private_ip_address=private_ip_address if private_ip_address else None,
private_ip_allocation_method='Static' if private_ip_address else 'Dynamic',
subnet=SubResource(id=subnet))
upsert_to_collection(ag, 'frontend_ip_configurations', new_config, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_frontend_ip_configuration(cmd, instance, parent, item_name, public_ip_address=None,
subnet=None, virtual_network_name=None,
private_ip_address=None):
SubResource = cmd.get_models('SubResource')
if public_ip_address is not None:
instance.public_ip_address = SubResource(id=public_ip_address)
if subnet is not None:
instance.subnet = SubResource(id=subnet)
if private_ip_address is not None:
instance.private_ip_address = private_ip_address
instance.private_ip_allocation_method = 'Static'
return parent
def create_ag_frontend_port(cmd, resource_group_name, application_gateway_name, item_name, port,
no_wait=False):
ApplicationGatewayFrontendPort = cmd.get_models('ApplicationGatewayFrontendPort')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_port = ApplicationGatewayFrontendPort(name=item_name, port=port)
upsert_to_collection(ag, 'frontend_ports', new_port, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_frontend_port(instance, parent, item_name, port=None):
if port is not None:
instance.port = port
return parent
def create_ag_http_listener(cmd, resource_group_name, application_gateway_name, item_name,
frontend_port, frontend_ip=None, host_name=None, ssl_cert=None,
firewall_policy=None, no_wait=False, host_names=None):
ApplicationGatewayHttpListener, SubResource = cmd.get_models('ApplicationGatewayHttpListener', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if not frontend_ip:
frontend_ip = _get_default_id(ag, 'frontend_ip_configurations', '--frontend-ip')
new_listener = ApplicationGatewayHttpListener(
name=item_name,
frontend_ip_configuration=SubResource(id=frontend_ip),
frontend_port=SubResource(id=frontend_port),
host_name=host_name,
require_server_name_indication=True if ssl_cert and host_name else None,
protocol='https' if ssl_cert else 'http',
ssl_certificate=SubResource(id=ssl_cert) if ssl_cert else None,
host_names=host_names
)
if cmd.supported_api_version(min_api='2019-09-01'):
new_listener.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
upsert_to_collection(ag, 'http_listeners', new_listener, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_http_listener(cmd, instance, parent, item_name, frontend_ip=None, frontend_port=None,
host_name=None, ssl_cert=None, firewall_policy=None, host_names=None):
SubResource = cmd.get_models('SubResource')
if frontend_ip is not None:
instance.frontend_ip_configuration = SubResource(id=frontend_ip)
if frontend_port is not None:
instance.frontend_port = SubResource(id=frontend_port)
if ssl_cert is not None:
if ssl_cert:
instance.ssl_certificate = SubResource(id=ssl_cert)
instance.protocol = 'Https'
else:
instance.ssl_certificate = None
instance.protocol = 'Http'
if host_name is not None:
instance.host_name = host_name or None
if cmd.supported_api_version(min_api='2019-09-01'):
if firewall_policy is not None:
instance.firewall_policy = SubResource(id=firewall_policy)
if host_names is not None:
instance.host_names = host_names or None
instance.require_server_name_indication = instance.host_name and instance.protocol.lower() == 'https'
return parent
def assign_ag_identity(cmd, resource_group_name, application_gateway_name,
user_assigned_identity, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ManagedServiceIdentity, ManagedServiceIdentityUserAssignedIdentitiesValue = \
cmd.get_models('ManagedServiceIdentity',
'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
    user_assigned_identity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
    user_assigned_identities_instance = dict()
    user_assigned_identities_instance[user_assigned_identity] = user_assigned_identity_instance
identity_instance = ManagedServiceIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identities_instance
)
ag.identity = identity_instance
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def remove_ag_identity(cmd, resource_group_name, application_gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
if ag.identity is None:
logger.warning("This command will be ignored. The identity doesn't exist.")
ag.identity = None
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_identity(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
if ag.identity is None:
raise CLIError("Please first use 'az network application-gateway identity assign` to init the identity.")
return ag.identity
def add_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
frontend_ip,
private_link_name,
private_link_subnet_name_or_id,
private_link_subnet_prefix=None,
private_link_primary=None,
private_link_ip_address=None,
no_wait=False):
(SubResource, IPAllocationMethod, Subnet,
ApplicationGatewayPrivateLinkConfiguration,
ApplicationGatewayPrivateLinkIpConfiguration) = cmd.get_models(
'SubResource', 'IPAllocationMethod', 'Subnet',
'ApplicationGatewayPrivateLinkConfiguration', 'ApplicationGatewayPrivateLinkIpConfiguration')
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
private_link_config_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=appgw.name,
child_type_1='privateLinkConfigurations',
child_name_1=private_link_name
)
if not any(fic for fic in appgw.frontend_ip_configurations if fic.name == frontend_ip):
raise CLIError("Frontend IP doesn't exist")
for fic in appgw.frontend_ip_configurations:
if fic.private_link_configuration and fic.private_link_configuration.id == private_link_config_id:
            raise CLIError('Frontend IP already references an existing Private Link')
if fic.name == frontend_ip:
break
else:
raise CLIError("Frontend IP doesn't exist")
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
            raise CLIError('A Private Link with this name already exists')
# get the virtual network of this application gateway
vnet_name = parse_resource_id(appgw.gateway_ip_configurations[0].subnet.id)['name']
vnet = ncf.virtual_networks.get(resource_group_name, vnet_name)
# prepare the subnet for new private link
for subnet in vnet.subnets:
        if subnet.name == private_link_subnet_name_or_id:
            raise CLIError('A subnet with this name already exists')
        if subnet.address_prefix == private_link_subnet_prefix:
            raise CLIError('The subnet address prefix is already in use')
        if subnet.address_prefixes and private_link_subnet_prefix in subnet.address_prefixes:
            raise CLIError('The subnet address prefix is already in use')
if is_valid_resource_id(private_link_subnet_name_or_id):
private_link_subnet_id = private_link_subnet_name_or_id
else:
private_link_subnet = Subnet(name=private_link_subnet_name_or_id,
address_prefix=private_link_subnet_prefix,
private_link_service_network_policies='Disabled')
private_link_subnet_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_name,
child_type_1='subnets',
child_name_1=private_link_subnet_name_or_id
)
vnet.subnets.append(private_link_subnet)
ncf.virtual_networks.begin_create_or_update(resource_group_name, vnet_name, vnet)
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
private_link_ip_config = ApplicationGatewayPrivateLinkIpConfiguration(
name='PrivateLinkDefaultIPConfiguration',
private_ip_address=private_link_ip_address,
private_ip_allocation_method=private_link_ip_allocation_method,
subnet=SubResource(id=private_link_subnet_id),
primary=private_link_primary
)
private_link_config = ApplicationGatewayPrivateLinkConfiguration(
name=private_link_name,
ip_configurations=[private_link_ip_config]
)
# associate the private link with the frontend IP configuration
for fic in appgw.frontend_ip_configurations:
if fic.name == frontend_ip:
fic.private_link_configuration = SubResource(id=private_link_config_id)
appgw.private_link_configurations.append(private_link_config)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name, appgw)
def show_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
private_link_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
return target_private_link
def list_ag_private_link(cmd,
resource_group_name,
application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.private_link_configurations
def remove_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
removed_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
removed_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
for fic in appgw.frontend_ip_configurations:
if fic.private_link_configuration and fic.private_link_configuration.id == removed_private_link.id:
fic.private_link_configuration = None
    # the leftover subnet has to be deleted manually
# rs = parse_resource_id(removed_private_link.ip_configurations[0].subnet.id)
# vnet_resource_group, vnet_name, subnet = rs['resource_group'], rs['name'], rs['child_name_1']
# ncf.subnets.delete(vnet_resource_group, vnet_name, subnet)
appgw.private_link_configurations.remove(removed_private_link)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
# region application-gateway trusted-client-certificates
def add_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
client_cert_data, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
ApplicationGatewayTrustedClientCertificate = cmd.get_models('ApplicationGatewayTrustedClientCertificate')
cert = ApplicationGatewayTrustedClientCertificate(name=client_cert_name, data=client_cert_data)
appgw.trusted_client_certificates.append(cert)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def list_trusted_client_certificate(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.trusted_client_certificates
def remove_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
appgw.trusted_client_certificates.remove(cert)
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def show_ag_backend_health(cmd, client, resource_group_name, application_gateway_name, expand=None,
protocol=None, host=None, path=None, timeout=None, host_name_from_http_settings=None,
match_body=None, match_status_codes=None, address_pool=None, http_settings=None):
from azure.cli.core.commands import LongRunningOperation
on_demand_arguments = {protocol, host, path, timeout, host_name_from_http_settings, match_body, match_status_codes,
address_pool, http_settings}
if on_demand_arguments.difference({None}) and cmd.supported_api_version(min_api='2019-04-01'):
SubResource, ApplicationGatewayOnDemandProbe, ApplicationGatewayProbeHealthResponseMatch = cmd.get_models(
"SubResource", "ApplicationGatewayOnDemandProbe", "ApplicationGatewayProbeHealthResponseMatch")
probe_request = ApplicationGatewayOnDemandProbe(
protocol=protocol,
host=host,
path=path,
timeout=timeout,
pick_host_name_from_backend_http_settings=host_name_from_http_settings
)
if match_body is not None or match_status_codes is not None:
probe_request.match = ApplicationGatewayProbeHealthResponseMatch(
body=match_body,
status_codes=match_status_codes,
)
if address_pool is not None:
if not is_valid_resource_id(address_pool):
address_pool = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=application_gateway_name,
child_type_1='backendAddressPools',
child_name_1=address_pool
)
probe_request.backend_address_pool = SubResource(id=address_pool)
if http_settings is not None:
if not is_valid_resource_id(http_settings):
http_settings = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=application_gateway_name,
child_type_1='backendHttpSettingsCollection',
child_name_1=http_settings
)
probe_request.backend_http_settings = SubResource(id=http_settings)
return LongRunningOperation(cmd.cli_ctx)(client.begin_backend_health_on_demand(
resource_group_name, application_gateway_name, probe_request, expand))
return LongRunningOperation(cmd.cli_ctx)(client.begin_backend_health(
resource_group_name, application_gateway_name, expand))
# endregion
# region application-gateway ssl-profile
def add_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, policy_name=None,
policy_type=None, min_protocol_version=None, cipher_suites=None, disabled_ssl_protocols=None,
trusted_client_certificates=None, client_auth_configuration=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
(SubResource,
ApplicationGatewaySslPolicy,
ApplicationGatewayClientAuthConfiguration,
ApplicationGatewaySslProfile) = cmd.get_models('SubResource',
'ApplicationGatewaySslPolicy',
'ApplicationGatewayClientAuthConfiguration',
'ApplicationGatewaySslProfile')
sr_trusted_client_certificates = [SubResource(id=item) for item in
trusted_client_certificates] if trusted_client_certificates else None
ssl_policy = ApplicationGatewaySslPolicy(policy_name=policy_name, policy_type=policy_type,
min_protocol_version=min_protocol_version,
cipher_suites=cipher_suites, disabled_ssl_protocols=disabled_ssl_protocols)
client_auth = ApplicationGatewayClientAuthConfiguration(
verify_client_cert_issuer_dn=client_auth_configuration) if client_auth_configuration else None
ssl_profile = ApplicationGatewaySslProfile(trusted_client_certificates=sr_trusted_client_certificates,
ssl_policy=ssl_policy, client_auth_configuration=client_auth,
name=ssl_profile_name)
appgw.ssl_profiles.append(ssl_profile)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def list_ssl_profile(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.ssl_profiles
def remove_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
appgw.ssl_profiles.remove(profile)
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
# endregion
def add_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name,
private_link_primary=False,
private_link_ip_address=None,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
(SubResource, IPAllocationMethod,
ApplicationGatewayPrivateLinkIpConfiguration) = \
cmd.get_models('SubResource', 'IPAllocationMethod',
'ApplicationGatewayPrivateLinkIpConfiguration')
private_link_subnet_id = target_private_link.ip_configurations[0].subnet.id
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
private_link_ip_config = ApplicationGatewayPrivateLinkIpConfiguration(
name=private_link_ip_name,
private_ip_address=private_link_ip_address,
private_ip_allocation_method=private_link_ip_allocation_method,
subnet=SubResource(id=private_link_subnet_id),
primary=private_link_primary
)
target_private_link.ip_configurations.append(private_link_ip_config)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
def show_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
target_private_link_ip_config = None
for pic in target_private_link.ip_configurations:
if pic.name == private_link_ip_name:
target_private_link_ip_config = pic
break
else:
raise CLIError("IP Configuration doesn't exist")
return target_private_link_ip_config
def list_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
return target_private_link.ip_configurations
def remove_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
updated_ip_configurations = target_private_link.ip_configurations
for pic in target_private_link.ip_configurations:
if pic.name == private_link_ip_name:
updated_ip_configurations.remove(pic)
break
else:
raise CLIError("IP Configuration doesn't exist")
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
def create_ag_backend_http_settings_collection(cmd, resource_group_name, application_gateway_name, item_name, port,
probe=None, protocol='http', cookie_based_affinity=None, timeout=None,
no_wait=False, connection_draining_timeout=0,
host_name=None, host_name_from_backend_pool=None,
affinity_cookie_name=None, enable_probe=None, path=None,
auth_certs=None, root_certs=None):
ApplicationGatewayBackendHttpSettings, ApplicationGatewayConnectionDraining, SubResource = cmd.get_models(
'ApplicationGatewayBackendHttpSettings', 'ApplicationGatewayConnectionDraining', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_settings = ApplicationGatewayBackendHttpSettings(
port=port,
protocol=protocol,
cookie_based_affinity=cookie_based_affinity or 'Disabled',
request_timeout=timeout,
probe=SubResource(id=probe) if probe else None,
name=item_name)
if cmd.supported_api_version(min_api='2016-09-01'):
new_settings.authentication_certificates = [SubResource(id=x) for x in auth_certs or []]
if cmd.supported_api_version(min_api='2016-12-01'):
new_settings.connection_draining = \
ApplicationGatewayConnectionDraining(
enabled=bool(connection_draining_timeout), drain_timeout_in_sec=connection_draining_timeout or 1)
if cmd.supported_api_version(min_api='2017-06-01'):
new_settings.host_name = host_name
new_settings.pick_host_name_from_backend_address = host_name_from_backend_pool
new_settings.affinity_cookie_name = affinity_cookie_name
new_settings.probe_enabled = enable_probe
new_settings.path = path
if cmd.supported_api_version(min_api='2019-04-01'):
new_settings.trusted_root_certificates = [SubResource(id=x) for x in root_certs or []]
upsert_to_collection(ag, 'backend_http_settings_collection', new_settings, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
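# Illustrative sketch only (not part of the original module); resource names and values below
# are placeholders, and `cmd` is assumed to be an initialized CLI command context:
#   create_ag_backend_http_settings_collection(cmd, 'MyResourceGroup', 'MyAppGateway',
#                                               'httpsSettings', port=443, protocol='Https',
#                                               timeout=30, connection_draining_timeout=60)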
def update_ag_backend_http_settings_collection(cmd, instance, parent, item_name, port=None, probe=None, protocol=None,
cookie_based_affinity=None, timeout=None,
connection_draining_timeout=None,
host_name=None, host_name_from_backend_pool=None,
affinity_cookie_name=None, enable_probe=None, path=None,
auth_certs=None, root_certs=None):
SubResource = cmd.get_models('SubResource')
if auth_certs == "":
instance.authentication_certificates = None
elif auth_certs is not None:
instance.authentication_certificates = [SubResource(id=x) for x in auth_certs]
if root_certs == "":
instance.trusted_root_certificates = None
elif root_certs is not None:
instance.trusted_root_certificates = [SubResource(id=x) for x in root_certs]
if port is not None:
instance.port = port
if probe is not None:
instance.probe = SubResource(id=probe)
if protocol is not None:
instance.protocol = protocol
if cookie_based_affinity is not None:
instance.cookie_based_affinity = cookie_based_affinity
if timeout is not None:
instance.request_timeout = timeout
if connection_draining_timeout is not None:
instance.connection_draining = {
'enabled': bool(connection_draining_timeout),
'drain_timeout_in_sec': connection_draining_timeout or 1
}
if host_name is not None:
instance.host_name = host_name
if host_name_from_backend_pool is not None:
instance.pick_host_name_from_backend_address = host_name_from_backend_pool
if affinity_cookie_name is not None:
instance.affinity_cookie_name = affinity_cookie_name
if enable_probe is not None:
instance.probe_enabled = enable_probe
if path is not None:
instance.path = path
return parent
def create_ag_redirect_configuration(cmd, resource_group_name, application_gateway_name, item_name, redirect_type,
target_listener=None, target_url=None, include_path=None,
include_query_string=None, no_wait=False):
ApplicationGatewayRedirectConfiguration, SubResource = cmd.get_models(
'ApplicationGatewayRedirectConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_config = ApplicationGatewayRedirectConfiguration(
name=item_name,
redirect_type=redirect_type,
target_listener=SubResource(id=target_listener) if target_listener else None,
target_url=target_url,
include_path=include_path,
include_query_string=include_query_string)
upsert_to_collection(ag, 'redirect_configurations', new_config, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def update_ag_redirect_configuration(cmd, instance, parent, item_name, redirect_type=None,
target_listener=None, target_url=None, include_path=None,
include_query_string=None, raw=False):
SubResource = cmd.get_models('SubResource')
if redirect_type:
instance.redirect_type = redirect_type
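    # target_listener and target_url are mutually exclusive, so setting one clears the other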
if target_listener:
instance.target_listener = SubResource(id=target_listener)
instance.target_url = None
if target_url:
instance.target_listener = None
instance.target_url = target_url
if include_path is not None:
instance.include_path = include_path
if include_query_string is not None:
instance.include_query_string = include_query_string
return parent
def create_ag_rewrite_rule_set(cmd, resource_group_name, application_gateway_name, item_name, no_wait=False):
ApplicationGatewayRewriteRuleSet = cmd.get_models(
'ApplicationGatewayRewriteRuleSet')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_set = ApplicationGatewayRewriteRuleSet(name=item_name)
upsert_to_collection(ag, 'rewrite_rule_sets', new_set, 'name')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, item_name,
path='rewrite_rule_sets', key_path='name')
def update_ag_rewrite_rule_set(instance, parent, item_name):
return parent
def create_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name,
sequence=None, request_headers=None, response_headers=None, no_wait=False,
modified_path=None, modified_query_string=None, enable_reroute=None):
(ApplicationGatewayRewriteRule,
ApplicationGatewayRewriteRuleActionSet,
ApplicationGatewayUrlConfiguration) = cmd.get_models('ApplicationGatewayRewriteRule',
'ApplicationGatewayRewriteRuleActionSet',
'ApplicationGatewayUrlConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
rule_set = find_child_item(ag, rule_set_name,
path='rewrite_rule_sets', key_path='name')
url_configuration = None
if any([modified_path, modified_query_string, enable_reroute]):
url_configuration = ApplicationGatewayUrlConfiguration(modified_path=modified_path,
modified_query_string=modified_query_string,
reroute=enable_reroute)
new_rule = ApplicationGatewayRewriteRule(
name=rule_name,
rule_sequence=sequence,
action_set=ApplicationGatewayRewriteRuleActionSet(
request_header_configurations=request_headers,
response_header_configurations=response_headers,
url_configuration=url_configuration
)
)
upsert_to_collection(rule_set, 'rewrite_rules', new_rule, 'name')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
def update_ag_rewrite_rule(instance, parent, cmd, rule_set_name, rule_name, sequence=None,
request_headers=None, response_headers=None,
modified_path=None, modified_query_string=None, enable_reroute=None):
with cmd.update_context(instance) as c:
c.set_param('rule_sequence', sequence)
c.set_param('action_set.request_header_configurations', request_headers)
c.set_param('action_set.response_header_configurations', response_headers)
ApplicationGatewayUrlConfiguration = cmd.get_models('ApplicationGatewayUrlConfiguration')
url_configuration = None
if any([modified_path, modified_query_string, enable_reroute]):
url_configuration = ApplicationGatewayUrlConfiguration(modified_path=modified_path,
modified_query_string=modified_query_string,
reroute=enable_reroute)
c.set_param('action_set.url_configuration', url_configuration)
return parent
def show_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_item(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
def list_ag_rewrite_rules(cmd, resource_group_name, application_gateway_name, rule_set_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_collection(gateway, rule_set_name, path='rewrite_rule_sets.rewrite_rules', key_path='name')
def delete_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name, no_wait=None):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
rule_set = find_child_item(gateway, rule_set_name, path='rewrite_rule_sets', key_path='name')
rule = find_child_item(rule_set, rule_name, path='rewrite_rules', key_path='name')
rule_set.rewrite_rules.remove(rule)
sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, application_gateway_name, gateway)
def create_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name,
variable, no_wait=False, pattern=None, ignore_case=None, negate=None):
ApplicationGatewayRewriteRuleCondition = cmd.get_models(
'ApplicationGatewayRewriteRuleCondition')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
rule = find_child_item(ag, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
new_condition = ApplicationGatewayRewriteRuleCondition(
variable=variable,
pattern=pattern,
ignore_case=ignore_case,
negate=negate
)
upsert_to_collection(rule, 'conditions', new_condition, 'variable')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, rule_set_name, rule_name, variable,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name.variable')
def update_ag_rewrite_rule_condition(instance, parent, cmd, rule_set_name, rule_name, variable, pattern=None,
ignore_case=None, negate=None):
with cmd.update_context(instance) as c:
c.set_param('pattern', pattern)
c.set_param('ignore_case', ignore_case)
c.set_param('negate', negate)
return parent
def show_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name,
rule_name, variable):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_item(gateway, rule_set_name, rule_name, variable,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name.variable')
def list_ag_rewrite_rule_conditions(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_collection(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name')
def delete_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name,
rule_name, variable, no_wait=None):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
rule = find_child_item(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
condition = find_child_item(rule, variable, path='conditions', key_path='variable')
rule.conditions.remove(condition)
sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, application_gateway_name, gateway)
def create_ag_probe(cmd, resource_group_name, application_gateway_name, item_name, protocol, host,
path, interval=30, timeout=120, threshold=8, no_wait=False, host_name_from_http_settings=None,
min_servers=None, match_body=None, match_status_codes=None, port=None):
ApplicationGatewayProbe, ProbeMatchCriteria = cmd.get_models(
'ApplicationGatewayProbe', 'ApplicationGatewayProbeHealthResponseMatch')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_probe = ApplicationGatewayProbe(
name=item_name,
protocol=protocol,
host=host,
path=path,
interval=interval,
timeout=timeout,
unhealthy_threshold=threshold)
if cmd.supported_api_version(min_api='2017-06-01'):
new_probe.pick_host_name_from_backend_http_settings = host_name_from_http_settings
new_probe.min_servers = min_servers
new_probe.match = ProbeMatchCriteria(body=match_body, status_codes=match_status_codes)
if cmd.supported_api_version(min_api='2019-04-01'):
new_probe.port = port
upsert_to_collection(ag, 'probes', new_probe, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_probe(cmd, instance, parent, item_name, protocol=None, host=None, path=None,
interval=None, timeout=None, threshold=None, host_name_from_http_settings=None,
min_servers=None, match_body=None, match_status_codes=None, port=None):
if protocol is not None:
instance.protocol = protocol
if host is not None:
instance.host = host
if path is not None:
instance.path = path
if interval is not None:
instance.interval = interval
if timeout is not None:
instance.timeout = timeout
if threshold is not None:
instance.unhealthy_threshold = threshold
if host_name_from_http_settings is not None:
instance.pick_host_name_from_backend_http_settings = host_name_from_http_settings
if min_servers is not None:
instance.min_servers = min_servers
if match_body is not None or match_status_codes is not None:
ProbeMatchCriteria = \
cmd.get_models('ApplicationGatewayProbeHealthResponseMatch')
instance.match = instance.match or ProbeMatchCriteria()
if match_body is not None:
instance.match.body = match_body
if match_status_codes is not None:
instance.match.status_codes = match_status_codes
if port is not None:
instance.port = port
return parent
def create_ag_request_routing_rule(cmd, resource_group_name, application_gateway_name, item_name,
address_pool=None, http_settings=None, http_listener=None, redirect_config=None,
url_path_map=None, rule_type='Basic', no_wait=False, rewrite_rule_set=None,
priority=None):
ApplicationGatewayRequestRoutingRule, SubResource = cmd.get_models(
'ApplicationGatewayRequestRoutingRule', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if not address_pool and not redirect_config:
address_pool = _get_default_id(ag, 'backend_address_pools', '--address-pool')
if not http_settings and not redirect_config:
http_settings = _get_default_id(ag, 'backend_http_settings_collection', '--http-settings')
if not http_listener:
http_listener = _get_default_id(ag, 'http_listeners', '--http-listener')
new_rule = ApplicationGatewayRequestRoutingRule(
name=item_name,
rule_type=rule_type,
priority=priority,
backend_address_pool=SubResource(id=address_pool) if address_pool else None,
backend_http_settings=SubResource(id=http_settings) if http_settings else None,
http_listener=SubResource(id=http_listener),
url_path_map=SubResource(id=url_path_map) if url_path_map else None)
if cmd.supported_api_version(min_api='2017-06-01'):
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else None
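    # look up the local variable's name ('rewrite_rule_set') so supported_api_version
    # can be queried by parameter name instead of a hard-coded string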
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
upsert_to_collection(ag, 'request_routing_rules', new_rule, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_request_routing_rule(cmd, instance, parent, item_name, address_pool=None,
http_settings=None, http_listener=None, redirect_config=None, url_path_map=None,
rule_type=None, rewrite_rule_set=None, priority=None):
SubResource = cmd.get_models('SubResource')
if address_pool is not None:
instance.backend_address_pool = SubResource(id=address_pool)
if http_settings is not None:
instance.backend_http_settings = SubResource(id=http_settings)
if redirect_config is not None:
instance.redirect_configuration = SubResource(id=redirect_config)
if http_listener is not None:
instance.http_listener = SubResource(id=http_listener)
if url_path_map is not None:
instance.url_path_map = SubResource(id=url_path_map)
if rule_type is not None:
instance.rule_type = rule_type
if rewrite_rule_set is not None:
instance.rewrite_rule_set = SubResource(id=rewrite_rule_set)
with cmd.update_context(instance) as c:
c.set_param('priority', priority)
return parent
def create_ag_ssl_certificate(cmd, resource_group_name, application_gateway_name, item_name, cert_data=None,
cert_password=None, key_vault_secret_id=None, no_wait=False):
ApplicationGatewaySslCertificate = cmd.get_models('ApplicationGatewaySslCertificate')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_cert = ApplicationGatewaySslCertificate(
name=item_name, data=cert_data, password=cert_password, key_vault_secret_id=key_vault_secret_id)
upsert_to_collection(ag, 'ssl_certificates', new_cert, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_ssl_certificate(instance, parent, item_name,
cert_data=None, cert_password=None, key_vault_secret_id=None):
if cert_data is not None:
instance.data = cert_data
if cert_password is not None:
instance.password = cert_password
if key_vault_secret_id is not None:
instance.key_vault_secret_id = key_vault_secret_id
return parent
def set_ag_ssl_policy_2017_03_01(cmd, resource_group_name, application_gateway_name, disabled_ssl_protocols=None,
clear=False, no_wait=False):
ApplicationGatewaySslPolicy = cmd.get_models('ApplicationGatewaySslPolicy')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.ssl_policy = None if clear else ApplicationGatewaySslPolicy(
disabled_ssl_protocols=disabled_ssl_protocols)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def set_ag_ssl_policy_2017_06_01(cmd, resource_group_name, application_gateway_name, policy_name=None, policy_type=None,
disabled_ssl_protocols=None, cipher_suites=None, min_protocol_version=None,
no_wait=False):
ApplicationGatewaySslPolicy, ApplicationGatewaySslPolicyType = cmd.get_models(
'ApplicationGatewaySslPolicy', 'ApplicationGatewaySslPolicyType')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
policy_type = None
if policy_name:
policy_type = ApplicationGatewaySslPolicyType.predefined.value
elif cipher_suites or min_protocol_version:
policy_type = ApplicationGatewaySslPolicyType.custom.value
ag.ssl_policy = ApplicationGatewaySslPolicy(
policy_name=policy_name,
policy_type=policy_type,
disabled_ssl_protocols=disabled_ssl_protocols,
cipher_suites=cipher_suites,
min_protocol_version=min_protocol_version)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_ssl_policy(cmd, resource_group_name, application_gateway_name):
return network_client_factory(cmd.cli_ctx).application_gateways.get(
resource_group_name, application_gateway_name).ssl_policy
def create_ag_trusted_root_certificate(cmd, resource_group_name, application_gateway_name, item_name, no_wait=False,
cert_data=None, keyvault_secret=None):
ApplicationGatewayTrustedRootCertificate = cmd.get_models('ApplicationGatewayTrustedRootCertificate')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
root_cert = ApplicationGatewayTrustedRootCertificate(name=item_name, data=cert_data,
key_vault_secret_id=keyvault_secret)
upsert_to_collection(ag, 'trusted_root_certificates', root_cert, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_trusted_root_certificate(instance, parent, item_name, cert_data=None, keyvault_secret=None):
if cert_data is not None:
instance.data = cert_data
if keyvault_secret is not None:
instance.key_vault_secret_id = keyvault_secret
return parent
def create_ag_url_path_map(cmd, resource_group_name, application_gateway_name, item_name, paths,
address_pool=None, http_settings=None, redirect_config=None, rewrite_rule_set=None,
default_address_pool=None, default_http_settings=None, default_redirect_config=None,
no_wait=False, rule_name='default', default_rewrite_rule_set=None, firewall_policy=None):
ApplicationGatewayUrlPathMap, ApplicationGatewayPathRule, SubResource = cmd.get_models(
'ApplicationGatewayUrlPathMap', 'ApplicationGatewayPathRule', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_rule = ApplicationGatewayPathRule(
name=rule_name,
backend_address_pool=SubResource(id=address_pool) if address_pool else None,
backend_http_settings=SubResource(id=http_settings) if http_settings else None,
paths=paths
)
new_map = ApplicationGatewayUrlPathMap(
name=item_name,
default_backend_address_pool=SubResource(id=default_address_pool) if default_address_pool else None,
default_backend_http_settings=SubResource(id=default_http_settings) if default_http_settings else None,
path_rules=[])
if cmd.supported_api_version(min_api='2017-06-01'):
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else None
new_map.default_redirect_configuration = \
SubResource(id=default_redirect_config) if default_redirect_config else None
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
new_map.default_rewrite_rule_set = \
SubResource(id=default_rewrite_rule_set) if default_rewrite_rule_set else None
if cmd.supported_api_version(min_api='2019-09-01'):
new_rule.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
# pull defaults from the rule specific properties if the default-* option isn't specified
if new_rule.backend_address_pool and not new_map.default_backend_address_pool:
new_map.default_backend_address_pool = new_rule.backend_address_pool
if new_rule.backend_http_settings and not new_map.default_backend_http_settings:
new_map.default_backend_http_settings = new_rule.backend_http_settings
if new_rule.redirect_configuration and not new_map.default_redirect_configuration:
new_map.default_redirect_configuration = new_rule.redirect_configuration
new_map.path_rules.append(new_rule)
upsert_to_collection(ag, 'url_path_maps', new_map, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
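# Illustrative sketch only (placeholders throughout; pool_id and settings_id are assumed to be
# full resource IDs): when the default_* options are omitted, the map-level defaults are seeded
# from the rule-level pool/settings, as coded above.
#   create_ag_url_path_map(cmd, 'MyResourceGroup', 'MyAppGateway', 'MyPathMap',
#                          paths=['/images/*'], address_pool=pool_id,
#                          http_settings=settings_id, rule_name='imagesRule')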
def update_ag_url_path_map(cmd, instance, parent, item_name, default_address_pool=None,
default_http_settings=None, default_redirect_config=None, raw=False,
default_rewrite_rule_set=None):
SubResource = cmd.get_models('SubResource')
if default_address_pool == '':
instance.default_backend_address_pool = None
elif default_address_pool:
instance.default_backend_address_pool = SubResource(id=default_address_pool)
if default_http_settings == '':
instance.default_backend_http_settings = None
elif default_http_settings:
instance.default_backend_http_settings = SubResource(id=default_http_settings)
if default_redirect_config == '':
instance.default_redirect_configuration = None
elif default_redirect_config:
instance.default_redirect_configuration = SubResource(id=default_redirect_config)
if default_rewrite_rule_set == '':
instance.default_rewrite_rule_set = None
elif default_rewrite_rule_set:
instance.default_rewrite_rule_set = SubResource(id=default_rewrite_rule_set)
return parent
def create_ag_url_path_map_rule(cmd, resource_group_name, application_gateway_name, url_path_map_name,
item_name, paths, address_pool=None, http_settings=None, redirect_config=None,
firewall_policy=None, no_wait=False, rewrite_rule_set=None):
ApplicationGatewayPathRule, SubResource = cmd.get_models('ApplicationGatewayPathRule', 'SubResource')
if address_pool and redirect_config:
raise CLIError("Cannot reference a BackendAddressPool when Redirect Configuration is specified.")
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
url_map = next((x for x in ag.url_path_maps if x.name == url_path_map_name), None)
if not url_map:
raise CLIError('URL path map "{}" not found.'.format(url_path_map_name))
default_backend_pool = SubResource(id=url_map.default_backend_address_pool.id) \
if (url_map.default_backend_address_pool and not redirect_config) else None
default_http_settings = SubResource(id=url_map.default_backend_http_settings.id) \
if url_map.default_backend_http_settings else None
new_rule = ApplicationGatewayPathRule(
name=item_name,
paths=paths,
backend_address_pool=SubResource(id=address_pool) if address_pool else default_backend_pool,
backend_http_settings=SubResource(id=http_settings) if http_settings else default_http_settings)
if cmd.supported_api_version(min_api='2017-06-01'):
default_redirect = SubResource(id=url_map.default_redirect_configuration.id) \
if (url_map.default_redirect_configuration and not address_pool) else None
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else default_redirect
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
if cmd.supported_api_version(min_api='2019-09-01'):
new_rule.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
upsert_to_collection(url_map, 'path_rules', new_rule, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def delete_ag_url_path_map_rule(cmd, resource_group_name, application_gateway_name, url_path_map_name,
item_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
url_map = next((x for x in ag.url_path_maps if x.name == url_path_map_name), None)
if not url_map:
raise CLIError('URL path map "{}" not found.'.format(url_path_map_name))
url_map.path_rules = \
[x for x in url_map.path_rules if x.name.lower() != item_name.lower()]
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def set_ag_waf_config_2016_09_01(cmd, resource_group_name, application_gateway_name, enabled,
firewall_mode=None,
no_wait=False):
ApplicationGatewayWebApplicationFirewallConfiguration = cmd.get_models(
'ApplicationGatewayWebApplicationFirewallConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.web_application_firewall_configuration = \
ApplicationGatewayWebApplicationFirewallConfiguration(
enabled=(enabled == 'true'), firewall_mode=firewall_mode)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def set_ag_waf_config_2017_03_01(cmd, resource_group_name, application_gateway_name, enabled,
firewall_mode=None,
rule_set_type='OWASP', rule_set_version=None,
disabled_rule_groups=None,
disabled_rules=None, no_wait=False,
request_body_check=None, max_request_body_size=None, file_upload_limit=None,
exclusions=None):
ApplicationGatewayWebApplicationFirewallConfiguration = cmd.get_models(
'ApplicationGatewayWebApplicationFirewallConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.web_application_firewall_configuration = \
ApplicationGatewayWebApplicationFirewallConfiguration(
enabled=(enabled == 'true'), firewall_mode=firewall_mode, rule_set_type=rule_set_type,
rule_set_version=rule_set_version)
if disabled_rule_groups or disabled_rules:
ApplicationGatewayFirewallDisabledRuleGroup = cmd.get_models('ApplicationGatewayFirewallDisabledRuleGroup')
disabled_groups = []
# disabled groups can be added directly
for group in disabled_rule_groups or []:
disabled_groups.append(ApplicationGatewayFirewallDisabledRuleGroup(rule_group_name=group))
def _flatten(collection, expand_property_fn):
for each in collection:
for value in expand_property_fn(each):
yield value
# for disabled rules, we have to look up the IDs
if disabled_rules:
results = list_ag_waf_rule_sets(ncf, _type=rule_set_type, version=rule_set_version, group='*')
for group in _flatten(results, lambda r: r.rule_groups):
disabled_group = ApplicationGatewayFirewallDisabledRuleGroup(
rule_group_name=group.rule_group_name, rules=[])
for rule in group.rules:
if str(rule.rule_id) in disabled_rules:
disabled_group.rules.append(rule.rule_id)
if disabled_group.rules:
disabled_groups.append(disabled_group)
ag.web_application_firewall_configuration.disabled_rule_groups = disabled_groups
if cmd.supported_api_version(min_api='2018-08-01'):
ag.web_application_firewall_configuration.request_body_check = request_body_check
ag.web_application_firewall_configuration.max_request_body_size_in_kb = max_request_body_size
ag.web_application_firewall_configuration.file_upload_limit_in_mb = file_upload_limit
ag.web_application_firewall_configuration.exclusions = exclusions
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
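# Illustrative sketch only (placeholders throughout): disable a whole CRS rule group plus two
# individual rule IDs; the IDs are resolved against the available rule-set metadata above.
#   set_ag_waf_config_2017_03_01(cmd, 'MyResourceGroup', 'MyAppGateway', enabled='true',
#                                firewall_mode='Prevention', rule_set_version='3.0',
#                                disabled_rule_groups=['REQUEST-920-PROTOCOL-ENFORCEMENT'],
#                                disabled_rules=['920300', '920330'])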
def show_ag_waf_config(cmd, resource_group_name, application_gateway_name):
return network_client_factory(cmd.cli_ctx).application_gateways.get(
resource_group_name, application_gateway_name).web_application_firewall_configuration
def list_ag_waf_rule_sets(client, _type=None, version=None, group=None):
results = client.list_available_waf_rule_sets().value
filtered_results = []
# filter by rule set name or version
for rule_set in results:
if _type and _type.lower() != rule_set.rule_set_type.lower():
continue
if version and version.lower() != rule_set.rule_set_version.lower():
continue
filtered_groups = []
for rule_group in rule_set.rule_groups:
if not group:
rule_group.rules = None
filtered_groups.append(rule_group)
elif group.lower() == rule_group.rule_group_name.lower() or group == '*':
filtered_groups.append(rule_group)
if filtered_groups:
rule_set.rule_groups = filtered_groups
filtered_results.append(rule_set)
return filtered_results
# endregion
# region ApplicationGatewayWAFPolicy
def create_ag_waf_policy(cmd, client, resource_group_name, policy_name,
location=None, tags=None, rule_set_type='OWASP',
rule_set_version='3.0'):
WebApplicationFirewallPolicy, ManagedRulesDefinition, \
ManagedRuleSet = cmd.get_models('WebApplicationFirewallPolicy',
'ManagedRulesDefinition',
'ManagedRuleSet')
# https://docs.microsoft.com/en-us/azure/application-gateway/waf-overview
# mandatory default rule with empty rule sets
managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type, rule_set_version=rule_set_version)
managed_rule_definition = ManagedRulesDefinition(managed_rule_sets=[managed_rule_set])
waf_policy = WebApplicationFirewallPolicy(location=location, tags=tags, managed_rules=managed_rule_definition)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
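# Illustrative sketch only (placeholders throughout): every policy is created with one managed
# rule set, per the mandatory-default-rule note above.
#   create_ag_waf_policy(cmd, client, 'MyResourceGroup', 'MyWafPolicy',
#                        location='westus', rule_set_type='OWASP', rule_set_version='3.0')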
def update_ag_waf_policy(cmd, instance, tags=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
return instance
def list_ag_waf_policies(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'web_application_firewall_policies', resource_group_name)
# endregion
# region ApplicationGatewayWAFPolicyRules PolicySettings
def update_waf_policy_setting(cmd, instance,
state=None, mode=None,
max_request_body_size_in_kb=None, file_upload_limit_in_mb=None,
request_body_check=False):
if state is not None:
instance.policy_settings.state = state
if mode is not None:
instance.policy_settings.mode = mode
if max_request_body_size_in_kb is not None:
instance.policy_settings.max_request_body_size_in_kb = max_request_body_size_in_kb
if file_upload_limit_in_mb is not None:
instance.policy_settings.file_upload_limit_in_mb = file_upload_limit_in_mb
if request_body_check is not None:
instance.policy_settings.request_body_check = request_body_check
return instance
def list_waf_policy_setting(cmd, client, resource_group_name, policy_name):
return client.get(resource_group_name, policy_name).policy_settings
# endregion
# region ApplicationGatewayWAFPolicyRules
def create_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name, priority, rule_type, action):
"""
    Initialize a custom rule for the WAF policy.
"""
WebApplicationFirewallCustomRule = cmd.get_models('WebApplicationFirewallCustomRule')
waf_policy = client.get(resource_group_name, policy_name)
new_custom_rule = WebApplicationFirewallCustomRule(
name=rule_name,
action=action,
match_conditions=[],
priority=priority,
rule_type=rule_type
)
upsert_to_collection(waf_policy, 'custom_rules', new_custom_rule, 'name')
parent = client.create_or_update(resource_group_name, policy_name, waf_policy)
return find_child_item(parent, rule_name, path='custom_rules', key_path='name')
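# Illustrative sketch only (placeholders throughout): create the custom rule shell first, then
# attach match conditions with add_waf_custom_rule_match_cond below.
#   create_waf_custom_rule(cmd, client, 'MyResourceGroup', 'MyWafPolicy', 'BlockBadAgents',
#                          priority=10, rule_type='MatchRule', action='Block')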
# pylint: disable=unused-argument
def update_waf_custom_rule(instance, parent, cmd, rule_name, priority=None, rule_type=None, action=None):
with cmd.update_context(instance) as c:
c.set_param('priority', priority)
c.set_param('rule_type', rule_type)
c.set_param('action', action)
return parent
def show_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name):
waf_policy = client.get(resource_group_name, policy_name)
return find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
def list_waf_custom_rules(cmd, client, resource_group_name, policy_name):
return client.get(resource_group_name, policy_name).custom_rules
def delete_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name, no_wait=None):
waf_policy = client.get(resource_group_name, policy_name)
rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
waf_policy.custom_rules.remove(rule)
sdk_no_wait(no_wait, client.create_or_update, resource_group_name, policy_name, waf_policy)
# endregion
# region ApplicationGatewayWAFPolicyRuleMatchConditions
def add_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name,
match_variables, operator, match_values, negation_condition=None, transforms=None):
MatchCondition = cmd.get_models('MatchCondition')
waf_policy = client.get(resource_group_name, policy_name)
custom_rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
new_cond = MatchCondition(
match_variables=match_variables,
operator=operator,
match_values=match_values,
        negation_conditon=negation_condition,  # keyword spelling matches the underlying MatchCondition model
transforms=transforms
)
custom_rule.match_conditions.append(new_cond)
upsert_to_collection(waf_policy, 'custom_rules', custom_rule, 'name', warn=False)
client.create_or_update(resource_group_name, policy_name, waf_policy)
return new_cond
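# Illustrative sketch only (placeholders throughout; match_vars is assumed to be a list of
# MatchVariable objects built by the argument layer):
#   add_waf_custom_rule_match_cond(cmd, client, 'MyResourceGroup', 'MyWafPolicy',
#                                  'BlockBadAgents', match_variables=match_vars,
#                                  operator='Contains', match_values=['badbot'],
#                                  negation_condition=False, transforms=['Lowercase'])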
def list_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name):
waf_policy = client.get(resource_group_name, policy_name)
return find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name').match_conditions
def remove_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name, index):
waf_policy = client.get(resource_group_name, policy_name)
rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
rule.match_conditions.pop(index)
client.create_or_update(resource_group_name, policy_name, waf_policy)
# endregion
# region ApplicationGatewayWAFPolicy ManagedRule ManagedRuleSet
def add_waf_managed_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version,
rule_group_name=None, rules=None):
"""
    Add a managed rule set to the WAF policy's managed rules.
    See: https://docs.microsoft.com/en-us/azure/web-application-firewall/ag/application-gateway-crs-rulegroups-rules
"""
ManagedRuleSet, ManagedRuleGroupOverride, ManagedRuleOverride = \
cmd.get_models('ManagedRuleSet', 'ManagedRuleGroupOverride', 'ManagedRuleOverride')
waf_policy = client.get(resource_group_name, policy_name)
managed_rule_overrides = [ManagedRuleOverride(rule_id=r) for r in rules] if rules is not None else []
rule_group_override = None
if rule_group_name is not None:
rule_group_override = ManagedRuleGroupOverride(rule_group_name=rule_group_name,
rules=managed_rule_overrides)
new_managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_group_overrides=[rule_group_override] if rule_group_override is not None else []) # pylint: disable=line-too-long
for rule_set in waf_policy.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
for rule_override in rule_set.rule_group_overrides:
if rule_override.rule_group_name == rule_group_name:
# Add one rule
rule_override.rules.extend(managed_rule_overrides)
break
else:
# Add one rule group
if rule_group_override is not None:
rule_set.rule_group_overrides.append(rule_group_override)
break
else:
# Add new rule set
waf_policy.managed_rules.managed_rule_sets.append(new_managed_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
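# Illustrative sketch only (placeholders throughout): override two rules inside one rule group;
# when the rule set or group already exists on the policy, the overrides are merged as coded above.
#   add_waf_managed_rule_set(cmd, client, 'MyResourceGroup', 'MyWafPolicy',
#                            rule_set_type='OWASP', rule_set_version='3.0',
#                            rule_group_name='REQUEST-921-PROTOCOL-ATTACK',
#                            rules=['921110', '921150'])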
def update_waf_managed_rule_set(cmd, instance, rule_set_type, rule_set_version, rule_group_name=None, rules=None):
"""
    Update (override) an existing rule set of the WAF policy's managed rules.
"""
ManagedRuleSet, ManagedRuleGroupOverride, ManagedRuleOverride = \
cmd.get_models('ManagedRuleSet', 'ManagedRuleGroupOverride', 'ManagedRuleOverride')
managed_rule_overrides = [ManagedRuleOverride(rule_id=r) for r in rules] if rules else None
rule_group_override = ManagedRuleGroupOverride(rule_group_name=rule_group_name,
rules=managed_rule_overrides) if managed_rule_overrides else None
new_managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_group_overrides=[rule_group_override] if rule_group_override is not None else []) # pylint: disable=line-too-long
updated_rule_set = None
for rule_set in instance.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version != rule_set_version:
updated_rule_set = rule_set
break
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
updated_rule_set = rule_set
break
rg = next((rg for rg in rule_set.rule_group_overrides if rg.rule_group_name == rule_group_name), None)
if rg:
                rg.rules = managed_rule_overrides  # overwrite the rules, unlike add_waf_managed_rule_set(), which appends
else:
rule_set.rule_group_overrides.append(rule_group_override)
if updated_rule_set:
instance.managed_rules.managed_rule_sets.remove(updated_rule_set)
instance.managed_rules.managed_rule_sets.append(new_managed_rule_set)
return instance
def remove_waf_managed_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version, rule_group_name=None):
"""
    Remove a rule group from a managed rule set when rule_group_name is specified; otherwise, remove the whole rule set.
"""
waf_policy = client.get(resource_group_name, policy_name)
delete_rule_set = None
for rule_set in waf_policy.managed_rules.managed_rule_sets:
        if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
delete_rule_set = rule_set
break
# Remove one rule from rule group
rg = next((rg for rg in rule_set.rule_group_overrides if rg.rule_group_name == rule_group_name), None)
if rg is None:
raise CLIError('Rule set group [ {} ] not found.'.format(rule_group_name))
rule_set.rule_group_overrides.remove(rg)
if delete_rule_set:
waf_policy.managed_rules.managed_rule_sets.remove(delete_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
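# Illustrative sketch only (placeholders throughout): omit rule_group_name to drop the whole
# rule set; pass it to drop only that group's overrides.
#   remove_waf_managed_rule_set(cmd, client, 'MyResourceGroup', 'MyWafPolicy',
#                               rule_set_type='OWASP', rule_set_version='3.0')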
def list_waf_managed_rule_set(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# endregion
# region ApplicationGatewayWAFPolicy ManagedRule OwaspCrsExclusionEntry
def add_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name,
match_variable, selector_match_operator, selector):
OwaspCrsExclusionEntry = cmd.get_models('OwaspCrsExclusionEntry')
exclusion_entry = OwaspCrsExclusionEntry(match_variable=match_variable,
selector_match_operator=selector_match_operator,
selector=selector)
waf_policy = client.get(resource_group_name, policy_name)
waf_policy.managed_rules.exclusions.append(exclusion_entry)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def remove_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
waf_policy.managed_rules.exclusions = []
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def list_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# endregion
# region ApplicationSecurityGroups
def create_asg(cmd, client, resource_group_name, application_security_group_name, location=None, tags=None):
ApplicationSecurityGroup = cmd.get_models('ApplicationSecurityGroup')
asg = ApplicationSecurityGroup(location=location, tags=tags)
return client.begin_create_or_update(resource_group_name, application_security_group_name, asg)
def update_asg(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
# endregion
# region DdosProtectionPlans
def create_ddos_plan(cmd, resource_group_name, ddos_plan_name, location=None, tags=None, vnets=None):
from azure.cli.core.commands import LongRunningOperation
ddos_client = network_client_factory(cmd.cli_ctx).ddos_protection_plans
ddos_protection_plan = cmd.get_models('DdosProtectionPlan')()
if location:
ddos_protection_plan.location = location
if tags:
ddos_protection_plan.tags = tags
if not vnets:
    # if no VNETs are specified, we can do a simple PUT
return ddos_client.begin_create_or_update(resource_group_name, ddos_plan_name, parameters=ddos_protection_plan)
    # if VNETs are specified, we have to create the protection plan first and then attach the VNETs
plan_id = LongRunningOperation(cmd.cli_ctx)(
ddos_client.begin_create_or_update(resource_group_name, ddos_plan_name, parameters=ddos_protection_plan)).id
SubResource = cmd.get_models('SubResource')
logger.info('Attempting to attach VNets to newly created DDoS protection plan.')
for vnet_subresource in vnets:
vnet_client = network_client_factory(cmd.cli_ctx).virtual_networks
id_parts = parse_resource_id(vnet_subresource.id)
vnet = vnet_client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = SubResource(id=plan_id)
vnet_client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
return ddos_client.get(resource_group_name, ddos_plan_name)
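# Illustrative sketch only (placeholders throughout; vnet_refs is assumed to be a list of
# SubResource-like objects carrying full VNet IDs): when VNets are supplied, the plan is
# created first and each VNet is then updated to reference it, as implemented above.
#   create_ddos_plan(cmd, 'MyResourceGroup', 'MyDdosPlan', location='westus', vnets=vnet_refs)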
def update_ddos_plan(cmd, instance, tags=None, vnets=None):
SubResource = cmd.get_models('SubResource')
if tags is not None:
instance.tags = tags
if vnets is not None:
logger.info('Attempting to update the VNets attached to the DDoS protection plan.')
vnet_ids = set([])
if len(vnets) == 1 and not vnets[0]:
pass
else:
vnet_ids = {x.id for x in vnets}
existing_vnet_ids = {x.id for x in instance.virtual_networks} if instance.virtual_networks else set([])
client = network_client_factory(cmd.cli_ctx).virtual_networks
for vnet_id in vnet_ids.difference(existing_vnet_ids):
logger.info("Adding VNet '%s' to plan.", vnet_id)
id_parts = parse_resource_id(vnet_id)
vnet = client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = SubResource(id=instance.id)
client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
for vnet_id in existing_vnet_ids.difference(vnet_ids):
logger.info("Removing VNet '%s' from plan.", vnet_id)
id_parts = parse_resource_id(vnet_id)
vnet = client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = None
client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
return instance
def list_ddos_plans(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).ddos_protection_plans
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
# endregion
# region DNS Commands
# add a delegation name server (NS) record for the newly created child zone in its parent zone.
def add_dns_delegation(cmd, child_zone, parent_zone, child_rg, child_zone_name):
"""
:param child_zone: the zone object corresponding to the child that is created.
    :param parent_zone: the parent zone name / FQDN of the parent zone.
                        If only the parent zone name is given, the current subscription and resource group are assumed.
:param child_rg: resource group of the child zone
:param child_zone_name: name of the child zone
"""
import sys
from azure.core.exceptions import HttpResponseError
parent_rg = child_rg
parent_subscription_id = None
parent_zone_name = parent_zone
if is_valid_resource_id(parent_zone):
id_parts = parse_resource_id(parent_zone)
parent_rg = id_parts['resource_group']
parent_subscription_id = id_parts['subscription']
parent_zone_name = id_parts['name']
if all([parent_zone_name, parent_rg, child_zone_name, child_zone]) and child_zone_name.endswith(parent_zone_name):
record_set_name = child_zone_name.replace('.' + parent_zone_name, '')
try:
for dname in child_zone.name_servers:
add_dns_ns_record(cmd, parent_rg, parent_zone_name, record_set_name, dname, parent_subscription_id)
            print('Delegation added successfully in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print('Could not add delegation in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
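# Illustrative sketch only (placeholders throughout; child_zone is assumed to be the Zone object
# returned when 'child.contoso.com' was created):
#   add_dns_delegation(cmd, child_zone, 'contoso.com', 'MyResourceGroup', 'child.contoso.com')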
def create_dns_zone(cmd, client, resource_group_name, zone_name, parent_zone_name=None, tags=None,
if_none_match=False, zone_type='Public', resolution_vnets=None, registration_vnets=None):
Zone = cmd.get_models('Zone', resource_type=ResourceType.MGMT_NETWORK_DNS)
zone = Zone(location='global', tags=tags)
if hasattr(zone, 'zone_type'):
zone.zone_type = zone_type
zone.registration_virtual_networks = registration_vnets
zone.resolution_virtual_networks = resolution_vnets
created_zone = client.create_or_update(resource_group_name, zone_name, zone,
if_none_match='*' if if_none_match else None)
if cmd.supported_api_version(min_api='2016-04-01') and parent_zone_name is not None:
logger.info('Attempting to add delegation in the parent zone')
add_dns_delegation(cmd, created_zone, parent_zone_name, resource_group_name, zone_name)
return created_zone
def update_dns_zone(instance, tags=None, zone_type=None, resolution_vnets=None, registration_vnets=None):
if tags is not None:
instance.tags = tags
if zone_type:
instance.zone_type = zone_type
if resolution_vnets == ['']:
instance.resolution_virtual_networks = None
elif resolution_vnets:
instance.resolution_virtual_networks = resolution_vnets
if registration_vnets == ['']:
instance.registration_virtual_networks = None
elif registration_vnets:
instance.registration_virtual_networks = registration_vnets
return instance
def list_dns_zones(cmd, resource_group_name=None):
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).zones
if resource_group_name:
return ncf.list_by_resource_group(resource_group_name)
return ncf.list()
def create_dns_record_set(cmd, resource_group_name, zone_name, record_set_name, record_set_type,
metadata=None, if_match=None, if_none_match=None, ttl=3600, target_resource=None):
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
SubResource = cmd.get_models('SubResource', resource_type=ResourceType.MGMT_NETWORK)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = RecordSet(
ttl=ttl,
metadata=metadata,
target_resource=SubResource(id=target_resource) if target_resource else None
)
return client.create_or_update(resource_group_name, zone_name, record_set_name,
record_set_type, record_set, if_match=if_match,
if_none_match='*' if if_none_match else None)
def list_dns_record_set(client, resource_group_name, zone_name, record_type=None):
if record_type:
return client.list_by_type(resource_group_name, zone_name, record_type)
return client.list_by_dns_zone(resource_group_name, zone_name)
def update_dns_record_set(instance, cmd, metadata=None, target_resource=None):
if metadata is not None:
instance.metadata = metadata
if target_resource == '':
instance.target_resource = None
elif target_resource is not None:
SubResource = cmd.get_models('SubResource')
instance.target_resource = SubResource(id=target_resource)
return instance
def _type_to_property_name(key):
type_dict = {
'a': 'a_records',
'aaaa': 'aaaa_records',
'caa': 'caa_records',
'cname': 'cname_record',
'mx': 'mx_records',
'ns': 'ns_records',
'ptr': 'ptr_records',
'soa': 'soa_record',
'spf': 'txt_records',
'srv': 'srv_records',
'txt': 'txt_records',
}
return type_dict[key.lower()]
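# Illustrative examples of the mapping above (not executed):
#   _type_to_property_name('A')   -> 'a_records'
#   _type_to_property_name('SPF') -> 'txt_records'  # SPF data is stored in TXT records
#   _type_to_property_name('SOA') -> 'soa_record'   # singular: a zone has a single SOA record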
def export_zone(cmd, resource_group_name, zone_name, file_name=None):
from time import localtime, strftime
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS)
record_sets = client.record_sets.list_by_dns_zone(resource_group_name, zone_name)
zone_obj = OrderedDict({
'$origin': zone_name.rstrip('.') + '.',
'resource-group': resource_group_name,
'zone-name': zone_name.rstrip('.'),
'datetime': strftime('%a, %d %b %Y %X %z', localtime())
})
for record_set in record_sets:
record_type = record_set.type.rsplit('/', 1)[1].lower()
record_set_name = record_set.name
record_data = getattr(record_set, _type_to_property_name(record_type), None)
# ignore empty record sets
if not record_data:
continue
if not isinstance(record_data, list):
record_data = [record_data]
if record_set_name not in zone_obj:
zone_obj[record_set_name] = OrderedDict()
for record in record_data:
record_obj = {'ttl': record_set.ttl}
if record_type not in zone_obj[record_set_name]:
zone_obj[record_set_name][record_type] = []
if record_type == 'aaaa':
record_obj.update({'ip': record.ipv6_address})
elif record_type == 'a':
record_obj.update({'ip': record.ipv4_address})
elif record_type == 'caa':
record_obj.update({'val': record.value, 'tag': record.tag, 'flags': record.flags})
elif record_type == 'cname':
record_obj.update({'alias': record.cname.rstrip('.') + '.'})
elif record_type == 'mx':
record_obj.update({'preference': record.preference, 'host': record.exchange.rstrip('.') + '.'})
elif record_type == 'ns':
record_obj.update({'host': record.nsdname.rstrip('.') + '.'})
elif record_type == 'ptr':
record_obj.update({'host': record.ptrdname.rstrip('.') + '.'})
elif record_type == 'soa':
record_obj.update({
'mname': record.host.rstrip('.') + '.',
'rname': record.email.rstrip('.') + '.',
'serial': int(record.serial_number), 'refresh': record.refresh_time,
'retry': record.retry_time, 'expire': record.expire_time,
'minimum': record.minimum_ttl
})
zone_obj['$ttl'] = record.minimum_ttl
elif record_type == 'srv':
record_obj.update({'priority': record.priority, 'weight': record.weight,
'port': record.port, 'target': record.target.rstrip('.') + '.'})
elif record_type == 'txt':
record_obj.update({'txt': ''.join(record.value)})
zone_obj[record_set_name][record_type].append(record_obj)
zone_file_content = make_zone_file(zone_obj)
print(zone_file_content)
if file_name:
try:
with open(file_name, 'w') as f:
f.write(zone_file_content)
except IOError:
raise CLIError('Unable to export to file: {}'.format(file_name))
# pylint: disable=too-many-return-statements, inconsistent-return-statements
def _build_record(cmd, data):
AaaaRecord, ARecord, CaaRecord, CnameRecord, MxRecord, NsRecord, PtrRecord, SoaRecord, SrvRecord, TxtRecord = \
cmd.get_models('AaaaRecord', 'ARecord', 'CaaRecord', 'CnameRecord', 'MxRecord', 'NsRecord',
'PtrRecord', 'SoaRecord', 'SrvRecord', 'TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record_type = data['delim'].lower()
try:
if record_type == 'aaaa':
return AaaaRecord(ipv6_address=data['ip'])
if record_type == 'a':
return ARecord(ipv4_address=data['ip'])
if (record_type == 'caa' and
supported_api_version(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS, min_api='2018-03-01-preview')):
return CaaRecord(value=data['val'], flags=int(data['flags']), tag=data['tag'])
if record_type == 'cname':
return CnameRecord(cname=data['alias'])
if record_type == 'mx':
return MxRecord(preference=data['preference'], exchange=data['host'])
if record_type == 'ns':
return NsRecord(nsdname=data['host'])
if record_type == 'ptr':
return PtrRecord(ptrdname=data['host'])
if record_type == 'soa':
return SoaRecord(host=data['host'], email=data['email'], serial_number=data['serial'],
refresh_time=data['refresh'], retry_time=data['retry'], expire_time=data['expire'],
minimum_ttl=data['minimum'])
if record_type == 'srv':
return SrvRecord(
priority=int(data['priority']), weight=int(data['weight']), port=int(data['port']),
target=data['target'])
if record_type in ['txt', 'spf']:
text_data = data['txt']
return TxtRecord(value=text_data) if isinstance(text_data, list) else TxtRecord(value=[text_data])
except KeyError as ke:
raise CLIError("The {} record '{}' is missing a property. {}"
.format(record_type, data['name'], ke))
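# Illustrative sketch only (placeholder data): building an MX record from a parsed zone-file
# entry; the 'delim' key selects the record type, mirroring the branches above.
#   _build_record(cmd, {'delim': 'MX', 'name': '@', 'preference': 10,
#                       'host': 'mail.contoso.com.'})  # -> MxRecord(preference=10, exchange='mail.contoso.com.')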
# pylint: disable=too-many-statements
def import_zone(cmd, resource_group_name, zone_name, file_name):
from azure.cli.core.util import read_file_content
from azure.core.exceptions import HttpResponseError
import sys
logger.warning("In the future, zone name will be case insensitive.")
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
from azure.cli.core.azclierror import FileOperationError, UnclassifiedUserFault
try:
file_text = read_file_content(file_name)
except FileNotFoundError:
raise FileOperationError("No such file: " + str(file_name))
except IsADirectoryError:
raise FileOperationError("Is a directory: " + str(file_name))
except PermissionError:
raise FileOperationError("Permission denied: " + str(file_name))
except OSError as e:
raise UnclassifiedUserFault(e)
zone_obj = parse_zone_file(file_text, zone_name)
origin = zone_name
record_sets = {}
for record_set_name in zone_obj:
for record_set_type in zone_obj[record_set_name]:
record_set_obj = zone_obj[record_set_name][record_set_type]
if record_set_type == 'soa':
origin = record_set_name.rstrip('.')
if not isinstance(record_set_obj, list):
record_set_obj = [record_set_obj]
for entry in record_set_obj:
record_set_ttl = entry['ttl']
record_set_key = '{}{}'.format(record_set_name.lower(), record_set_type)
record = _build_record(cmd, entry)
if not record:
logger.warning('Cannot import %s. RecordType is not found. Skipping...', entry['delim'].lower())
continue
record_set = record_sets.get(record_set_key, None)
if not record_set:
# Workaround for issue #2824
relative_record_set_name = record_set_name.rstrip('.')
if not relative_record_set_name.endswith(origin):
logger.warning(
'Cannot import %s. Only records relative to origin may be '
'imported at this time. Skipping...', relative_record_set_name)
continue
record_set = RecordSet(ttl=record_set_ttl)
record_sets[record_set_key] = record_set
_add_record(record_set, record, record_set_type,
is_list=record_set_type.lower() not in ['soa', 'cname'])
total_records = 0
for key, rs in record_sets.items():
rs_name, rs_type = key.lower().rsplit('.', 1)
rs_name = rs_name[:-(len(origin) + 1)] if rs_name != origin else '@'
try:
record_count = len(getattr(rs, _type_to_property_name(rs_type)))
except TypeError:
record_count = 1
total_records += record_count
cum_records = 0
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS)
print('== BEGINNING ZONE IMPORT: {} ==\n'.format(zone_name), file=sys.stderr)
Zone = cmd.get_models('Zone', resource_type=ResourceType.MGMT_NETWORK_DNS)
client.zones.create_or_update(resource_group_name, zone_name, Zone(location='global'))
for key, rs in record_sets.items():
rs_name, rs_type = key.lower().rsplit('.', 1)
rs_name = '@' if rs_name == origin else rs_name
if rs_name.endswith(origin):
rs_name = rs_name[:-(len(origin) + 1)]
try:
record_count = len(getattr(rs, _type_to_property_name(rs_type)))
except TypeError:
record_count = 1
if rs_name == '@' and rs_type == 'soa':
root_soa = client.record_sets.get(resource_group_name, zone_name, '@', 'SOA')
rs.soa_record.host = root_soa.soa_record.host
rs_name = '@'
elif rs_name == '@' and rs_type == 'ns':
root_ns = client.record_sets.get(resource_group_name, zone_name, '@', 'NS')
root_ns.ttl = rs.ttl
rs = root_ns
rs_type = rs.type.rsplit('/', 1)[1]
try:
client.record_sets.create_or_update(
resource_group_name, zone_name, rs_name, rs_type, rs)
cum_records += record_count
print("({}/{}) Imported {} records of type '{}' and name '{}'"
.format(cum_records, total_records, record_count, rs_type, rs_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print("\n== {}/{} RECORDS IMPORTED SUCCESSFULLY: '{}' =="
.format(cum_records, total_records, zone_name), file=sys.stderr)
def add_dns_aaaa_record(cmd, resource_group_name, zone_name, record_set_name, ipv6_address,
ttl=3600, if_none_match=None):
AaaaRecord = cmd.get_models('AaaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = AaaaRecord(ipv6_address=ipv6_address)
record_type = 'aaaa'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_a_record(cmd, resource_group_name, zone_name, record_set_name, ipv4_address,
ttl=3600, if_none_match=None):
ARecord = cmd.get_models('ARecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = ARecord(ipv4_address=ipv4_address)
record_type = 'a'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name, 'arecords',
ttl=ttl, if_none_match=if_none_match)
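# Illustrative CLI usage for these add_dns_* record helpers (the command and option names below are
# an assumption based on the public az CLI documentation and may differ between CLI versions):
#   az network dns record-set a add-record -g MyResourceGroup -z example.com -n www -a 10.0.0.4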
def add_dns_caa_record(cmd, resource_group_name, zone_name, record_set_name, value, flags, tag,
ttl=3600, if_none_match=None):
CaaRecord = cmd.get_models('CaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CaaRecord(flags=flags, tag=tag, value=value)
record_type = 'caa'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_cname_record(cmd, resource_group_name, zone_name, record_set_name, cname, ttl=3600, if_none_match=None):
CnameRecord = cmd.get_models('CnameRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CnameRecord(cname=cname)
record_type = 'cname'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, ttl=ttl, if_none_match=if_none_match)
def add_dns_mx_record(cmd, resource_group_name, zone_name, record_set_name, preference, exchange,
ttl=3600, if_none_match=None):
MxRecord = cmd.get_models('MxRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = MxRecord(preference=int(preference), exchange=exchange)
record_type = 'mx'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_ns_record(cmd, resource_group_name, zone_name, record_set_name, dname,
subscription_id=None, ttl=3600, if_none_match=None):
NsRecord = cmd.get_models('NsRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = NsRecord(nsdname=dname)
record_type = 'ns'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
subscription_id=subscription_id, ttl=ttl, if_none_match=if_none_match)
def add_dns_ptr_record(cmd, resource_group_name, zone_name, record_set_name, dname, ttl=3600, if_none_match=None):
PtrRecord = cmd.get_models('PtrRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = PtrRecord(ptrdname=dname)
record_type = 'ptr'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def update_dns_soa_record(cmd, resource_group_name, zone_name, host=None, email=None,
serial_number=None, refresh_time=None, retry_time=None, expire_time=None,
minimum_ttl=3600, if_none_match=None):
record_set_name = '@'
record_type = 'soa'
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
record = record_set.soa_record
record.host = host or record.host
record.email = email or record.email
record.serial_number = serial_number or record.serial_number
record.refresh_time = refresh_time or record.refresh_time
record.retry_time = retry_time or record.retry_time
record.expire_time = expire_time or record.expire_time
record.minimum_ttl = minimum_ttl or record.minimum_ttl
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, if_none_match=if_none_match)
def add_dns_srv_record(cmd, resource_group_name, zone_name, record_set_name, priority, weight,
port, target, if_none_match=None):
SrvRecord = cmd.get_models('SrvRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = SrvRecord(priority=priority, weight=weight, port=port, target=target)
record_type = 'srv'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
if_none_match=if_none_match)
def add_dns_txt_record(cmd, resource_group_name, zone_name, record_set_name, value, if_none_match=None):
TxtRecord = cmd.get_models('TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = TxtRecord(value=value)
record_type = 'txt'
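    # DNS TXT values are stored as character-strings of at most 255 characters, so the supplied
    # value is split into 255-character chunks; the assert below verifies nothing was lost.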
long_text = ''.join(x for x in record.value)
original_len = len(long_text)
record.value = []
while len(long_text) > 255:
record.value.append(long_text[:255])
long_text = long_text[255:]
record.value.append(long_text)
final_str = ''.join(record.value)
final_len = len(final_str)
assert original_len == final_len
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
if_none_match=if_none_match)
def remove_dns_aaaa_record(cmd, resource_group_name, zone_name, record_set_name, ipv6_address,
keep_empty_record_set=False):
AaaaRecord = cmd.get_models('AaaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = AaaaRecord(ipv6_address=ipv6_address)
record_type = 'aaaa'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_a_record(cmd, resource_group_name, zone_name, record_set_name, ipv4_address,
keep_empty_record_set=False):
ARecord = cmd.get_models('ARecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = ARecord(ipv4_address=ipv4_address)
record_type = 'a'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_caa_record(cmd, resource_group_name, zone_name, record_set_name, value,
flags, tag, keep_empty_record_set=False):
CaaRecord = cmd.get_models('CaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CaaRecord(flags=flags, tag=tag, value=value)
record_type = 'caa'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_cname_record(cmd, resource_group_name, zone_name, record_set_name, cname,
keep_empty_record_set=False):
CnameRecord = cmd.get_models('CnameRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CnameRecord(cname=cname)
record_type = 'cname'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, keep_empty_record_set=keep_empty_record_set)
def remove_dns_mx_record(cmd, resource_group_name, zone_name, record_set_name, preference, exchange,
keep_empty_record_set=False):
MxRecord = cmd.get_models('MxRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = MxRecord(preference=int(preference), exchange=exchange)
record_type = 'mx'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_ns_record(cmd, resource_group_name, zone_name, record_set_name, dname,
keep_empty_record_set=False):
NsRecord = cmd.get_models('NsRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = NsRecord(nsdname=dname)
record_type = 'ns'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_ptr_record(cmd, resource_group_name, zone_name, record_set_name, dname,
keep_empty_record_set=False):
PtrRecord = cmd.get_models('PtrRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = PtrRecord(ptrdname=dname)
record_type = 'ptr'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_srv_record(cmd, resource_group_name, zone_name, record_set_name, priority, weight,
port, target, keep_empty_record_set=False):
SrvRecord = cmd.get_models('SrvRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = SrvRecord(priority=priority, weight=weight, port=port, target=target)
record_type = 'srv'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_txt_record(cmd, resource_group_name, zone_name, record_set_name, value,
keep_empty_record_set=False):
TxtRecord = cmd.get_models('TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = TxtRecord(value=value)
record_type = 'txt'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def _check_a_record_exist(record, exist_list):
for r in exist_list:
if r.ipv4_address == record.ipv4_address:
return True
return False
def _check_aaaa_record_exist(record, exist_list):
for r in exist_list:
if r.ipv6_address == record.ipv6_address:
return True
return False
def _check_caa_record_exist(record, exist_list):
for r in exist_list:
if (r.flags == record.flags and
r.tag == record.tag and
r.value == record.value):
return True
return False
def _check_cname_record_exist(record, exist_list):
for r in exist_list:
if r.cname == record.cname:
return True
return False
def _check_mx_record_exist(record, exist_list):
for r in exist_list:
if (r.preference == record.preference and
r.exchange == record.exchange):
return True
return False
def _check_ns_record_exist(record, exist_list):
for r in exist_list:
if r.nsdname == record.nsdname:
return True
return False
def _check_ptr_record_exist(record, exist_list):
for r in exist_list:
if r.ptrdname == record.ptrdname:
return True
return False
def _check_srv_record_exist(record, exist_list):
for r in exist_list:
if (r.priority == record.priority and
r.weight == record.weight and
r.port == record.port and
r.target == record.target):
return True
return False
def _check_txt_record_exist(record, exist_list):
for r in exist_list:
if r.value == record.value:
return True
return False
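# Dispatch helper: looks up the per-type '_check_<type>_record_exist' function defined above via
# globals(). It is used by _add_record to avoid appending duplicate records to a record set.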
def _record_exist_func(record_type):
return globals()["_check_{}_record_exist".format(record_type)]
def _add_record(record_set, record, record_type, is_list=False):
record_property = _type_to_property_name(record_type)
if is_list:
record_list = getattr(record_set, record_property)
if record_list is None:
setattr(record_set, record_property, [])
record_list = getattr(record_set, record_property)
_record_exist = _record_exist_func(record_type)
if not _record_exist(record, record_list):
record_list.append(record)
else:
setattr(record_set, record_property, record)
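# Fetches the existing record set (or creates a new one with a 3600s default TTL when the GET fails),
# merges the new record into it and writes it back with create_or_update. When if_none_match is set,
# '*' is sent so the call only succeeds if the record set does not already exist.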
def _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=True, subscription_id=None, ttl=None, if_none_match=None):
from azure.core.exceptions import HttpResponseError
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS,
subscription_id=subscription_id).record_sets
try:
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
except HttpResponseError:
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
record_set = RecordSet(ttl=3600)
if ttl is not None:
record_set.ttl = ttl
_add_record(record_set, record, record_type, is_list)
return ncf.create_or_update(resource_group_name, zone_name, record_set_name,
record_type, record_set,
if_none_match='*' if if_none_match else None)
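# Removes the matching record from the record set; if no records remain and keep_empty_record_set
# is not set, the whole record set is deleted instead of being updated.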
def _remove_record(cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set, is_list=True):
ncf = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
record_property = _type_to_property_name(record_type)
if is_list:
record_list = getattr(record_set, record_property)
if record_list is not None:
keep_list = [r for r in record_list
if not dict_matches_filter(r.__dict__, record.__dict__)]
if len(keep_list) == len(record_list):
raise CLIError('Record {} not found.'.format(str(record)))
setattr(record_set, record_property, keep_list)
else:
setattr(record_set, record_property, None)
if is_list:
records_remaining = len(getattr(record_set, record_property))
else:
records_remaining = 1 if getattr(record_set, record_property) is not None else 0
if not records_remaining and not keep_empty_record_set:
logger.info('Removing empty %s record set: %s', record_type, record_set_name)
return ncf.delete(resource_group_name, zone_name, record_set_name, record_type)
return ncf.create_or_update(resource_group_name, zone_name, record_set_name, record_type, record_set)
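# A record matches the filter when every populated filter field compares equal, either by string
# value or, for list-valued fields such as TXT values, as an unordered multiset.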
def dict_matches_filter(d, filter_dict):
sentinel = object()
return all(not filter_dict.get(key, None) or
str(filter_dict[key]) == str(d.get(key, sentinel)) or
lists_match(filter_dict[key], d.get(key, []))
for key in filter_dict)
def lists_match(l1, l2):
try:
return Counter(l1) == Counter(l2) # pylint: disable=too-many-function-args
except TypeError:
return False
# endregion
# region ExpressRoutes
def create_express_route(cmd, circuit_name, resource_group_name, bandwidth_in_mbps, peering_location,
service_provider_name, location=None, tags=None, no_wait=False,
sku_family=None, sku_tier=None, allow_global_reach=None, express_route_port=None,
allow_classic_operations=None):
ExpressRouteCircuit, ExpressRouteCircuitSku, ExpressRouteCircuitServiceProviderProperties, SubResource = \
cmd.get_models(
'ExpressRouteCircuit', 'ExpressRouteCircuitSku', 'ExpressRouteCircuitServiceProviderProperties',
'SubResource')
client = network_client_factory(cmd.cli_ctx).express_route_circuits
sku_name = '{}_{}'.format(sku_tier, sku_family)
circuit = ExpressRouteCircuit(
location=location, tags=tags,
service_provider_properties=ExpressRouteCircuitServiceProviderProperties(
service_provider_name=service_provider_name,
peering_location=peering_location,
bandwidth_in_mbps=bandwidth_in_mbps if not express_route_port else None),
sku=ExpressRouteCircuitSku(name=sku_name, tier=sku_tier, family=sku_family),
allow_global_reach=allow_global_reach,
bandwidth_in_gbps=(int(bandwidth_in_mbps) / 1000) if express_route_port else None
)
if cmd.supported_api_version(min_api='2010-07-01') and allow_classic_operations is not None:
circuit.allow_classic_operations = allow_classic_operations
if cmd.supported_api_version(min_api='2018-08-01') and express_route_port:
circuit.express_route_port = SubResource(id=express_route_port)
circuit.service_provider_properties = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, circuit_name, circuit)
def update_express_route(instance, cmd, bandwidth_in_mbps=None, peering_location=None,
service_provider_name=None, sku_family=None, sku_tier=None, tags=None,
allow_global_reach=None, express_route_port=None,
allow_classic_operations=None):
with cmd.update_context(instance) as c:
c.set_param('allow_classic_operations', allow_classic_operations)
c.set_param('tags', tags)
c.set_param('allow_global_reach', allow_global_reach)
with cmd.update_context(instance.sku) as c:
c.set_param('family', sku_family)
c.set_param('tier', sku_tier)
with cmd.update_context(instance.service_provider_properties) as c:
c.set_param('peering_location', peering_location)
c.set_param('service_provider_name', service_provider_name)
if express_route_port is not None:
SubResource = cmd.get_models('SubResource')
instance.express_route_port = SubResource(id=express_route_port)
instance.service_provider_properties = None
if bandwidth_in_mbps is not None:
if not instance.express_route_port:
            instance.service_provider_properties.bandwidth_in_mbps = float(bandwidth_in_mbps)
else:
instance.bandwidth_in_gbps = (float(bandwidth_in_mbps) / 1000)
return instance
def create_express_route_peering_connection(cmd, resource_group_name, circuit_name, peering_name, connection_name,
peer_circuit, address_prefix, authorization_key=None):
client = network_client_factory(cmd.cli_ctx).express_route_circuit_connections
ExpressRouteCircuitConnection, SubResource = cmd.get_models('ExpressRouteCircuitConnection', 'SubResource')
source_circuit = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='expressRouteCircuits',
name=circuit_name,
child_type_1='peerings',
child_name_1=peering_name
)
conn = ExpressRouteCircuitConnection(
express_route_circuit_peering=SubResource(id=source_circuit),
peer_express_route_circuit_peering=SubResource(id=peer_circuit),
address_prefix=address_prefix,
authorization_key=authorization_key
)
return client.begin_create_or_update(resource_group_name, circuit_name, peering_name, connection_name, conn)
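# Returns True when all of the given prefixes are IPv6 and False when they are all IPv4; a mix of
# the two address families, or an unparsable prefix, raises a CLIError.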
def _validate_ipv6_address_prefixes(prefixes):
from ipaddress import ip_network, IPv6Network
prefixes = prefixes if isinstance(prefixes, list) else [prefixes]
version = None
for prefix in prefixes:
try:
network = ip_network(prefix)
if version is None:
version = type(network)
else:
if not isinstance(network, version): # pylint: disable=isinstance-second-argument-not-valid-type
raise CLIError("usage error: '{}' incompatible mix of IPv4 and IPv6 address prefixes."
.format(prefixes))
except ValueError:
raise CLIError("usage error: prefix '{}' is not recognized as an IPv4 or IPv6 address prefix."
.format(prefix))
return version == IPv6Network
def create_express_route_peering(
cmd, client, resource_group_name, circuit_name, peering_type, peer_asn, vlan_id,
primary_peer_address_prefix, secondary_peer_address_prefix, shared_key=None,
advertised_public_prefixes=None, customer_asn=None, routing_registry_name=None,
route_filter=None, legacy_mode=None, ip_version='IPv4'):
(ExpressRouteCircuitPeering, ExpressRouteCircuitPeeringConfig, RouteFilter) = \
cmd.get_models('ExpressRouteCircuitPeering', 'ExpressRouteCircuitPeeringConfig', 'RouteFilter')
if cmd.supported_api_version(min_api='2018-02-01'):
ExpressRoutePeeringType = cmd.get_models('ExpressRoutePeeringType')
else:
ExpressRoutePeeringType = cmd.get_models('ExpressRouteCircuitPeeringType')
if ip_version == 'IPv6' and cmd.supported_api_version(min_api='2020-08-01'):
Ipv6ExpressRouteCircuitPeeringConfig = cmd.get_models('Ipv6ExpressRouteCircuitPeeringConfig')
if peering_type == ExpressRoutePeeringType.microsoft_peering.value:
microsoft_config = ExpressRouteCircuitPeeringConfig(advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
else:
microsoft_config = None
ipv6 = Ipv6ExpressRouteCircuitPeeringConfig(primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
microsoft_peering_config=microsoft_config,
route_filter=route_filter)
peering = ExpressRouteCircuitPeering(peering_type=peering_type, ipv6_peering_config=ipv6, peer_asn=peer_asn,
vlan_id=vlan_id)
else:
peering = ExpressRouteCircuitPeering(
peering_type=peering_type, peer_asn=peer_asn, vlan_id=vlan_id,
primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
shared_key=shared_key)
if peering_type == ExpressRoutePeeringType.microsoft_peering.value:
peering.microsoft_peering_config = ExpressRouteCircuitPeeringConfig(
advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
if cmd.supported_api_version(min_api='2016-12-01') and route_filter:
peering.route_filter = RouteFilter(id=route_filter)
if cmd.supported_api_version(min_api='2017-10-01') and legacy_mode is not None:
peering.microsoft_peering_config.legacy_mode = legacy_mode
return client.begin_create_or_update(resource_group_name, circuit_name, peering_type, peering)
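# Builds or updates the IPv6 peering configuration of an ExpressRoute circuit peering: an existing
# config is patched in place, otherwise a new Ipv6ExpressRouteCircuitPeeringConfig is created
# together with its Microsoft peering settings and optional route filter.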
def _create_or_update_ipv6_peering(cmd, config, primary_peer_address_prefix, secondary_peer_address_prefix,
route_filter, advertised_public_prefixes, customer_asn, routing_registry_name):
if config:
# update scenario
with cmd.update_context(config) as c:
c.set_param('primary_peer_address_prefix', primary_peer_address_prefix)
c.set_param('secondary_peer_address_prefix', secondary_peer_address_prefix)
c.set_param('advertised_public_prefixes', advertised_public_prefixes)
c.set_param('customer_asn', customer_asn)
c.set_param('routing_registry_name', routing_registry_name)
if route_filter:
RouteFilter = cmd.get_models('RouteFilter')
config.route_filter = RouteFilter(id=route_filter)
else:
# create scenario
IPv6Config, MicrosoftPeeringConfig = cmd.get_models(
'Ipv6ExpressRouteCircuitPeeringConfig', 'ExpressRouteCircuitPeeringConfig')
microsoft_config = MicrosoftPeeringConfig(advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
config = IPv6Config(primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
microsoft_peering_config=microsoft_config,
route_filter=route_filter)
return config
def update_express_route_peering(cmd, instance, peer_asn=None, primary_peer_address_prefix=None,
secondary_peer_address_prefix=None, vlan_id=None, shared_key=None,
advertised_public_prefixes=None, customer_asn=None,
routing_registry_name=None, route_filter=None, ip_version='IPv4',
legacy_mode=None):
# update settings common to all peering types
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('vlan_id', vlan_id)
c.set_param('shared_key', shared_key)
if ip_version == 'IPv6':
# update is the only way to add IPv6 peering options
instance.ipv6_peering_config = _create_or_update_ipv6_peering(cmd, instance.ipv6_peering_config,
primary_peer_address_prefix,
secondary_peer_address_prefix, route_filter,
advertised_public_prefixes, customer_asn,
routing_registry_name)
else:
# IPv4 Microsoft Peering (or non-Microsoft Peering)
with cmd.update_context(instance) as c:
c.set_param('primary_peer_address_prefix', primary_peer_address_prefix)
c.set_param('secondary_peer_address_prefix', secondary_peer_address_prefix)
if route_filter is not None:
RouteFilter = cmd.get_models('RouteFilter')
instance.route_filter = RouteFilter(id=route_filter)
try:
with cmd.update_context(instance.microsoft_peering_config) as c:
c.set_param('advertised_public_prefixes', advertised_public_prefixes)
c.set_param('customer_asn', customer_asn)
c.set_param('routing_registry_name', routing_registry_name)
c.set_param('legacy_mode', legacy_mode)
except AttributeError:
raise CLIError('--advertised-public-prefixes, --customer-asn, --routing-registry-name and '
'--legacy-mode are only applicable for Microsoft Peering.')
return instance
# endregion
# region ExpressRoute Connection
# pylint: disable=unused-argument
def create_express_route_connection(cmd, resource_group_name, express_route_gateway_name, connection_name,
peering, circuit_name=None, authorization_key=None, routing_weight=None,
enable_internet_security=None, associated_route_table=None,
propagated_route_tables=None, labels=None):
ExpressRouteConnection, SubResource, RoutingConfiguration, PropagatedRouteTable\
= cmd.get_models('ExpressRouteConnection', 'SubResource', 'RoutingConfiguration', 'PropagatedRouteTable')
client = network_client_factory(cmd.cli_ctx).express_route_connections
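    # Wrap the raw route-table IDs and labels into a PropagatedRouteTable, then combine it with the
    # associated route table into the connection's RoutingConfiguration.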
propagated_route_tables = PropagatedRouteTable(
labels=labels,
ids=[SubResource(id=propagated_route_table) for propagated_route_table in
propagated_route_tables] if propagated_route_tables else None
)
routing_configuration = RoutingConfiguration(
associated_route_table=SubResource(id=associated_route_table),
propagated_route_tables=propagated_route_tables
)
connection = ExpressRouteConnection(
name=connection_name,
express_route_circuit_peering=SubResource(id=peering) if peering else None,
authorization_key=authorization_key,
routing_weight=routing_weight,
routing_configuration=routing_configuration
)
if enable_internet_security and cmd.supported_api_version(min_api='2019-09-01'):
connection.enable_internet_security = enable_internet_security
return client.begin_create_or_update(resource_group_name, express_route_gateway_name, connection_name, connection)
# pylint: disable=unused-argument
def update_express_route_connection(instance, cmd, circuit_name=None, peering=None, authorization_key=None,
routing_weight=None, enable_internet_security=None, associated_route_table=None,
propagated_route_tables=None, labels=None):
SubResource = cmd.get_models('SubResource')
if peering is not None:
        instance.express_route_circuit_peering = SubResource(id=peering)
if authorization_key is not None:
instance.authorization_key = authorization_key
if routing_weight is not None:
instance.routing_weight = routing_weight
if enable_internet_security is not None and cmd.supported_api_version(min_api='2019-09-01'):
instance.enable_internet_security = enable_internet_security
if associated_route_table is not None or propagated_route_tables is not None or labels is not None:
if instance.routing_configuration is None:
RoutingConfiguration = cmd.get_models('RoutingConfiguration')
instance.routing_configuration = RoutingConfiguration()
if associated_route_table is not None:
instance.routing_configuration.associated_route_table = SubResource(id=associated_route_table)
if propagated_route_tables is not None or labels is not None:
if instance.routing_configuration.propagated_route_tables is None:
PropagatedRouteTable = cmd.get_models('PropagatedRouteTable')
instance.routing_configuration.propagated_route_tables = PropagatedRouteTable()
if propagated_route_tables is not None:
instance.routing_configuration.propagated_route_tables.ids = [SubResource(id=propagated_route_table) for propagated_route_table in propagated_route_tables] # pylint: disable=line-too-long
if labels is not None:
instance.routing_configuration.propagated_route_tables.labels = labels
return instance
# endregion
# region ExpressRoute Gateways
def create_express_route_gateway(cmd, resource_group_name, express_route_gateway_name, location=None, tags=None,
min_val=2, max_val=None, virtual_hub=None):
ExpressRouteGateway, SubResource = cmd.get_models('ExpressRouteGateway', 'SubResource')
client = network_client_factory(cmd.cli_ctx).express_route_gateways
gateway = ExpressRouteGateway(
location=location,
tags=tags,
virtual_hub=SubResource(id=virtual_hub) if virtual_hub else None
)
    if min_val is not None or max_val is not None:
        gateway.auto_scale_configuration = {'bounds': {'min': min_val, 'max': max_val}}
return client.begin_create_or_update(resource_group_name, express_route_gateway_name, gateway)
def update_express_route_gateway(instance, cmd, tags=None, min_val=None, max_val=None):
def _ensure_autoscale():
if not instance.auto_scale_configuration:
ExpressRouteGatewayPropertiesAutoScaleConfiguration, \
ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds = cmd.get_models(
'ExpressRouteGatewayPropertiesAutoScaleConfiguration',
'ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds')
instance.auto_scale_configuration = ExpressRouteGatewayPropertiesAutoScaleConfiguration(
                bounds=ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds(min=min_val, max=max_val))
    if tags is not None:
        instance.tags = tags
    if min_val is not None:
        _ensure_autoscale()
        instance.auto_scale_configuration.bounds.min = min_val
    if max_val is not None:
        _ensure_autoscale()
        instance.auto_scale_configuration.bounds.max = max_val
return instance
def list_express_route_gateways(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).express_route_gateways
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
# endregion
# region ExpressRoute ports
def create_express_route_port(cmd, resource_group_name, express_route_port_name, location=None, tags=None,
peering_location=None, bandwidth_in_gbps=None, encapsulation=None):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ExpressRoutePort = cmd.get_models('ExpressRoutePort')
if bandwidth_in_gbps is not None:
bandwidth_in_gbps = int(bandwidth_in_gbps)
port = ExpressRoutePort(
location=location,
tags=tags,
peering_location=peering_location,
bandwidth_in_gbps=bandwidth_in_gbps,
encapsulation=encapsulation
)
return client.begin_create_or_update(resource_group_name, express_route_port_name, port)
def update_express_route_port(cmd, instance, tags=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags, True)
return instance
def download_generated_loa_as_pdf(cmd,
resource_group_name,
express_route_port_name,
customer_name,
file_path='loa.pdf'):
import os
import base64
dirname, basename = os.path.dirname(file_path), os.path.basename(file_path)
if basename == '':
basename = 'loa.pdf'
elif basename.endswith('.pdf') is False:
basename = basename + '.pdf'
file_path = os.path.join(dirname, basename)
generate_express_route_ports_loa_request =\
cmd.get_models('GenerateExpressRoutePortsLOARequest')(customer_name=customer_name)
client = network_client_factory(cmd.cli_ctx).express_route_ports
response = client.generate_loa(resource_group_name, express_route_port_name,
generate_express_route_ports_loa_request)
encoded_content = base64.b64decode(response.encoded_content)
from azure.cli.core.azclierror import FileOperationError
try:
with open(file_path, 'wb') as f:
f.write(encoded_content)
except OSError as ex:
raise FileOperationError(ex)
logger.warning("The generated letter of authorization is saved at %s", file_path)
def list_express_route_ports(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).express_route_ports
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def assign_express_route_port_identity(cmd, resource_group_name, express_route_port_name,
user_assigned_identity, no_wait=False):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
ManagedServiceIdentity, ManagedServiceIdentityUserAssignedIdentitiesValue = \
cmd.get_models('ManagedServiceIdentity', 'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
user_assigned_identity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
user_assigned_identities_instance = dict()
user_assigned_identities_instance[user_assigned_identity] = user_assigned_identity_instance
identity_instance = ManagedServiceIdentity(type="UserAssigned",
user_assigned_identities=user_assigned_identities_instance)
ports.identity = identity_instance
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, express_route_port_name, ports)
def remove_express_route_port_identity(cmd, resource_group_name, express_route_port_name, no_wait=False):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
if ports.identity is None:
logger.warning("The identity of the ExpressRoute Port doesn't exist.")
return ports
ports.identity = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, express_route_port_name, ports)
def show_express_route_port_identity(cmd, resource_group_name, express_route_port_name):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
return ports.identity
def update_express_route_port_link(cmd, instance, parent, express_route_port_name, link_name,
macsec_cak_secret_identifier=None, macsec_ckn_secret_identifier=None,
macsec_sci_state=None, macsec_cipher=None, admin_state=None):
"""
:param cmd:
:param instance: an instance of ExpressRoutePort
:param express_route_port_name:
:param link_name:
:param macsec_cak_secret_identifier:
:param macsec_ckn_secret_identifier:
:param macsec_cipher:
:param admin_state:
:return:
"""
if any([macsec_cak_secret_identifier, macsec_ckn_secret_identifier, macsec_cipher, macsec_sci_state]):
instance.mac_sec_config.cak_secret_identifier = macsec_cak_secret_identifier
instance.mac_sec_config.ckn_secret_identifier = macsec_ckn_secret_identifier
# TODO https://github.com/Azure/azure-rest-api-specs/issues/7569
# need to remove this conversion when the issue is fixed.
if macsec_cipher is not None:
macsec_ciphers_tmp = {'gcm-aes-128': 'GcmAes128', 'gcm-aes-256': 'GcmAes256'}
macsec_cipher = macsec_ciphers_tmp.get(macsec_cipher, macsec_cipher)
instance.mac_sec_config.cipher = macsec_cipher
instance.mac_sec_config.sci_state = macsec_sci_state
if admin_state is not None:
instance.admin_state = admin_state
return parent
# endregion
# region PrivateEndpoint
def create_private_endpoint(cmd, resource_group_name, private_endpoint_name, subnet,
private_connection_resource_id, connection_name, group_ids=None,
virtual_network_name=None, tags=None, location=None,
request_message=None, manual_request=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
PrivateEndpoint, Subnet, PrivateLinkServiceConnection = cmd.get_models('PrivateEndpoint',
'Subnet',
'PrivateLinkServiceConnection')
pls_connection = PrivateLinkServiceConnection(private_link_service_id=private_connection_resource_id,
group_ids=group_ids,
request_message=request_message,
name=connection_name)
private_endpoint = PrivateEndpoint(
location=location,
tags=tags,
subnet=Subnet(id=subnet)
)
if manual_request:
private_endpoint.manual_private_link_service_connections = [pls_connection]
else:
private_endpoint.private_link_service_connections = [pls_connection]
if edge_zone:
private_endpoint.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, private_endpoint_name, private_endpoint)
def update_private_endpoint(instance, cmd, tags=None, request_message=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
if request_message is not None:
if instance.private_link_service_connections:
instance.private_link_service_connections[0].request_message = request_message
else:
instance.manual_private_link_service_connections[0].request_message = request_message
return instance
def list_private_endpoints(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
if resource_group_name:
return client.list(resource_group_name)
return client.list_by_subscription()
def create_private_endpoint_private_dns_zone_group(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name, private_dns_zone):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
PrivateDnsZoneGroup, PrivateDnsZoneConfig = cmd.get_models('PrivateDnsZoneGroup', 'PrivateDnsZoneConfig')
private_dns_zone_group = PrivateDnsZoneGroup(name=private_dns_zone_group_name,
private_dns_zone_configs=[PrivateDnsZoneConfig(private_dns_zone_id=private_dns_zone, # pylint: disable=line-too-long
name=private_dns_zone_name)]) # pylint: disable=line-too-long
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
def add_private_endpoint_private_dns_zone(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name, private_dns_zone):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
PrivateDnsZoneConfig = cmd.get_models('PrivateDnsZoneConfig')
private_dns_zone_group = client.get(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name)
private_dns_zone = PrivateDnsZoneConfig(private_dns_zone_id=private_dns_zone, name=private_dns_zone_name)
private_dns_zone_group.private_dns_zone_configs.append(private_dns_zone)
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
def remove_private_endpoint_private_dns_zone(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
private_dns_zone_group = client.get(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name)
private_dns_zone_configs = [item for item in private_dns_zone_group.private_dns_zone_configs if item.name != private_dns_zone_name] # pylint: disable=line-too-long
private_dns_zone_group.private_dns_zone_configs = private_dns_zone_configs
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
# endregion
# region PrivateLinkService
def create_private_link_service(cmd, resource_group_name, service_name, subnet, frontend_ip_configurations,
private_ip_address=None, private_ip_allocation_method=None,
private_ip_address_version=None,
virtual_network_name=None, public_ip_address=None,
location=None, tags=None, load_balancer_name=None,
visibility=None, auto_approval=None, fqdns=None,
enable_proxy_protocol=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
FrontendIPConfiguration, PrivateLinkService, PrivateLinkServiceIpConfiguration, PublicIPAddress, Subnet = \
cmd.get_models('FrontendIPConfiguration', 'PrivateLinkService', 'PrivateLinkServiceIpConfiguration',
'PublicIPAddress', 'Subnet')
pls_ip_config = PrivateLinkServiceIpConfiguration(
name='{}_ipconfig_0'.format(service_name),
private_ip_address=private_ip_address,
private_ip_allocation_method=private_ip_allocation_method,
private_ip_address_version=private_ip_address_version,
subnet=subnet and Subnet(id=subnet),
public_ip_address=public_ip_address and PublicIPAddress(id=public_ip_address)
)
link_service = PrivateLinkService(
location=location,
load_balancer_frontend_ip_configurations=frontend_ip_configurations and [
FrontendIPConfiguration(id=ip_config) for ip_config in frontend_ip_configurations
],
ip_configurations=[pls_ip_config],
        visibility=visibility,
auto_approval=auto_approval,
fqdns=fqdns,
tags=tags,
enable_proxy_protocol=enable_proxy_protocol
)
if edge_zone:
link_service.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
def update_private_link_service(instance, cmd, tags=None, frontend_ip_configurations=None, load_balancer_name=None,
visibility=None, auto_approval=None, fqdns=None, enable_proxy_protocol=None):
FrontendIPConfiguration = cmd.get_models('FrontendIPConfiguration')
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
c.set_param('load_balancer_frontend_ip_configurations', frontend_ip_configurations and [
FrontendIPConfiguration(id=ip_config) for ip_config in frontend_ip_configurations
])
c.set_param('visibility', visibility)
c.set_param('auto_approval', auto_approval)
c.set_param('fqdns', fqdns)
c.set_param('enable_proxy_protocol', enable_proxy_protocol)
return instance
def list_private_link_services(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
if resource_group_name:
return client.list(resource_group_name)
return client.list_by_subscription()
def update_private_endpoint_connection(cmd, resource_group_name, service_name, pe_connection_name,
connection_status, description=None, action_required=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
PrivateEndpointConnection, PrivateLinkServiceConnectionState = cmd.get_models('PrivateEndpointConnection',
'PrivateLinkServiceConnectionState')
connection_state = PrivateLinkServiceConnectionState(
status=connection_status,
description=description,
actions_required=action_required
)
pe_connection = PrivateEndpointConnection(
private_link_service_connection_state=connection_state
)
return client.update_private_endpoint_connection(resource_group_name, service_name, pe_connection_name, pe_connection) # pylint: disable=line-too-long
def add_private_link_services_ipconfig(cmd, resource_group_name, service_name,
private_ip_address=None, private_ip_allocation_method=None,
private_ip_address_version=None,
subnet=None, virtual_network_name=None, public_ip_address=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
PrivateLinkServiceIpConfiguration, PublicIPAddress, Subnet = cmd.get_models('PrivateLinkServiceIpConfiguration',
'PublicIPAddress',
'Subnet')
link_service = client.get(resource_group_name, service_name)
if link_service is None:
raise CLIError("Private link service should be existed. Please create it first.")
ip_name_index = len(link_service.ip_configurations)
ip_config = PrivateLinkServiceIpConfiguration(
name='{0}_ipconfig_{1}'.format(service_name, ip_name_index),
private_ip_address=private_ip_address,
private_ip_allocation_method=private_ip_allocation_method,
private_ip_address_version=private_ip_address_version,
subnet=subnet and Subnet(id=subnet),
public_ip_address=public_ip_address and PublicIPAddress(id=public_ip_address)
)
link_service.ip_configurations.append(ip_config)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
def remove_private_link_services_ipconfig(cmd, resource_group_name, service_name, ip_config_name):
client = network_client_factory(cmd.cli_ctx).private_link_services
link_service = client.get(resource_group_name, service_name)
if link_service is None:
raise CLIError("Private link service should be existed. Please create it first.")
ip_config = None
for item in link_service.ip_configurations:
if item.name == ip_config_name:
ip_config = item
break
if ip_config is None: # pylint: disable=no-else-return
logger.warning("%s ip configuration doesn't exist", ip_config_name)
return link_service
else:
link_service.ip_configurations.remove(ip_config)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
# endregion
def _edge_zone_model(cmd, edge_zone):
ExtendedLocation, ExtendedLocationTypes = cmd.get_models('ExtendedLocation', 'ExtendedLocationTypes')
return ExtendedLocation(name=edge_zone, type=ExtendedLocationTypes.EDGE_ZONE)
# region LoadBalancers
def create_load_balancer(cmd, load_balancer_name, resource_group_name, location=None, tags=None,
backend_pool_name=None, frontend_ip_name='LoadBalancerFrontEnd',
private_ip_address=None, public_ip_address=None,
public_ip_address_allocation=None,
public_ip_dns_name=None, subnet=None, subnet_address_prefix='10.0.0.0/24',
virtual_network_name=None, vnet_address_prefix='10.0.0.0/16',
public_ip_address_type=None, subnet_type=None, validate=False,
no_wait=False, sku=None, frontend_ip_zone=None, public_ip_zone=None,
private_ip_address_version=None, edge_zone=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_load_balancer_resource, build_public_ip_resource, build_vnet_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
tags = tags or {}
public_ip_address = public_ip_address or 'PublicIP{}'.format(load_balancer_name)
backend_pool_name = backend_pool_name or '{}bepool'.format(load_balancer_name)
if not public_ip_address_allocation:
public_ip_address_allocation = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
# Build up the ARM template
master_template = ArmTemplateBuilder()
lb_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
subnet_id = subnet if is_valid_resource_id(subnet) else None
private_ip_allocation = IPAllocationMethod.static.value if private_ip_address \
else IPAllocationMethod.dynamic.value
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if edge_zone and cmd.supported_api_version(min_api='2020-08-01'):
edge_zone_type = 'EdgeZone'
else:
edge_zone_type = None
if subnet_type == 'new':
lb_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(virtual_network_name))
vnet = build_vnet_resource(
cmd, virtual_network_name, location, tags, vnet_address_prefix, subnet,
subnet_address_prefix)
master_template.add_resource(vnet)
subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(
network_id_template, virtual_network_name, subnet)
if public_ip_address_type == 'new':
lb_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
public_ip_dns_name,
sku, public_ip_zone, None, edge_zone, edge_zone_type))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
load_balancer_resource = build_load_balancer_resource(
cmd, load_balancer_name, location, tags, backend_pool_name, frontend_ip_name,
public_ip_id, subnet_id, private_ip_address, private_ip_allocation, sku,
frontend_ip_zone, private_ip_address_version, None, edge_zone, edge_zone_type)
load_balancer_resource['dependsOn'] = lb_dependencies
master_template.add_resource(load_balancer_resource)
master_template.add_output('loadBalancer', load_balancer_name, output_type='object')
template = master_template.build()
# deploy ARM template
deployment_name = 'lb_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def list_load_balancer_nic(cmd, resource_group_name, load_balancer_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_network_interfaces
return client.list(resource_group_name, load_balancer_name)
def create_lb_inbound_nat_rule(
cmd, resource_group_name, load_balancer_name, item_name, protocol, frontend_port,
backend_port, frontend_ip_name=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None):
InboundNatRule = cmd.get_models('InboundNatRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
frontend_ip = get_property(lb.frontend_ip_configurations, frontend_ip_name) # pylint: disable=no-member
new_rule = InboundNatRule(
name=item_name, protocol=protocol,
frontend_port=frontend_port, backend_port=backend_port,
frontend_ip_configuration=frontend_ip,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset)
upsert_to_collection(lb, 'inbound_nat_rules', new_rule, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().inbound_nat_rules, item_name)
# workaround for : https://github.com/Azure/azure-cli/issues/17071
def lb_get(client, resource_group_name, load_balancer_name):
lb = client.get(resource_group_name, load_balancer_name)
return lb_get_operation(lb)
# workaround for : https://github.com/Azure/azure-cli/issues/17071
def lb_get_operation(lb):
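    # Clear 'zones' on zone-redundant (3+ zones) frontend IP configurations that have no subnet
    # before the load balancer is PUT back; see the linked issue above for the background.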
for item in lb.frontend_ip_configurations:
if item.zones is not None and len(item.zones) >= 3 and item.subnet is None:
item.zones = None
return lb
def set_lb_inbound_nat_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None):
if frontend_ip_name:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if enable_tcp_reset is not None:
instance.enable_tcp_reset = enable_tcp_reset
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('enable_floating_ip', floating_ip)
return parent
def create_lb_inbound_nat_pool(
cmd, resource_group_name, load_balancer_name, item_name, protocol, frontend_port_range_start,
frontend_port_range_end, backend_port, frontend_ip_name=None, enable_tcp_reset=None,
floating_ip=None, idle_timeout=None):
InboundNatPool = cmd.get_models('InboundNatPool')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
frontend_ip = get_property(lb.frontend_ip_configurations, frontend_ip_name) \
if frontend_ip_name else None
new_pool = InboundNatPool(
name=item_name,
protocol=protocol,
frontend_ip_configuration=frontend_ip,
frontend_port_range_start=frontend_port_range_start,
frontend_port_range_end=frontend_port_range_end,
backend_port=backend_port,
enable_tcp_reset=enable_tcp_reset,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout)
upsert_to_collection(lb, 'inbound_nat_pools', new_pool, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().inbound_nat_pools, item_name)
def set_lb_inbound_nat_pool(
cmd, instance, parent, item_name, protocol=None,
frontend_port_range_start=None, frontend_port_range_end=None, backend_port=None,
frontend_ip_name=None, enable_tcp_reset=None, floating_ip=None, idle_timeout=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port_range_start', frontend_port_range_start)
c.set_param('frontend_port_range_end', frontend_port_range_end)
c.set_param('backend_port', backend_port)
c.set_param('enable_floating_ip', floating_ip)
c.set_param('idle_timeout_in_minutes', idle_timeout)
if enable_tcp_reset is not None:
instance.enable_tcp_reset = enable_tcp_reset
if frontend_ip_name == '':
instance.frontend_ip_configuration = None
elif frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
return parent
def create_lb_frontend_ip_configuration(
cmd, resource_group_name, load_balancer_name, item_name, public_ip_address=None,
public_ip_prefix=None, subnet=None, virtual_network_name=None, private_ip_address=None,
private_ip_address_version=None, private_ip_address_allocation=None, zone=None):
FrontendIPConfiguration, SubResource, Subnet = cmd.get_models(
'FrontendIPConfiguration', 'SubResource', 'Subnet')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if private_ip_address_allocation is None:
private_ip_address_allocation = 'static' if private_ip_address else 'dynamic'
new_config = FrontendIPConfiguration(
name=item_name,
private_ip_address=private_ip_address,
private_ip_address_version=private_ip_address_version,
private_ip_allocation_method=private_ip_address_allocation,
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
public_ip_prefix=SubResource(id=public_ip_prefix) if public_ip_prefix else None,
subnet=Subnet(id=subnet) if subnet else None)
if zone and cmd.supported_api_version(min_api='2017-06-01'):
new_config.zones = zone
upsert_to_collection(lb, 'frontend_ip_configurations', new_config, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().frontend_ip_configurations, item_name)
def update_lb_frontend_ip_configuration_setter(cmd, resource_group_name, load_balancer_name, parameters, gateway_lb):
aux_subscriptions = []
if is_valid_resource_id(gateway_lb):
aux_subscriptions.append(parse_resource_id(gateway_lb)['subscription'])
client = network_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions).load_balancers
return client.begin_create_or_update(resource_group_name, load_balancer_name, parameters)
def set_lb_frontend_ip_configuration(
cmd, instance, parent, item_name, private_ip_address=None,
private_ip_address_allocation=None, public_ip_address=None,
subnet=None, virtual_network_name=None, public_ip_prefix=None, gateway_lb=None):
PublicIPAddress, Subnet, SubResource = cmd.get_models('PublicIPAddress', 'Subnet', 'SubResource')
if not private_ip_address:
instance.private_ip_allocation_method = 'dynamic'
instance.private_ip_address = None
elif private_ip_address is not None:
instance.private_ip_allocation_method = 'static'
instance.private_ip_address = private_ip_address
# Doesn't support update operation for now
# if cmd.supported_api_version(min_api='2019-04-01'):
# instance.private_ip_address_version = private_ip_address_version
if subnet == '':
instance.subnet = None
elif subnet is not None:
instance.subnet = Subnet(id=subnet)
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if public_ip_prefix:
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
if gateway_lb is not None:
instance.gateway_load_balancer = None if gateway_lb == '' else SubResource(id=gateway_lb)
return parent
def _process_vnet_name_and_id(vnet, cmd, resource_group_name):
if vnet and not is_valid_resource_id(vnet):
vnet = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet)
return vnet
def _process_subnet_name_and_id(subnet, vnet, cmd, resource_group_name):
if subnet and not is_valid_resource_id(subnet):
vnet = _process_vnet_name_and_id(vnet, cmd, resource_group_name)
if vnet is None:
            raise UnrecognizedArgumentError('vnet must be provided when the subnet is specified by name instead of by ID')
subnet = vnet + f'/subnets/{subnet}'
return subnet
# pylint: disable=too-many-branches
def create_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
vnet=None, backend_addresses=None, backend_addresses_config_file=None):
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
if not isinstance(backend_addresses_config_file, list):
            raise CLIError('The config file must contain a list. Please see the example for reference.')
for addr in backend_addresses_config_file:
if not isinstance(addr, dict):
                raise CLIError('Each address in the config file must be a dictionary. Please see the example for reference.')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
(BackendAddressPool,
LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('BackendAddressPool',
'LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
# Before 2020-03-01, service doesn't support the other rest method.
# We have to use old one to keep backward compatibility.
# Same for basic sku. service refuses that basic sku lb call the other rest method.
if cmd.supported_api_version(max_api='2020-03-01') or lb.sku.name.lower() == 'basic':
new_pool = BackendAddressPool(name=backend_address_pool_name)
upsert_to_collection(lb, 'backend_address_pools', new_pool, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().backend_address_pools, backend_address_pool_name)
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
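    # Apply the --vnet value as the default virtual network for any address entry that does not
    # specify one of its own.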
for addr in addresses_pool:
if 'virtual_network' not in addr and vnet:
addr['virtual_network'] = vnet
# pylint: disable=line-too-long
if cmd.supported_api_version(min_api='2020-11-01'): # pylint: disable=too-many-nested-blocks
try:
if addresses_pool:
new_addresses = []
for addr in addresses_pool:
# vnet | subnet | status
# name/id | name/id/null | ok
# null | id | ok
if 'virtual_network' in addr:
address = LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
subnet=Subnet(id=_process_subnet_name_and_id(addr['subnet'], addr['virtual_network'], cmd, resource_group_name)) if 'subnet' in addr else None,
ip_address=addr['ip_address'])
elif 'subnet' in addr and is_valid_resource_id(addr['subnet']):
address = LoadBalancerBackendAddress(name=addr['name'],
subnet=Subnet(id=addr['subnet']),
ip_address=addr['ip_address'])
else:
raise KeyError
new_addresses.append(address)
else:
new_addresses = None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet '
'name | subnet id) information.')
else:
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
ip_address=addr['ip_address']) for addr in addresses_pool] if addresses_pool else None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, vnet and ip-address information.')
new_pool = BackendAddressPool(name=backend_address_pool_name,
load_balancer_backend_addresses=new_addresses)
    # When the SKU is 'gateway', 'tunnelInterfaces' must not be None; otherwise the service returns an error.
if cmd.supported_api_version(min_api='2021-02-01') and lb.sku.name.lower() == 'gateway':
GatewayLoadBalancerTunnelInterface = cmd.get_models('GatewayLoadBalancerTunnelInterface')
new_pool.tunnel_interfaces = [
GatewayLoadBalancerTunnelInterface(type='Internal', protocol='VXLAN', identifier=900)]
return ncf.load_balancer_backend_address_pools.begin_create_or_update(resource_group_name,
load_balancer_name,
backend_address_pool_name,
new_pool)
def delete_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name):
from azure.cli.core.commands import LongRunningOperation
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
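    # Basic SKU load balancers do not support the dedicated backend address pool API,
    # so for them the pool is removed by updating the whole load balancer resource instead.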
def delete_basic_lb_backend_address_pool():
new_be_pools = [pool for pool in lb.backend_address_pools
if pool.name.lower() != backend_address_pool_name.lower()]
lb.backend_address_pools = new_be_pools
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
result = LongRunningOperation(cmd.cli_ctx)(poller).backend_address_pools
if next((x for x in result if x.name.lower() == backend_address_pool_name.lower()), None):
raise CLIError("Failed to delete '{}' on '{}'".format(backend_address_pool_name, load_balancer_name))
if lb.sku.name.lower() == 'basic':
delete_basic_lb_backend_address_pool()
return None
return ncf.load_balancer_backend_address_pools.begin_delete(resource_group_name,
load_balancer_name,
backend_address_pool_name)
# region cross-region lb
def create_cross_region_load_balancer(cmd, load_balancer_name, resource_group_name, location=None, tags=None,
backend_pool_name=None, frontend_ip_name='LoadBalancerFrontEnd',
public_ip_address=None, public_ip_address_allocation=None,
public_ip_dns_name=None, public_ip_address_type=None, validate=False,
no_wait=False, frontend_ip_zone=None, public_ip_zone=None):
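    # Cross-region load balancers are always Standard SKU with the 'Global' tier; the resources
    # (optional new public IP plus the load balancer) are created via the ARM template built below.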
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_load_balancer_resource, build_public_ip_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
sku = 'standard'
tier = 'Global'
tags = tags or {}
public_ip_address = public_ip_address or 'PublicIP{}'.format(load_balancer_name)
backend_pool_name = backend_pool_name or '{}bepool'.format(load_balancer_name)
if not public_ip_address_allocation:
public_ip_address_allocation = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
# Build up the ARM template
master_template = ArmTemplateBuilder()
lb_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if public_ip_address_type == 'new':
lb_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
public_ip_dns_name,
sku, public_ip_zone, tier))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
load_balancer_resource = build_load_balancer_resource(
cmd, load_balancer_name, location, tags, backend_pool_name, frontend_ip_name,
public_ip_id, None, None, None, sku, frontend_ip_zone, None, tier)
load_balancer_resource['dependsOn'] = lb_dependencies
master_template.add_resource(load_balancer_resource)
master_template.add_output('loadBalancer', load_balancer_name, output_type='object')
template = master_template.build()
# deploy ARM template
deployment_name = 'lb_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def create_cross_region_lb_frontend_ip_configuration(
cmd, resource_group_name, load_balancer_name, item_name, public_ip_address=None,
public_ip_prefix=None, zone=None):
FrontendIPConfiguration, SubResource = cmd.get_models(
'FrontendIPConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
new_config = FrontendIPConfiguration(
name=item_name,
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
public_ip_prefix=SubResource(id=public_ip_prefix) if public_ip_prefix else None)
if zone and cmd.supported_api_version(min_api='2017-06-01'):
new_config.zones = zone
upsert_to_collection(lb, 'frontend_ip_configurations', new_config, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().frontend_ip_configurations, item_name)
def set_cross_region_lb_frontend_ip_configuration(
cmd, instance, parent, item_name, public_ip_address=None, public_ip_prefix=None):
PublicIPAddress, SubResource = cmd.get_models('PublicIPAddress', 'SubResource')
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if public_ip_prefix:
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
return parent
def create_cross_region_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
backend_addresses=None, backend_addresses_config_file=None):
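    # Illustrative shape of --backend-addresses-config-file for a cross-region LB (values are
    # placeholders; keys match what the parsing below expects):
    # [
    #     {"name": "addr1", "frontend_ip_address": "/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/loadBalancers/<regional-lb>/frontendIPConfigurations/<config>"}
    # ]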
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
if not isinstance(backend_addresses_config_file, list):
            raise CLIError('Config file must be a list. Please see the example as a reference.')
for addr in backend_addresses_config_file:
if not isinstance(addr, dict):
                raise CLIError('Each address in the config file must be a dictionary. Please see the example as a reference.')
ncf = network_client_factory(cmd.cli_ctx)
(BackendAddressPool,
LoadBalancerBackendAddress,
FrontendIPConfiguration) = cmd.get_models('BackendAddressPool',
'LoadBalancerBackendAddress',
'FrontendIPConfiguration')
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
# pylint: disable=line-too-long
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
load_balancer_frontend_ip_configuration=FrontendIPConfiguration(id=addr['frontend_ip_address'])) for addr in addresses_pool] if addresses_pool else None
except KeyError:
raise CLIError('Each backend address must have name and frontend_ip_configuration information.')
new_pool = BackendAddressPool(name=backend_address_pool_name,
load_balancer_backend_addresses=new_addresses)
return ncf.load_balancer_backend_address_pools.begin_create_or_update(resource_group_name,
load_balancer_name,
backend_address_pool_name,
new_pool)
def delete_cross_region_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name): # pylint: disable=line-too-long
ncf = network_client_factory(cmd.cli_ctx)
return ncf.load_balancer_backend_address_pools.begin_delete(resource_group_name,
load_balancer_name,
backend_address_pool_name)
def add_cross_region_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, address_name, frontend_ip_address):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
# pylint: disable=line-too-long
(LoadBalancerBackendAddress, FrontendIPConfiguration) = cmd.get_models('LoadBalancerBackendAddress', 'FrontendIPConfiguration')
new_address = LoadBalancerBackendAddress(name=address_name,
load_balancer_frontend_ip_configuration=FrontendIPConfiguration(id=frontend_ip_address) if frontend_ip_address else None)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
address_pool.load_balancer_backend_addresses.append(new_address)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def create_cross_region_lb_rule(
cmd, resource_group_name, load_balancer_name, item_name,
protocol, frontend_port, backend_port, frontend_ip_name=None,
backend_address_pool_name=None, probe_name=None, load_distribution='default',
floating_ip=None, idle_timeout=None, enable_tcp_reset=None, backend_pools_name=None):
LoadBalancingRule = cmd.get_models('LoadBalancingRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = cached_get(cmd, ncf.load_balancers.get, resource_group_name, load_balancer_name)
lb = lb_get_operation(lb)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
if not backend_address_pool_name:
backend_address_pool_name = _get_default_name(lb, 'backend_address_pools', '--backend-pool-name')
new_rule = LoadBalancingRule(
name=item_name,
protocol=protocol,
frontend_port=frontend_port,
backend_port=backend_port,
frontend_ip_configuration=get_property(lb.frontend_ip_configurations,
frontend_ip_name),
backend_address_pool=get_property(lb.backend_address_pools,
backend_address_pool_name),
probe=get_property(lb.probes, probe_name) if probe_name else None,
load_distribution=load_distribution,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset)
if backend_pools_name:
new_rule.backend_address_pools = [get_property(lb.backend_address_pools, i) for i in backend_pools_name]
upsert_to_collection(lb, 'load_balancing_rules', new_rule, 'name')
poller = cached_put(cmd, ncf.load_balancers.begin_create_or_update, lb, resource_group_name, load_balancer_name)
return get_property(poller.result().load_balancing_rules, item_name)
def set_cross_region_lb_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, backend_address_pool_name=None, probe_name=None,
load_distribution=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None, backend_pools_name=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('load_distribution', load_distribution)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('enable_floating_ip', floating_ip)
if frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if backend_address_pool_name is not None:
instance.backend_address_pool = \
get_property(parent.backend_address_pools, backend_address_pool_name)
    # Keep backward compatibility when bumping the API version from '2020-11-01' to '2021-02-01'
# https://github.com/Azure/azure-rest-api-specs/issues/14430
if cmd.supported_api_version(min_api='2021-02-01') and not backend_pools_name:
instance.backend_address_pools = [instance.backend_address_pool]
if backend_pools_name is not None:
instance.backend_address_pools = [get_property(parent.backend_address_pools, i) for i in backend_pools_name]
if probe_name == '':
instance.probe = None
elif probe_name is not None:
instance.probe = get_property(parent.probes, probe_name)
return parent
# endregion
# pylint: disable=line-too-long
def add_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
address_name, ip_address, vnet=None, subnet=None):
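    # Adds a single address entry to an existing backend address pool. With API 2020-11-01 and
    # later the address can be scoped by a vnet plus optional subnet, or by a full subnet resource ID.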
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
(LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
if cmd.supported_api_version(min_api='2020-11-01'):
if vnet:
new_address = LoadBalancerBackendAddress(name=address_name,
subnet=Subnet(id=_process_subnet_name_and_id(subnet, vnet, cmd, resource_group_name)) if subnet else None,
virtual_network=VirtualNetwork(id=vnet),
ip_address=ip_address if ip_address else None)
elif is_valid_resource_id(subnet):
new_address = LoadBalancerBackendAddress(name=address_name,
subnet=Subnet(id=subnet),
ip_address=ip_address if ip_address else None)
else:
raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet name | subnet id) information.')
else:
new_address = LoadBalancerBackendAddress(name=address_name,
virtual_network=VirtualNetwork(id=vnet) if vnet else None,
ip_address=ip_address if ip_address else None)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
address_pool.load_balancer_backend_addresses.append(new_address)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def remove_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, address_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
lb_addresses = [addr for addr in address_pool.load_balancer_backend_addresses if addr.name != address_name]
address_pool.load_balancer_backend_addresses = lb_addresses
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def list_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
return address_pool.load_balancer_backend_addresses
def create_lb_outbound_rule(cmd, resource_group_name, load_balancer_name, item_name,
backend_address_pool, frontend_ip_configurations, protocol,
outbound_ports=None, enable_tcp_reset=None, idle_timeout=None):
OutboundRule, SubResource = cmd.get_models('OutboundRule', 'SubResource')
client = network_client_factory(cmd.cli_ctx).load_balancers
lb = lb_get(client, resource_group_name, load_balancer_name)
rule = OutboundRule(
protocol=protocol, enable_tcp_reset=enable_tcp_reset, idle_timeout_in_minutes=idle_timeout,
backend_address_pool=SubResource(id=backend_address_pool),
frontend_ip_configurations=[SubResource(id=x) for x in frontend_ip_configurations]
if frontend_ip_configurations else None,
allocated_outbound_ports=outbound_ports, name=item_name)
upsert_to_collection(lb, 'outbound_rules', rule, 'name')
poller = client.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().outbound_rules, item_name)
def set_lb_outbound_rule(instance, cmd, parent, item_name, protocol=None, outbound_ports=None,
idle_timeout=None, frontend_ip_configurations=None, enable_tcp_reset=None,
backend_address_pool=None):
SubResource = cmd.get_models('SubResource')
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('allocated_outbound_ports', outbound_ports)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('backend_address_pool', SubResource(id=backend_address_pool)
if backend_address_pool else None)
c.set_param('frontend_ip_configurations',
[SubResource(id=x) for x in frontend_ip_configurations] if frontend_ip_configurations else None)
return parent
def create_lb_probe(cmd, resource_group_name, load_balancer_name, item_name, protocol, port,
path=None, interval=None, threshold=None):
Probe = cmd.get_models('Probe')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
new_probe = Probe(
protocol=protocol, port=port, interval_in_seconds=interval, number_of_probes=threshold,
request_path=path, name=item_name)
upsert_to_collection(lb, 'probes', new_probe, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().probes, item_name)
def set_lb_probe(cmd, instance, parent, item_name, protocol=None, port=None,
path=None, interval=None, threshold=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('port', port)
c.set_param('request_path', path)
c.set_param('interval_in_seconds', interval)
c.set_param('number_of_probes', threshold)
return parent
def create_lb_rule(
cmd, resource_group_name, load_balancer_name, item_name,
protocol, frontend_port, backend_port, frontend_ip_name=None,
backend_address_pool_name=None, probe_name=None, load_distribution='default',
floating_ip=None, idle_timeout=None, enable_tcp_reset=None, disable_outbound_snat=None, backend_pools_name=None):
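    # When names are omitted, fall back to a default chosen from the load balancer's existing
    # frontend IP configurations / backend address pools (see _get_default_name).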
LoadBalancingRule = cmd.get_models('LoadBalancingRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = cached_get(cmd, ncf.load_balancers.get, resource_group_name, load_balancer_name)
lb = lb_get_operation(lb)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
    # Avoid breaking when backend_address_pool_name is None but backend_pools_name is provided
if not backend_address_pool_name and backend_pools_name:
backend_address_pool_name = backend_pools_name[0]
if not backend_address_pool_name:
backend_address_pool_name = _get_default_name(lb, 'backend_address_pools', '--backend-pool-name')
new_rule = LoadBalancingRule(
name=item_name,
protocol=protocol,
frontend_port=frontend_port,
backend_port=backend_port,
frontend_ip_configuration=get_property(lb.frontend_ip_configurations,
frontend_ip_name),
backend_address_pool=get_property(lb.backend_address_pools,
backend_address_pool_name),
probe=get_property(lb.probes, probe_name) if probe_name else None,
load_distribution=load_distribution,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset,
disable_outbound_snat=disable_outbound_snat)
if backend_pools_name:
new_rule.backend_address_pools = [get_property(lb.backend_address_pools, name) for name in backend_pools_name]
        # Otherwise the service returns error (LoadBalancingRuleBackendAdressPoolAndBackendAddressPoolsCannotBeSetAtTheSameTimeWithDifferentValue): BackendAddressPool and BackendAddressPools[] in a LoadBalancingRule cannot be set at the same time with different values.
new_rule.backend_address_pool = None
upsert_to_collection(lb, 'load_balancing_rules', new_rule, 'name')
poller = cached_put(cmd, ncf.load_balancers.begin_create_or_update, lb, resource_group_name, load_balancer_name)
return get_property(poller.result().load_balancing_rules, item_name)
def set_lb_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, backend_address_pool_name=None, probe_name=None,
load_distribution='default', floating_ip=None, idle_timeout=None, enable_tcp_reset=None,
disable_outbound_snat=None, backend_pools_name=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('load_distribution', load_distribution)
c.set_param('disable_outbound_snat', disable_outbound_snat)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('enable_floating_ip', floating_ip)
if frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if backend_address_pool_name is not None:
instance.backend_address_pool = \
get_property(parent.backend_address_pools, backend_address_pool_name)
    # Keep backward compatibility when bumping the API version from '2020-11-01' to '2021-02-01'
# https://github.com/Azure/azure-rest-api-specs/issues/14430
if cmd.supported_api_version(min_api='2021-02-01') and not backend_pools_name:
instance.backend_address_pools = [instance.backend_address_pool]
if backend_pools_name is not None:
instance.backend_address_pools = [get_property(parent.backend_address_pools, i) for i in backend_pools_name]
        # Otherwise the service returns error (LoadBalancingRuleBackendAdressPoolAndBackendAddressPoolsCannotBeSetAtTheSameTimeWithDifferentValue): BackendAddressPool and BackendAddressPools[] in a LoadBalancingRule cannot be set at the same time with different values.
instance.backend_address_pool = None
if probe_name == '':
instance.probe = None
elif probe_name is not None:
instance.probe = get_property(parent.probes, probe_name)
return parent
def add_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, protocol, identifier, traffic_type, port=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
GatewayLoadBalancerTunnelInterface = cmd.get_models('GatewayLoadBalancerTunnelInterface')
tunnel_interface = GatewayLoadBalancerTunnelInterface(port=port, identifier=identifier, protocol=protocol, type=traffic_type)
if not address_pool.tunnel_interfaces:
address_pool.tunnel_interfaces = []
address_pool.tunnel_interfaces.append(tunnel_interface)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def update_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, index, protocol=None, identifier=None, traffic_type=None, port=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if index >= len(address_pool.tunnel_interfaces):
        raise UnrecognizedArgumentError(f'{index} is out of range, please provide a valid index')
item = address_pool.tunnel_interfaces[index]
if protocol:
item.protocol = protocol
if identifier:
item.identifier = identifier
if port:
item.port = port
if traffic_type:
item.type = traffic_type
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def remove_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, index):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if index >= len(address_pool.tunnel_interfaces):
        raise UnrecognizedArgumentError(f'{index} is out of range, please provide a valid index')
address_pool.tunnel_interfaces.pop(index)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def list_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
return address_pool.tunnel_interfaces
# endregion
# region LocalGateways
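# BGP settings are either patched field-by-field on an existing BgpSettings object or created
# from scratch, in which case an ASN is mandatory.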
def _validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight):
if any([asn, bgp_peering_address, peer_weight]):
if instance.bgp_settings is not None:
# update existing parameters selectively
if asn is not None:
instance.bgp_settings.asn = asn
if peer_weight is not None:
instance.bgp_settings.peer_weight = peer_weight
if bgp_peering_address is not None:
instance.bgp_settings.bgp_peering_address = bgp_peering_address
elif asn:
BgpSettings = cmd.get_models('BgpSettings')
            instance.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address, peer_weight=peer_weight)
else:
raise CLIError(
'incorrect usage: --asn ASN [--peer-weight WEIGHT --bgp-peering-address IP]')
def create_local_gateway(cmd, resource_group_name, local_network_gateway_name, gateway_ip_address,
location=None, tags=None, local_address_prefix=None, asn=None,
bgp_peering_address=None, peer_weight=None, no_wait=False):
AddressSpace, LocalNetworkGateway, BgpSettings = cmd.get_models(
'AddressSpace', 'LocalNetworkGateway', 'BgpSettings')
client = network_client_factory(cmd.cli_ctx).local_network_gateways
local_gateway = LocalNetworkGateway(
local_network_address_space=AddressSpace(address_prefixes=(local_address_prefix or [])),
location=location, tags=tags, gateway_ip_address=gateway_ip_address)
if bgp_peering_address or asn or peer_weight:
local_gateway.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address,
peer_weight=peer_weight)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, local_network_gateway_name, local_gateway)
def update_local_gateway(cmd, instance, gateway_ip_address=None, local_address_prefix=None, asn=None,
bgp_peering_address=None, peer_weight=None, tags=None):
_validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight)
if gateway_ip_address is not None:
instance.gateway_ip_address = gateway_ip_address
if local_address_prefix is not None:
instance.local_network_address_space.address_prefixes = local_address_prefix
if tags is not None:
instance.tags = tags
return instance
# endregion
# region NetworkInterfaces (NIC)
def create_nic(cmd, resource_group_name, network_interface_name, subnet, location=None, tags=None,
internal_dns_name_label=None, dns_servers=None, enable_ip_forwarding=False,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
load_balancer_name=None, network_security_group=None,
private_ip_address=None, private_ip_address_version=None,
public_ip_address=None, virtual_network_name=None, enable_accelerated_networking=None,
application_security_groups=None, no_wait=False,
app_gateway_backend_address_pools=None, edge_zone=None):
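    # Builds the NIC with a single primary IP configuration named 'ipconfig1'; optional associations
    # (NSG, public IP, LB pools/NAT rules, ASGs, App Gateway pools) are attached below.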
client = network_client_factory(cmd.cli_ctx).network_interfaces
(NetworkInterface, NetworkInterfaceDnsSettings, NetworkInterfaceIPConfiguration, NetworkSecurityGroup,
PublicIPAddress, Subnet, SubResource) = cmd.get_models(
'NetworkInterface', 'NetworkInterfaceDnsSettings', 'NetworkInterfaceIPConfiguration',
'NetworkSecurityGroup', 'PublicIPAddress', 'Subnet', 'SubResource')
dns_settings = NetworkInterfaceDnsSettings(internal_dns_name_label=internal_dns_name_label,
dns_servers=dns_servers or [])
nic = NetworkInterface(location=location, tags=tags, enable_ip_forwarding=enable_ip_forwarding,
dns_settings=dns_settings)
if cmd.supported_api_version(min_api='2016-09-01'):
nic.enable_accelerated_networking = enable_accelerated_networking
if network_security_group:
nic.network_security_group = NetworkSecurityGroup(id=network_security_group)
ip_config_args = {
'name': 'ipconfig1',
'load_balancer_backend_address_pools': load_balancer_backend_address_pool_ids,
'load_balancer_inbound_nat_rules': load_balancer_inbound_nat_rule_ids,
'private_ip_allocation_method': 'Static' if private_ip_address else 'Dynamic',
'private_ip_address': private_ip_address,
'subnet': Subnet(id=subnet),
'application_gateway_backend_address_pools':
[SubResource(id=x) for x in app_gateway_backend_address_pools]
if app_gateway_backend_address_pools else None
}
if cmd.supported_api_version(min_api='2016-09-01'):
ip_config_args['private_ip_address_version'] = private_ip_address_version
if cmd.supported_api_version(min_api='2017-09-01'):
ip_config_args['application_security_groups'] = application_security_groups
ip_config = NetworkInterfaceIPConfiguration(**ip_config_args)
if public_ip_address:
ip_config.public_ip_address = PublicIPAddress(id=public_ip_address)
nic.ip_configurations = [ip_config]
if edge_zone:
nic.extended_location = _edge_zone_model(cmd, edge_zone)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, network_interface_name, nic)
def update_nic(cmd, instance, network_security_group=None, enable_ip_forwarding=None,
internal_dns_name_label=None, dns_servers=None, enable_accelerated_networking=None):
if enable_ip_forwarding is not None:
instance.enable_ip_forwarding = enable_ip_forwarding
if network_security_group == '':
instance.network_security_group = None
elif network_security_group is not None:
NetworkSecurityGroup = cmd.get_models('NetworkSecurityGroup')
instance.network_security_group = NetworkSecurityGroup(id=network_security_group)
if internal_dns_name_label == '':
instance.dns_settings.internal_dns_name_label = None
elif internal_dns_name_label is not None:
instance.dns_settings.internal_dns_name_label = internal_dns_name_label
if dns_servers == ['']:
instance.dns_settings.dns_servers = None
elif dns_servers:
instance.dns_settings.dns_servers = dns_servers
if enable_accelerated_networking is not None:
instance.enable_accelerated_networking = enable_accelerated_networking
return instance
def create_nic_ip_config(cmd, resource_group_name, network_interface_name, ip_config_name, subnet=None,
virtual_network_name=None, public_ip_address=None, load_balancer_name=None,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
private_ip_address=None,
private_ip_address_version=None,
make_primary=False,
application_security_groups=None,
app_gateway_backend_address_pools=None):
NetworkInterfaceIPConfiguration, PublicIPAddress, Subnet, SubResource = cmd.get_models(
'NetworkInterfaceIPConfiguration', 'PublicIPAddress', 'Subnet', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
nic = ncf.network_interfaces.get(resource_group_name, network_interface_name)
if cmd.supported_api_version(min_api='2016-09-01'):
IPVersion = cmd.get_models('IPVersion')
private_ip_address_version = private_ip_address_version or IPVersion.I_PV4.value
if private_ip_address_version == IPVersion.I_PV4.value and not subnet:
primary_config = next(x for x in nic.ip_configurations if x.primary)
subnet = primary_config.subnet.id
if make_primary:
for config in nic.ip_configurations:
config.primary = False
new_config_args = {
'name': ip_config_name,
'subnet': Subnet(id=subnet) if subnet else None,
'public_ip_address': PublicIPAddress(id=public_ip_address) if public_ip_address else None,
'load_balancer_backend_address_pools': load_balancer_backend_address_pool_ids,
'load_balancer_inbound_nat_rules': load_balancer_inbound_nat_rule_ids,
'private_ip_address': private_ip_address,
'private_ip_allocation_method': 'Static' if private_ip_address else 'Dynamic'
}
if cmd.supported_api_version(min_api='2016-09-01'):
new_config_args['private_ip_address_version'] = private_ip_address_version
new_config_args['primary'] = make_primary
if cmd.supported_api_version(min_api='2017-09-01'):
new_config_args['application_security_groups'] = application_security_groups
if cmd.supported_api_version(min_api='2018-08-01'):
new_config_args['application_gateway_backend_address_pools'] = \
[SubResource(id=x) for x in app_gateway_backend_address_pools] \
if app_gateway_backend_address_pools else None
new_config = NetworkInterfaceIPConfiguration(**new_config_args)
upsert_to_collection(nic, 'ip_configurations', new_config, 'name')
poller = ncf.network_interfaces.begin_create_or_update(
resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def update_nic_ip_config_setter(cmd, resource_group_name, network_interface_name, parameters, gateway_lb):
aux_subscriptions = []
if is_valid_resource_id(gateway_lb):
aux_subscriptions.append(parse_resource_id(gateway_lb)['subscription'])
client = network_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions).network_interfaces
return client.begin_create_or_update(resource_group_name, network_interface_name, parameters)
def set_nic_ip_config(cmd, instance, parent, ip_config_name, subnet=None,
virtual_network_name=None, public_ip_address=None, load_balancer_name=None,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
private_ip_address=None,
private_ip_address_version=None, make_primary=False,
application_security_groups=None,
app_gateway_backend_address_pools=None, gateway_lb=None):
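    # Update semantics: an empty value ('' or ['']) clears the corresponding association,
    # None leaves it unchanged, and any other value replaces it.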
PublicIPAddress, Subnet, SubResource = cmd.get_models('PublicIPAddress', 'Subnet', 'SubResource')
if make_primary:
for config in parent.ip_configurations:
config.primary = False
instance.primary = True
if private_ip_address == '':
# switch private IP address allocation to Dynamic if empty string is used
instance.private_ip_address = None
instance.private_ip_allocation_method = 'dynamic'
if cmd.supported_api_version(min_api='2016-09-01'):
instance.private_ip_address_version = 'ipv4'
elif private_ip_address is not None:
# if specific address provided, allocation is static
instance.private_ip_address = private_ip_address
instance.private_ip_allocation_method = 'static'
if private_ip_address_version is not None:
instance.private_ip_address_version = private_ip_address_version
if subnet == '':
instance.subnet = None
elif subnet is not None:
instance.subnet = Subnet(id=subnet)
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if load_balancer_backend_address_pool_ids == '':
instance.load_balancer_backend_address_pools = None
elif load_balancer_backend_address_pool_ids is not None:
instance.load_balancer_backend_address_pools = load_balancer_backend_address_pool_ids
if load_balancer_inbound_nat_rule_ids == '':
instance.load_balancer_inbound_nat_rules = None
elif load_balancer_inbound_nat_rule_ids is not None:
instance.load_balancer_inbound_nat_rules = load_balancer_inbound_nat_rule_ids
if application_security_groups == ['']:
instance.application_security_groups = None
elif application_security_groups:
instance.application_security_groups = application_security_groups
if app_gateway_backend_address_pools == ['']:
instance.application_gateway_backend_address_pools = None
elif app_gateway_backend_address_pools:
instance.application_gateway_backend_address_pools = \
[SubResource(id=x) for x in app_gateway_backend_address_pools]
if gateway_lb is not None:
instance.gateway_load_balancer = None if gateway_lb == '' else SubResource(id=gateway_lb)
return parent
def _get_nic_ip_config(nic, name):
if nic.ip_configurations:
ip_config = next(
(x for x in nic.ip_configurations if x.name.lower() == name.lower()), None)
else:
ip_config = None
if not ip_config:
raise CLIError('IP configuration {} not found.'.format(name))
return ip_config
def add_nic_ip_config_address_pool(
cmd, resource_group_name, network_interface_name, ip_config_name, backend_address_pool,
load_balancer_name=None, application_gateway_name=None):
BackendAddressPool = cmd.get_models('BackendAddressPool')
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
if load_balancer_name:
upsert_to_collection(ip_config, 'load_balancer_backend_address_pools',
BackendAddressPool(id=backend_address_pool),
'id')
elif application_gateway_name:
upsert_to_collection(ip_config, 'application_gateway_backend_address_pools',
BackendAddressPool(id=backend_address_pool),
'id')
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def remove_nic_ip_config_address_pool(
cmd, resource_group_name, network_interface_name, ip_config_name, backend_address_pool,
load_balancer_name=None, application_gateway_name=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
if load_balancer_name:
keep_items = [x for x in ip_config.load_balancer_backend_address_pools or [] if x.id != backend_address_pool]
ip_config.load_balancer_backend_address_pools = keep_items
elif application_gateway_name:
keep_items = [x for x in ip_config.application_gateway_backend_address_pools or [] if
x.id != backend_address_pool]
ip_config.application_gateway_backend_address_pools = keep_items
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def add_nic_ip_config_inbound_nat_rule(
cmd, resource_group_name, network_interface_name, ip_config_name, inbound_nat_rule,
load_balancer_name=None):
InboundNatRule = cmd.get_models('InboundNatRule')
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
upsert_to_collection(ip_config, 'load_balancer_inbound_nat_rules',
InboundNatRule(id=inbound_nat_rule),
'id')
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def remove_nic_ip_config_inbound_nat_rule(
cmd, resource_group_name, network_interface_name, ip_config_name, inbound_nat_rule,
load_balancer_name=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
keep_items = \
[x for x in ip_config.load_balancer_inbound_nat_rules or [] if x.id != inbound_nat_rule]
ip_config.load_balancer_inbound_nat_rules = keep_items
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
# endregion
# region NetworkSecurityGroups
def create_nsg(cmd, resource_group_name, network_security_group_name, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).network_security_groups
NetworkSecurityGroup = cmd.get_models('NetworkSecurityGroup')
nsg = NetworkSecurityGroup(location=location, tags=tags)
return client.begin_create_or_update(resource_group_name, network_security_group_name, nsg)
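# Helper: a single value goes on the singular SecurityRule property and a multi-value list on the
# plural property; the other property is explicitly cleared.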
def _create_singular_or_plural_property(kwargs, val, singular_name, plural_name):
if not val:
return
if not isinstance(val, list):
val = [val]
if len(val) > 1:
kwargs[plural_name] = val
kwargs[singular_name] = None
else:
kwargs[singular_name] = val[0]
kwargs[plural_name] = None
def _handle_asg_property(kwargs, key, asgs):
prefix = key.split('_', 1)[0] + '_'
if asgs:
kwargs[key] = asgs
if kwargs[prefix + 'address_prefix'].is_default:
kwargs[prefix + 'address_prefix'] = ''
def create_nsg_rule_2017_06_01(cmd, resource_group_name, network_security_group_name, security_rule_name,
priority, description=None, protocol=None, access=None, direction=None,
source_port_ranges='*', source_address_prefixes='*',
destination_port_ranges=80, destination_address_prefixes='*',
source_asgs=None, destination_asgs=None):
kwargs = {
'protocol': protocol,
'direction': direction,
'description': description,
'priority': priority,
'access': access,
'name': security_rule_name
}
_create_singular_or_plural_property(kwargs, source_address_prefixes,
'source_address_prefix', 'source_address_prefixes')
_create_singular_or_plural_property(kwargs, destination_address_prefixes,
'destination_address_prefix', 'destination_address_prefixes')
_create_singular_or_plural_property(kwargs, source_port_ranges,
'source_port_range', 'source_port_ranges')
_create_singular_or_plural_property(kwargs, destination_port_ranges,
'destination_port_range', 'destination_port_ranges')
# workaround for issue https://github.com/Azure/azure-rest-api-specs/issues/1591
kwargs['source_address_prefix'] = kwargs['source_address_prefix'] or ''
kwargs['destination_address_prefix'] = kwargs['destination_address_prefix'] or ''
if cmd.supported_api_version(min_api='2017-09-01'):
_handle_asg_property(kwargs, 'source_application_security_groups', source_asgs)
_handle_asg_property(kwargs, 'destination_application_security_groups', destination_asgs)
SecurityRule = cmd.get_models('SecurityRule')
settings = SecurityRule(**kwargs)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.security_rules.begin_create_or_update(
resource_group_name, network_security_group_name, security_rule_name, settings)
def create_nsg_rule_2017_03_01(cmd, resource_group_name, network_security_group_name, security_rule_name,
priority, description=None, protocol=None, access=None, direction=None,
source_port_range='*', source_address_prefix='*',
destination_port_range=80, destination_address_prefix='*'):
SecurityRule = cmd.get_models('SecurityRule')
settings = SecurityRule(protocol=protocol, source_address_prefix=source_address_prefix,
destination_address_prefix=destination_address_prefix, access=access,
direction=direction,
description=description, source_port_range=source_port_range,
destination_port_range=destination_port_range, priority=priority,
name=security_rule_name)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.security_rules.begin_create_or_update(
resource_group_name, network_security_group_name, security_rule_name, settings)
def _update_singular_or_plural_property(instance, val, singular_name, plural_name):
if val is None:
return
if not isinstance(val, list):
val = [val]
if len(val) > 1:
setattr(instance, plural_name, val)
setattr(instance, singular_name, None)
else:
setattr(instance, plural_name, None)
setattr(instance, singular_name, val[0])
def update_nsg_rule_2017_06_01(instance, protocol=None, source_address_prefixes=None,
destination_address_prefixes=None, access=None, direction=None, description=None,
source_port_ranges=None, destination_port_ranges=None, priority=None,
source_asgs=None, destination_asgs=None):
# No client validation as server side returns pretty good errors
instance.protocol = protocol if protocol is not None else instance.protocol
instance.access = access if access is not None else instance.access
instance.direction = direction if direction is not None else instance.direction
instance.description = description if description is not None else instance.description
instance.priority = priority if priority is not None else instance.priority
_update_singular_or_plural_property(instance, source_address_prefixes,
'source_address_prefix', 'source_address_prefixes')
_update_singular_or_plural_property(instance, destination_address_prefixes,
'destination_address_prefix', 'destination_address_prefixes')
_update_singular_or_plural_property(instance, source_port_ranges,
'source_port_range', 'source_port_ranges')
_update_singular_or_plural_property(instance, destination_port_ranges,
'destination_port_range', 'destination_port_ranges')
# workaround for issue https://github.com/Azure/azure-rest-api-specs/issues/1591
instance.source_address_prefix = instance.source_address_prefix or ''
instance.destination_address_prefix = instance.destination_address_prefix or ''
if source_asgs == ['']:
instance.source_application_security_groups = None
elif source_asgs:
instance.source_application_security_groups = source_asgs
if destination_asgs == ['']:
instance.destination_application_security_groups = None
elif destination_asgs:
instance.destination_application_security_groups = destination_asgs
return instance
def update_nsg_rule_2017_03_01(instance, protocol=None, source_address_prefix=None,
destination_address_prefix=None, access=None, direction=None, description=None,
source_port_range=None, destination_port_range=None, priority=None):
# No client validation as server side returns pretty good errors
instance.protocol = protocol if protocol is not None else instance.protocol
instance.source_address_prefix = (source_address_prefix if source_address_prefix is not None
else instance.source_address_prefix)
instance.destination_address_prefix = destination_address_prefix \
if destination_address_prefix is not None else instance.destination_address_prefix
instance.access = access if access is not None else instance.access
instance.direction = direction if direction is not None else instance.direction
instance.description = description if description is not None else instance.description
instance.source_port_range = source_port_range \
if source_port_range is not None else instance.source_port_range
instance.destination_port_range = destination_port_range \
if destination_port_range is not None else instance.destination_port_range
instance.priority = priority if priority is not None else instance.priority
return instance
# endregion
# region NetworkProfiles
def list_network_profiles(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).network_profiles
if resource_group_name:
return client.list(resource_group_name)
return client.list_all()
# endregion
# region NetworkWatchers
def _create_network_watchers(cmd, client, resource_group_name, locations, tags):
if resource_group_name is None:
raise CLIError("usage error: '--resource-group' required when enabling new regions")
NetworkWatcher = cmd.get_models('NetworkWatcher')
for location in locations:
client.create_or_update(
resource_group_name, '{}-watcher'.format(location),
NetworkWatcher(location=location, tags=tags))
def _update_network_watchers(cmd, client, watchers, tags):
NetworkWatcher = cmd.get_models('NetworkWatcher')
for watcher in watchers:
id_parts = parse_resource_id(watcher.id)
watcher_rg = id_parts['resource_group']
watcher_name = id_parts['name']
watcher_tags = watcher.tags if tags is None else tags
client.create_or_update(
watcher_rg, watcher_name,
NetworkWatcher(location=watcher.location, tags=watcher_tags))
def _delete_network_watchers(cmd, client, watchers):
for watcher in watchers:
from azure.cli.core.commands import LongRunningOperation
id_parts = parse_resource_id(watcher.id)
watcher_rg = id_parts['resource_group']
watcher_name = id_parts['name']
logger.warning(
"Disabling Network Watcher for region '%s' by deleting resource '%s'",
watcher.location, watcher.id)
LongRunningOperation(cmd.cli_ctx)(client.begin_delete(watcher_rg, watcher_name))
def configure_network_watcher(cmd, client, locations, resource_group_name=None, enabled=None, tags=None):
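    # enabled=None only refreshes tags on regions that already have a watcher, enabled=True
    # additionally creates watchers for the remaining regions, and enabled=False deletes the
    # existing watchers (tags are not allowed in that case).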
watcher_list = list(client.list_all())
locations_list = [location.lower() for location in locations]
existing_watchers = [w for w in watcher_list if w.location in locations_list]
nonenabled_regions = list(set(locations) - set(watcher.location for watcher in existing_watchers))
if enabled is None:
if resource_group_name is not None:
logger.warning(
"Resource group '%s' is only used when enabling new regions and will be ignored.",
resource_group_name)
for location in nonenabled_regions:
logger.warning(
"Region '%s' is not enabled for Network Watcher and will be ignored.", location)
_update_network_watchers(cmd, client, existing_watchers, tags)
elif enabled:
_create_network_watchers(cmd, client, resource_group_name, nonenabled_regions, tags)
_update_network_watchers(cmd, client, existing_watchers, tags)
else:
if tags is not None:
raise CLIError("usage error: '--tags' cannot be used when disabling regions")
_delete_network_watchers(cmd, client, existing_watchers)
return client.list_all()
def create_nw_connection_monitor(cmd,
client,
connection_monitor_name,
watcher_rg,
watcher_name,
resource_group_name=None,
location=None,
source_resource=None,
source_port=None,
dest_resource=None,
dest_port=None,
dest_address=None,
tags=None,
do_not_start=None,
monitoring_interval=None,
endpoint_source_name=None,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_source_type=None,
endpoint_source_coverage_level=None,
endpoint_dest_name=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
endpoint_dest_type=None,
endpoint_dest_coverage_level=None,
test_config_name=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_disable_trace_route=None,
test_config_tcp_port=None,
test_config_tcp_port_behavior=None,
test_config_icmp_disable_trace_route=None,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None,
test_group_name=None,
test_group_disable=None,
output_type=None,
workspace_ids=None,
notes=None):
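    # Presence of any V1-only parameter selects the legacy (source/destination) connection monitor
    # format; any V2-only parameter selects the endpoint/test-configuration format. Supplying
    # neither set is rejected below.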
v1_required_parameter_set = [
source_resource, source_port,
dest_resource, dest_address, dest_port
]
v2_required_parameter_set = [
endpoint_source_name, endpoint_source_resource_id, endpoint_source_type, endpoint_source_coverage_level,
endpoint_dest_name, endpoint_dest_address, endpoint_dest_type, endpoint_dest_coverage_level,
test_config_name, test_config_protocol,
output_type, workspace_ids,
]
if any(v1_required_parameter_set): # V1 creation
connection_monitor = _create_nw_connection_monitor_v1(cmd,
connection_monitor_name,
watcher_rg,
watcher_name,
source_resource,
resource_group_name,
source_port,
location,
dest_resource,
dest_port,
dest_address,
tags,
do_not_start,
monitoring_interval)
from azure.cli.core.profiles._shared import AD_HOC_API_VERSIONS
client = get_mgmt_service_client(
cmd.cli_ctx,
ResourceType.MGMT_NETWORK,
api_version=AD_HOC_API_VERSIONS[ResourceType.MGMT_NETWORK]['nw_connection_monitor']
).connection_monitors
elif any(v2_required_parameter_set): # V2 creation
connection_monitor = _create_nw_connection_monitor_v2(cmd,
location,
tags,
endpoint_source_name,
endpoint_source_resource_id,
endpoint_source_address,
endpoint_source_type,
endpoint_source_coverage_level,
endpoint_dest_name,
endpoint_dest_resource_id,
endpoint_dest_address,
endpoint_dest_type,
endpoint_dest_coverage_level,
test_config_name,
test_config_frequency,
test_config_protocol,
test_config_preferred_ip_version,
test_config_threshold_failed_percent,
test_config_threshold_round_trip_time,
test_config_tcp_port,
test_config_tcp_port_behavior,
test_config_tcp_disable_trace_route,
test_config_icmp_disable_trace_route,
test_config_http_port,
test_config_http_method,
test_config_http_path,
test_config_http_valid_status_codes,
test_config_http_prefer_https,
test_group_name,
test_group_disable,
output_type,
workspace_ids,
notes)
else:
raise CLIError('Unknown operation')
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def _create_nw_connection_monitor_v1(cmd,
connection_monitor_name,
watcher_rg,
watcher_name,
source_resource,
resource_group_name=None,
source_port=None,
location=None,
dest_resource=None,
dest_port=None,
dest_address=None,
tags=None,
do_not_start=None,
monitoring_interval=60):
ConnectionMonitor, ConnectionMonitorSource, ConnectionMonitorDestination = cmd.get_models(
'ConnectionMonitor', 'ConnectionMonitorSource', 'ConnectionMonitorDestination')
cmv1 = ConnectionMonitor(
location=location,
tags=tags,
source=ConnectionMonitorSource(
resource_id=source_resource,
port=source_port
),
destination=ConnectionMonitorDestination(
resource_id=dest_resource,
port=dest_port,
address=dest_address
),
auto_start=not do_not_start,
monitoring_interval_in_seconds=monitoring_interval,
endpoints=None,
test_configurations=None,
test_groups=None,
outputs=None,
notes=None
)
return cmv1
def _create_nw_connection_monitor_v2(cmd,
location=None,
tags=None,
endpoint_source_name=None,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_source_type=None,
endpoint_source_coverage_level=None,
endpoint_dest_name=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
endpoint_dest_type=None,
endpoint_dest_coverage_level=None,
test_config_name=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_port=None,
test_config_tcp_port_behavior=None,
test_config_tcp_disable_trace_route=False,
test_config_icmp_disable_trace_route=False,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None,
test_group_name=None,
test_group_disable=False,
output_type=None,
workspace_ids=None,
notes=None):
src_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_source_name,
endpoint_resource_id=endpoint_source_resource_id,
address=endpoint_source_address,
endpoint_type=endpoint_source_type,
coverage_level=endpoint_source_coverage_level)
dst_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_dest_name,
endpoint_resource_id=endpoint_dest_resource_id,
address=endpoint_dest_address,
endpoint_type=endpoint_dest_type,
coverage_level=endpoint_dest_coverage_level)
test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
test_config_name,
test_config_frequency,
test_config_protocol,
test_config_threshold_failed_percent,
test_config_threshold_round_trip_time,
test_config_preferred_ip_version,
test_config_tcp_port,
test_config_tcp_port_behavior,
test_config_tcp_disable_trace_route,
test_config_icmp_disable_trace_route,
test_config_http_port,
test_config_http_method,
test_config_http_path,
test_config_http_valid_status_codes,
test_config_http_prefer_https)
test_group = _create_nw_connection_monitor_v2_test_group(cmd,
test_group_name,
test_group_disable,
[test_config],
[src_endpoint],
[dst_endpoint])
if output_type:
outputs = []
if workspace_ids:
for workspace_id in workspace_ids:
output = _create_nw_connection_monitor_v2_output(cmd, output_type, workspace_id)
outputs.append(output)
else:
outputs = []
ConnectionMonitor = cmd.get_models('ConnectionMonitor')
cmv2 = ConnectionMonitor(location=location,
tags=tags,
auto_start=None,
monitoring_interval_in_seconds=None,
endpoints=[src_endpoint, dst_endpoint],
test_configurations=[test_config],
test_groups=[test_group],
outputs=outputs,
notes=notes)
return cmv2
def _create_nw_connection_monitor_v2_endpoint(cmd,
name,
endpoint_resource_id=None,
address=None,
filter_type=None,
filter_items=None,
endpoint_type=None,
coverage_level=None):
if (filter_type and not filter_items) or (not filter_type and filter_items):
raise CLIError('usage error: '
'--filter-type and --filter-item for endpoint filter must be present at the same time.')
ConnectionMonitorEndpoint, ConnectionMonitorEndpointFilter = cmd.get_models(
'ConnectionMonitorEndpoint', 'ConnectionMonitorEndpointFilter')
endpoint = ConnectionMonitorEndpoint(name=name,
resource_id=endpoint_resource_id,
address=address,
type=endpoint_type,
coverage_level=coverage_level)
if filter_type and filter_items:
endpoint_filter = ConnectionMonitorEndpointFilter(type=filter_type, items=filter_items)
endpoint.filter = endpoint_filter
return endpoint
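# Builds a test configuration. The protocol value decides which protocol-specific block (TCP, ICMP or
# HTTP) is attached; any other protocol is rejected. A success threshold is only set when at least one
# threshold option is provided.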
def _create_nw_connection_monitor_v2_test_configuration(cmd,
name,
test_frequency,
protocol,
threshold_failed_percent,
threshold_round_trip_time,
preferred_ip_version,
tcp_port=None,
tcp_port_behavior=None,
tcp_disable_trace_route=None,
icmp_disable_trace_route=None,
http_port=None,
http_method=None,
http_path=None,
http_valid_status_codes=None,
http_prefer_https=None,
http_request_headers=None):
(ConnectionMonitorTestConfigurationProtocol,
ConnectionMonitorTestConfiguration, ConnectionMonitorSuccessThreshold) = cmd.get_models(
'ConnectionMonitorTestConfigurationProtocol',
'ConnectionMonitorTestConfiguration', 'ConnectionMonitorSuccessThreshold')
test_config = ConnectionMonitorTestConfiguration(name=name,
test_frequency_sec=test_frequency,
protocol=protocol,
preferred_ip_version=preferred_ip_version)
if threshold_failed_percent or threshold_round_trip_time:
threshold = ConnectionMonitorSuccessThreshold(checks_failed_percent=threshold_failed_percent,
round_trip_time_ms=threshold_round_trip_time)
test_config.success_threshold = threshold
if protocol == ConnectionMonitorTestConfigurationProtocol.tcp:
ConnectionMonitorTcpConfiguration = cmd.get_models('ConnectionMonitorTcpConfiguration')
tcp_config = ConnectionMonitorTcpConfiguration(
port=tcp_port,
destination_port_behavior=tcp_port_behavior,
disable_trace_route=tcp_disable_trace_route
)
test_config.tcp_configuration = tcp_config
elif protocol == ConnectionMonitorTestConfigurationProtocol.icmp:
ConnectionMonitorIcmpConfiguration = cmd.get_models('ConnectionMonitorIcmpConfiguration')
icmp_config = ConnectionMonitorIcmpConfiguration(disable_trace_route=icmp_disable_trace_route)
test_config.icmp_configuration = icmp_config
elif protocol == ConnectionMonitorTestConfigurationProtocol.http:
ConnectionMonitorHttpConfiguration = cmd.get_models('ConnectionMonitorHttpConfiguration')
http_config = ConnectionMonitorHttpConfiguration(
port=http_port,
method=http_method,
path=http_path,
request_headers=http_request_headers,
valid_status_code_ranges=http_valid_status_codes,
prefer_https=http_prefer_https)
test_config.http_configuration = http_config
else:
raise CLIError('Unsupported protocol: "{}" for test configuration'.format(protocol))
return test_config
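# A test group stores only the *names* of its test configurations, sources and destinations, so the
# referenced objects must be added to the connection monitor separately.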
def _create_nw_connection_monitor_v2_test_group(cmd,
name,
disable,
test_configurations,
source_endpoints,
destination_endpoints):
ConnectionMonitorTestGroup = cmd.get_models('ConnectionMonitorTestGroup')
test_group = ConnectionMonitorTestGroup(name=name,
disable=disable,
test_configurations=[tc.name for tc in test_configurations],
sources=[e.name for e in source_endpoints],
destinations=[e.name for e in destination_endpoints])
return test_group
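# Only the Log Analytics workspace output type is handled here; it requires the workspace resource ID.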
def _create_nw_connection_monitor_v2_output(cmd,
output_type,
workspace_id=None):
ConnectionMonitorOutput, OutputType = cmd.get_models('ConnectionMonitorOutput', 'OutputType')
output = ConnectionMonitorOutput(type=output_type)
if output_type == OutputType.workspace:
ConnectionMonitorWorkspaceSettings = cmd.get_models('ConnectionMonitorWorkspaceSettings')
workspace = ConnectionMonitorWorkspaceSettings(workspace_resource_id=workspace_id)
output.workspace_settings = workspace
else:
raise CLIError('Unsupported output type: "{}"'.format(output_type))
return output
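# Adds an endpoint to an existing V2 connection monitor. Optional include/exclude address lists become
# the endpoint scope, and the endpoint is appended to the sources/destinations of the named test groups
# before the monitor is written back with a create-or-update call.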
def add_nw_connection_monitor_v2_endpoint(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
coverage_level=None,
endpoint_type=None,
source_test_groups=None,
dest_test_groups=None,
endpoint_resource_id=None,
address=None,
filter_type=None,
filter_items=None,
address_include=None,
address_exclude=None):
(ConnectionMonitorEndpoint, ConnectionMonitorEndpointFilter,
ConnectionMonitorEndpointScope, ConnectionMonitorEndpointScopeItem) = cmd.get_models(
'ConnectionMonitorEndpoint', 'ConnectionMonitorEndpointFilter',
'ConnectionMonitorEndpointScope', 'ConnectionMonitorEndpointScopeItem')
endpoint_scope = ConnectionMonitorEndpointScope(include=[], exclude=[])
for ip in address_include or []:
include_item = ConnectionMonitorEndpointScopeItem(address=ip)
endpoint_scope.include.append(include_item)
for ip in address_exclude or []:
exclude_item = ConnectionMonitorEndpointScopeItem(address=ip)
endpoint_scope.exclude.append(exclude_item)
endpoint = ConnectionMonitorEndpoint(name=name,
resource_id=endpoint_resource_id,
address=address,
type=endpoint_type,
coverage_level=coverage_level,
scope=endpoint_scope if address_include or address_exclude else None)
if filter_type and filter_items:
endpoint_filter = ConnectionMonitorEndpointFilter(type=filter_type, items=filter_items)
endpoint.filter = endpoint_filter
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.endpoints.append(endpoint)
src_test_groups, dst_test_groups = set(source_test_groups or []), set(dest_test_groups or [])
for test_group in connection_monitor.test_groups:
if test_group.name in src_test_groups:
test_group.sources.append(endpoint.name)
if test_group.name in dst_test_groups:
test_group.destinations.append(endpoint.name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
test_groups=None):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
# refresh endpoints
new_endpoints = [endpoint for endpoint in connection_monitor.endpoints if endpoint.name != name]
connection_monitor.endpoints = new_endpoints
# refresh test groups
if test_groups is not None:
temp_test_groups = [t for t in connection_monitor.test_groups if t.name in test_groups]
else:
temp_test_groups = connection_monitor.test_groups
for test_group in temp_test_groups:
if name in test_group.sources:
test_group.sources.remove(name)
if name in test_group.destinations:
test_group.destinations.remove(name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for endpoint in connection_monitor.endpoints:
if endpoint.name == name:
return endpoint
raise CLIError('unknown endpoint: {}'.format(name))
def list_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.endpoints
def add_nw_connection_monitor_v2_test_configuration(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
protocol,
test_groups,
frequency=None,
threshold_failed_percent=None,
threshold_round_trip_time=None,
preferred_ip_version=None,
tcp_port=None,
tcp_port_behavior=None,
tcp_disable_trace_route=None,
icmp_disable_trace_route=None,
http_port=None,
http_method=None,
http_path=None,
http_valid_status_codes=None,
http_prefer_https=None,
http_request_headers=None):
new_test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
name,
frequency,
protocol,
threshold_failed_percent,
threshold_round_trip_time,
preferred_ip_version,
tcp_port,
tcp_port_behavior,
tcp_disable_trace_route,
icmp_disable_trace_route,
http_port,
http_method,
http_path,
http_valid_status_codes,
http_prefer_https,
http_request_headers)
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.test_configurations.append(new_test_config)
for test_group in connection_monitor.test_groups:
if test_group.name in test_groups:
test_group.test_configurations.append(new_test_config.name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
test_groups=None):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
# refresh test configurations
new_test_configurations = [t for t in connection_monitor.test_configurations if t.name != name]
connection_monitor.test_configurations = new_test_configurations
if test_groups is not None:
temp_test_groups = [t for t in connection_monitor.test_groups if t.name in test_groups]
else:
temp_test_groups = connection_monitor.test_groups
# refresh test groups
    for test_group in temp_test_groups:
        if name in test_group.test_configurations:
            test_group.test_configurations.remove(name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for test_config in connection_monitor.test_configurations:
if test_config.name == name:
return test_config
raise CLIError('unknown test configuration: {}'.format(name))
def list_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.test_configurations
def add_nw_connection_monitor_v2_test_group(cmd,
client,
connection_monitor_name,
watcher_rg,
watcher_name,
location,
name,
endpoint_source_name,
endpoint_dest_name,
test_config_name,
disable=False,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_disable_trace_route=None,
test_config_tcp_port=None,
test_config_icmp_disable_trace_route=None,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None):
new_test_configuration_creation_requirements = [
test_config_protocol, test_config_preferred_ip_version,
test_config_threshold_failed_percent, test_config_threshold_round_trip_time,
test_config_tcp_disable_trace_route, test_config_tcp_port,
test_config_icmp_disable_trace_route,
test_config_http_port, test_config_http_method,
test_config_http_path, test_config_http_valid_status_codes, test_config_http_prefer_https
]
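    # Any of the options above being set signals that a brand-new test configuration should be created
    # below, rather than reusing a test configuration already defined on the monitor.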
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
new_test_group = _create_nw_connection_monitor_v2_test_group(cmd,
name,
disable,
[], [], [])
# deal with endpoint
if any([endpoint_source_address, endpoint_source_resource_id]):
src_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_source_name,
endpoint_source_resource_id,
endpoint_source_address)
connection_monitor.endpoints.append(src_endpoint)
if any([endpoint_dest_address, endpoint_dest_resource_id]):
dst_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_dest_name,
endpoint_dest_resource_id,
endpoint_dest_address)
connection_monitor.endpoints.append(dst_endpoint)
new_test_group.sources.append(endpoint_source_name)
new_test_group.destinations.append(endpoint_dest_name)
# deal with test configuration
if any(new_test_configuration_creation_requirements):
test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
test_config_name,
test_config_frequency,
test_config_protocol,
test_config_threshold_failed_percent,
test_config_threshold_round_trip_time,
test_config_preferred_ip_version,
                                                                          test_config_tcp_port,
                                                                          None,  # tcp_port_behavior is not exposed by this command
                                                                          test_config_tcp_disable_trace_route,
test_config_icmp_disable_trace_route,
test_config_http_port,
test_config_http_method,
test_config_http_path,
test_config_http_valid_status_codes,
test_config_http_prefer_https)
connection_monitor.test_configurations.append(test_config)
new_test_group.test_configurations.append(test_config_name)
connection_monitor.test_groups.append(new_test_group)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
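# Removing a test group also prunes any endpoints and test configurations that no remaining test group
# references, so the monitor is left without orphaned objects.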
def remove_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
new_test_groups, removed_test_group = [], None
for t in connection_monitor.test_groups:
if t.name == name:
removed_test_group = t
else:
new_test_groups.append(t)
if removed_test_group is None:
        raise CLIError('test group "{}" does not exist'.format(name))
connection_monitor.test_groups = new_test_groups
# deal with endpoints which are only referenced by this removed test group
removed_endpoints = []
for e in removed_test_group.sources + removed_test_group.destinations:
tmp = [t for t in connection_monitor.test_groups if (e in t.sources or e in t.destinations)]
if not tmp:
removed_endpoints.append(e)
connection_monitor.endpoints = [e for e in connection_monitor.endpoints if e.name not in removed_endpoints]
    # deal with test configurations which are only referenced by this removed test group
removed_test_configurations = []
for c in removed_test_group.test_configurations:
tmp = [t for t in connection_monitor.test_groups if c in t.test_configurations]
if not tmp:
removed_test_configurations.append(c)
connection_monitor.test_configurations = [c for c in connection_monitor.test_configurations
if c.name not in removed_test_configurations]
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for t in connection_monitor.test_groups:
if t.name == name:
return t
raise CLIError('unknown test group: {}'.format(name))
def list_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.test_groups
def add_nw_connection_monitor_v2_output(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
out_type,
workspace_id=None):
output = _create_nw_connection_monitor_v2_output(cmd, out_type, workspace_id)
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
if connection_monitor.outputs is None:
connection_monitor.outputs = []
connection_monitor.outputs.append(output)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_output(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.outputs = []
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def list_nw_connection_monitor_v2_output(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.outputs
def show_topology_watcher(cmd, client, resource_group_name, network_watcher_name, target_resource_group_name=None,
target_vnet=None, target_subnet=None): # pylint: disable=unused-argument
TopologyParameters = cmd.get_models('TopologyParameters')
return client.get_topology(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=TopologyParameters(
target_resource_group_name=target_resource_group_name,
target_virtual_network=target_vnet,
target_subnet=target_subnet
))
def check_nw_connectivity(cmd, client, watcher_rg, watcher_name, source_resource, source_port=None,
dest_resource=None, dest_port=None, dest_address=None,
resource_group_name=None, protocol=None, method=None, headers=None, valid_status_codes=None):
ConnectivitySource, ConnectivityDestination, ConnectivityParameters, ProtocolConfiguration, HTTPConfiguration = \
cmd.get_models(
'ConnectivitySource', 'ConnectivityDestination', 'ConnectivityParameters', 'ProtocolConfiguration',
'HTTPConfiguration')
params = ConnectivityParameters(
source=ConnectivitySource(resource_id=source_resource, port=source_port),
destination=ConnectivityDestination(resource_id=dest_resource, address=dest_address, port=dest_port),
protocol=protocol
)
if any([method, headers, valid_status_codes]):
params.protocol_configuration = ProtocolConfiguration(http_configuration=HTTPConfiguration(
method=method,
headers=headers,
valid_status_codes=valid_status_codes
))
return client.begin_check_connectivity(watcher_rg, watcher_name, params)
def check_nw_ip_flow(cmd, client, vm, watcher_rg, watcher_name, direction, protocol, local, remote,
resource_group_name=None, nic=None, location=None):
VerificationIPFlowParameters = cmd.get_models('VerificationIPFlowParameters')
try:
local_ip_address, local_port = local.split(':')
remote_ip_address, remote_port = remote.split(':')
    except ValueError:
        raise CLIError("usage error: '--local' and '--remote' values must be in the format x.x.x.x:port")
if not is_valid_resource_id(vm):
if not resource_group_name:
raise CLIError("usage error: --vm NAME --resource-group NAME | --vm ID")
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
if nic and not is_valid_resource_id(nic):
if not resource_group_name:
raise CLIError("usage error: --nic NAME --resource-group NAME | --nic ID")
nic = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network', type='networkInterfaces', name=nic)
return client.begin_verify_ip_flow(
watcher_rg, watcher_name,
VerificationIPFlowParameters(
target_resource_id=vm, direction=direction, protocol=protocol, local_port=local_port,
remote_port=remote_port, local_ip_address=local_ip_address,
remote_ip_address=remote_ip_address, target_nic_resource_id=nic))
def show_nw_next_hop(cmd, client, resource_group_name, vm, watcher_rg, watcher_name,
source_ip, dest_ip, nic=None, location=None):
NextHopParameters = cmd.get_models('NextHopParameters')
if not is_valid_resource_id(vm):
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
if nic and not is_valid_resource_id(nic):
nic = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network', type='networkInterfaces', name=nic)
return client.begin_get_next_hop(
watcher_rg, watcher_name, NextHopParameters(target_resource_id=vm,
source_ip_address=source_ip,
destination_ip_address=dest_ip,
target_nic_resource_id=nic))
def show_nw_security_view(cmd, client, resource_group_name, vm, watcher_rg, watcher_name, location=None):
if not is_valid_resource_id(vm):
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
security_group_view_parameters = cmd.get_models('SecurityGroupViewParameters')(target_resource_id=vm)
return client.begin_get_vm_security_rules(watcher_rg, watcher_name, security_group_view_parameters)
def create_nw_packet_capture(cmd, client, resource_group_name, capture_name, vm,
watcher_rg, watcher_name, location=None,
storage_account=None, storage_path=None, file_path=None,
capture_size=None, capture_limit=None, time_limit=None, filters=None):
PacketCapture, PacketCaptureStorageLocation = cmd.get_models('PacketCapture', 'PacketCaptureStorageLocation')
storage_settings = PacketCaptureStorageLocation(storage_id=storage_account,
storage_path=storage_path, file_path=file_path)
capture_params = PacketCapture(target=vm, storage_location=storage_settings,
bytes_to_capture_per_packet=capture_size,
total_bytes_per_session=capture_limit, time_limit_in_seconds=time_limit,
filters=filters)
return client.begin_create(watcher_rg, watcher_name, capture_name, capture_params)
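# Legacy NSG flow-log configuration: the current settings are fetched through a long-running
# get-flow-log-status call and patched in place. Traffic analytics can only be enabled when a
# Log Analytics workspace (--workspace) is supplied.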
def set_nsg_flow_logging(cmd, client, watcher_rg, watcher_name, nsg, storage_account=None,
resource_group_name=None, enabled=None, retention=0, log_format=None, log_version=None,
traffic_analytics_workspace=None, traffic_analytics_interval=None,
traffic_analytics_enabled=None):
from azure.cli.core.commands import LongRunningOperation
flowlog_status_parameters = cmd.get_models('FlowLogStatusParameters')(target_resource_id=nsg)
config = LongRunningOperation(cmd.cli_ctx)(client.begin_get_flow_log_status(watcher_rg,
watcher_name,
flowlog_status_parameters))
try:
if not config.flow_analytics_configuration.network_watcher_flow_analytics_configuration.workspace_id:
config.flow_analytics_configuration = None
except AttributeError:
config.flow_analytics_configuration = None
with cmd.update_context(config) as c:
c.set_param('enabled', enabled if enabled is not None else config.enabled)
c.set_param('storage_id', storage_account or config.storage_id)
if retention is not None:
config.retention_policy = {
'days': retention,
'enabled': int(retention) > 0
}
if cmd.supported_api_version(min_api='2018-10-01') and (log_format or log_version):
config.format = {
'type': log_format,
'version': log_version
}
if cmd.supported_api_version(min_api='2018-10-01') and \
any([traffic_analytics_workspace is not None, traffic_analytics_enabled is not None]):
workspace = None
if traffic_analytics_workspace:
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not config.flow_analytics_configuration:
# must create whole object
if not workspace:
raise CLIError('usage error (analytics not already configured): --workspace NAME_OR_ID '
'[--enabled {true|false}]')
if traffic_analytics_enabled is None:
traffic_analytics_enabled = True
config.flow_analytics_configuration = {
'network_watcher_flow_analytics_configuration': {
'enabled': traffic_analytics_enabled,
'workspace_id': workspace.properties['customerId'],
'workspace_region': workspace.location,
'workspace_resource_id': traffic_analytics_workspace,
'traffic_analytics_interval': traffic_analytics_interval
}
}
else:
# pylint: disable=line-too-long
with cmd.update_context(config.flow_analytics_configuration.network_watcher_flow_analytics_configuration) as c:
# update object
c.set_param('enabled', traffic_analytics_enabled)
if traffic_analytics_workspace == "":
config.flow_analytics_configuration = None
elif workspace:
c.set_param('workspace_id', workspace.properties['customerId'])
c.set_param('workspace_region', workspace.location)
c.set_param('workspace_resource_id', traffic_analytics_workspace)
c.set_param('traffic_analytics_interval', traffic_analytics_interval)
return client.begin_set_flow_log_configuration(watcher_rg, watcher_name, config)
# the combination of resource_group_name and nsg selects the old (NSG-based) output
# the combination of location and flow_log_name selects the new flow-log resource output
def show_nsg_flow_logging(cmd, client, watcher_rg, watcher_name, location=None, resource_group_name=None, nsg=None,
flow_log_name=None):
# deprecated approach to show flow log
if nsg is not None:
flowlog_status_parameters = cmd.get_models('FlowLogStatusParameters')(target_resource_id=nsg)
return client.begin_get_flow_log_status(watcher_rg, watcher_name, flowlog_status_parameters)
# new approach to show flow log
from ._client_factory import cf_flow_logs
client = cf_flow_logs(cmd.cli_ctx, None)
return client.get(watcher_rg, watcher_name, flow_log_name)
def create_nw_flow_log(cmd,
client,
location,
watcher_rg,
watcher_name,
flow_log_name,
nsg,
storage_account=None,
resource_group_name=None,
enabled=None,
retention=0,
log_format=None,
log_version=None,
traffic_analytics_workspace=None,
traffic_analytics_interval=60,
traffic_analytics_enabled=None,
tags=None):
FlowLog = cmd.get_models('FlowLog')
flow_log = FlowLog(location=location,
target_resource_id=nsg,
storage_id=storage_account,
enabled=enabled,
tags=tags)
if retention > 0:
RetentionPolicyParameters = cmd.get_models('RetentionPolicyParameters')
retention_policy = RetentionPolicyParameters(days=retention, enabled=(retention > 0))
flow_log.retention_policy = retention_policy
if log_format is not None or log_version is not None:
FlowLogFormatParameters = cmd.get_models('FlowLogFormatParameters')
format_config = FlowLogFormatParameters(type=log_format, version=log_version)
flow_log.format = format_config
if traffic_analytics_workspace is not None:
TrafficAnalyticsProperties, TrafficAnalyticsConfigurationProperties = \
cmd.get_models('TrafficAnalyticsProperties', 'TrafficAnalyticsConfigurationProperties')
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not workspace:
raise CLIError('Name or ID of workspace is invalid')
traffic_analytics_config = TrafficAnalyticsConfigurationProperties(
enabled=traffic_analytics_enabled,
workspace_id=workspace.properties['customerId'],
workspace_region=workspace.location,
workspace_resource_id=workspace.id,
traffic_analytics_interval=traffic_analytics_interval
)
traffic_analytics = TrafficAnalyticsProperties(
network_watcher_flow_analytics_configuration=traffic_analytics_config
)
flow_log.flow_analytics_configuration = traffic_analytics
return client.begin_create_or_update(watcher_rg, watcher_name, flow_log_name, flow_log)
def update_nw_flow_log_getter(client, watcher_rg, watcher_name, flow_log_name):
return client.get(watcher_rg, watcher_name, flow_log_name)
def update_nw_flow_log_setter(client, watcher_rg, watcher_name, flow_log_name, parameters):
return client.begin_create_or_update(watcher_rg, watcher_name, flow_log_name, parameters)
def update_nw_flow_log(cmd,
instance,
location,
resource_group_name=None, # dummy parameter to let it appear in command
enabled=None,
nsg=None,
storage_account=None,
retention=0,
log_format=None,
log_version=None,
traffic_analytics_workspace=None,
traffic_analytics_interval=60,
traffic_analytics_enabled=None,
tags=None):
with cmd.update_context(instance) as c:
c.set_param('enabled', enabled)
c.set_param('tags', tags)
c.set_param('storage_id', storage_account)
c.set_param('target_resource_id', nsg)
with cmd.update_context(instance.retention_policy) as c:
c.set_param('days', retention)
c.set_param('enabled', retention > 0)
with cmd.update_context(instance.format) as c:
c.set_param('type', log_format)
c.set_param('version', log_version)
if traffic_analytics_workspace is not None:
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not workspace:
raise CLIError('Name or ID of workspace is invalid')
if instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration is None:
analytics_conf = cmd.get_models('TrafficAnalyticsConfigurationProperties')
instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration = analytics_conf()
with cmd.update_context(
instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration) as c:
c.set_param('enabled', traffic_analytics_enabled)
c.set_param('workspace_id', workspace.properties['customerId'])
c.set_param('workspace_region', workspace.location)
c.set_param('workspace_resource_id', workspace.id)
c.set_param('traffic_analytics_interval', traffic_analytics_interval)
return instance
def list_nw_flow_log(client, watcher_rg, watcher_name, location):
return client.list(watcher_rg, watcher_name)
def delete_nw_flow_log(client, watcher_rg, watcher_name, location, flow_log_name):
return client.begin_delete(watcher_rg, watcher_name, flow_log_name)
def start_nw_troubleshooting(cmd, client, watcher_name, watcher_rg, resource, storage_account,
storage_path, resource_type=None, resource_group_name=None,
no_wait=False):
TroubleshootingParameters = cmd.get_models('TroubleshootingParameters')
params = TroubleshootingParameters(target_resource_id=resource, storage_id=storage_account,
storage_path=storage_path)
return sdk_no_wait(no_wait, client.begin_get_troubleshooting, watcher_rg, watcher_name, params)
def show_nw_troubleshooting_result(cmd, client, watcher_name, watcher_rg, resource, resource_type=None,
resource_group_name=None):
query_troubleshooting_parameters = cmd.get_models('QueryTroubleshootingParameters')(target_resource_id=resource)
return client.begin_get_troubleshooting_result(watcher_rg, watcher_name, query_troubleshooting_parameters)
def run_network_configuration_diagnostic(cmd, client, watcher_rg, watcher_name, resource,
direction=None, protocol=None, source=None, destination=None,
destination_port=None, queries=None,
resource_group_name=None, resource_type=None, parent=None):
NetworkConfigurationDiagnosticParameters, NetworkConfigurationDiagnosticProfile = \
cmd.get_models('NetworkConfigurationDiagnosticParameters', 'NetworkConfigurationDiagnosticProfile')
if not queries:
queries = [NetworkConfigurationDiagnosticProfile(
direction=direction,
protocol=protocol,
source=source,
destination=destination,
destination_port=destination_port
)]
params = NetworkConfigurationDiagnosticParameters(target_resource_id=resource, profiles=queries)
return client.begin_get_network_configuration_diagnostic(watcher_rg, watcher_name, params)
# endregion
# region CustomIpPrefix
def create_custom_ip_prefix(cmd, client, resource_group_name, custom_ip_prefix_name, location=None,
cidr=None, tags=None, zone=None, signed_message=None, authorization_message=None,
custom_ip_prefix_parent=None, no_wait=False):
CustomIpPrefix = cmd.get_models('CustomIpPrefix')
prefix = CustomIpPrefix(
location=location,
cidr=cidr,
zones=zone,
tags=tags,
signed_message=signed_message,
authorization_message=authorization_message
)
    if custom_ip_prefix_parent:
        try:
            # the parent is assumed to be the name of an existing custom IP prefix in the same resource group
            prefix.custom_ip_prefix_parent = client.get(resource_group_name, custom_ip_prefix_parent)
        except ResourceNotFoundError:
            raise ResourceNotFoundError("Custom ip prefix parent {} doesn't exist".format(custom_ip_prefix_parent))
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, custom_ip_prefix_name, prefix)
def update_custom_ip_prefix(instance, signed_message=None, authorization_message=None, tags=None):
if tags is not None:
instance.tags = tags
if signed_message is not None:
instance.signed_message = signed_message
if authorization_message is not None:
instance.authorization_message = authorization_message
return instance
# endregion
# region PublicIPAddresses
def create_public_ip(cmd, resource_group_name, public_ip_address_name, location=None, tags=None,
allocation_method=None, dns_name=None,
idle_timeout=4, reverse_fqdn=None, version=None, sku=None, tier=None, zone=None, ip_tags=None,
public_ip_prefix=None, edge_zone=None, ip_address=None):
IPAllocationMethod, PublicIPAddress, PublicIPAddressDnsSettings, SubResource = cmd.get_models(
'IPAllocationMethod', 'PublicIPAddress', 'PublicIPAddressDnsSettings', 'SubResource')
client = network_client_factory(cmd.cli_ctx).public_ip_addresses
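    # Default the allocation method: Standard SKU public IPs require static allocation, so fall back to
    # static for standard and dynamic otherwise.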
if not allocation_method:
allocation_method = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
public_ip_args = {
'location': location,
'tags': tags,
'public_ip_allocation_method': allocation_method,
'idle_timeout_in_minutes': idle_timeout,
'ip_address': ip_address,
'dns_settings': None
}
if cmd.supported_api_version(min_api='2016-09-01'):
public_ip_args['public_ip_address_version'] = version
if cmd.supported_api_version(min_api='2017-06-01'):
public_ip_args['zones'] = zone
if cmd.supported_api_version(min_api='2017-11-01'):
public_ip_args['ip_tags'] = ip_tags
if cmd.supported_api_version(min_api='2018-07-01') and public_ip_prefix:
public_ip_args['public_ip_prefix'] = SubResource(id=public_ip_prefix)
if sku:
public_ip_args['sku'] = {'name': sku}
if tier:
if not sku:
public_ip_args['sku'] = {'name': 'Basic'}
public_ip_args['sku'].update({'tier': tier})
public_ip = PublicIPAddress(**public_ip_args)
if dns_name or reverse_fqdn:
public_ip.dns_settings = PublicIPAddressDnsSettings(
domain_name_label=dns_name,
reverse_fqdn=reverse_fqdn)
if edge_zone:
public_ip.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, public_ip_address_name, public_ip)
def update_public_ip(cmd, instance, dns_name=None, allocation_method=None, version=None,
idle_timeout=None, reverse_fqdn=None, tags=None, sku=None, ip_tags=None,
public_ip_prefix=None):
if dns_name is not None or reverse_fqdn is not None:
if instance.dns_settings:
if dns_name is not None:
instance.dns_settings.domain_name_label = dns_name
if reverse_fqdn is not None:
instance.dns_settings.reverse_fqdn = reverse_fqdn
else:
PublicIPAddressDnsSettings = cmd.get_models('PublicIPAddressDnsSettings')
instance.dns_settings = PublicIPAddressDnsSettings(domain_name_label=dns_name, fqdn=None,
reverse_fqdn=reverse_fqdn)
if allocation_method is not None:
instance.public_ip_allocation_method = allocation_method
if version is not None:
instance.public_ip_address_version = version
if idle_timeout is not None:
instance.idle_timeout_in_minutes = idle_timeout
if tags is not None:
instance.tags = tags
if sku is not None:
instance.sku.name = sku
if ip_tags:
instance.ip_tags = ip_tags
if public_ip_prefix:
SubResource = cmd.get_models('SubResource')
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
return instance
def create_public_ip_prefix(cmd, client, resource_group_name, public_ip_prefix_name, prefix_length,
version=None, location=None, tags=None, zone=None, edge_zone=None,
custom_ip_prefix_name=None):
PublicIPPrefix, PublicIPPrefixSku = cmd.get_models('PublicIPPrefix', 'PublicIPPrefixSku')
prefix = PublicIPPrefix(
location=location,
prefix_length=prefix_length,
sku=PublicIPPrefixSku(name='Standard'),
tags=tags,
zones=zone
)
if cmd.supported_api_version(min_api='2019-08-01'):
prefix.public_ip_address_version = version if version is not None else 'ipv4'
if cmd.supported_api_version(min_api='2020-06-01') and custom_ip_prefix_name:
cip_client = network_client_factory(cmd.cli_ctx).custom_ip_prefixes
try:
prefix.custom_ip_prefix = cip_client.get(resource_group_name, custom_ip_prefix_name)
except ResourceNotFoundError:
raise ResourceNotFoundError('Custom ip prefix {} doesn\'t exist.'.format(custom_ip_prefix_name))
if edge_zone:
prefix.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, public_ip_prefix_name, prefix)
def update_public_ip_prefix(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
# endregion
# region RouteFilters
def create_route_filter(cmd, client, resource_group_name, route_filter_name, location=None, tags=None):
RouteFilter = cmd.get_models('RouteFilter')
return client.begin_create_or_update(resource_group_name, route_filter_name,
RouteFilter(location=location, tags=tags))
def list_route_filters(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def create_route_filter_rule(cmd, client, resource_group_name, route_filter_name, rule_name, access, communities,
location=None):
RouteFilterRule = cmd.get_models('RouteFilterRule')
return client.begin_create_or_update(resource_group_name, route_filter_name, rule_name,
RouteFilterRule(access=access, communities=communities,
location=location))
# endregion
# region RouteTables
def create_route_table(cmd, resource_group_name, route_table_name, location=None, tags=None,
disable_bgp_route_propagation=None):
RouteTable = cmd.get_models('RouteTable')
ncf = network_client_factory(cmd.cli_ctx)
route_table = RouteTable(location=location, tags=tags)
if cmd.supported_api_version(min_api='2017-10-01'):
route_table.disable_bgp_route_propagation = disable_bgp_route_propagation
return ncf.route_tables.begin_create_or_update(resource_group_name, route_table_name, route_table)
def update_route_table(instance, tags=None, disable_bgp_route_propagation=None):
if tags == '':
instance.tags = None
elif tags is not None:
instance.tags = tags
if disable_bgp_route_propagation is not None:
instance.disable_bgp_route_propagation = disable_bgp_route_propagation
return instance
def create_route(cmd, resource_group_name, route_table_name, route_name, next_hop_type, address_prefix,
next_hop_ip_address=None):
Route = cmd.get_models('Route')
route = Route(next_hop_type=next_hop_type, address_prefix=address_prefix,
next_hop_ip_address=next_hop_ip_address, name=route_name)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.routes.begin_create_or_update(resource_group_name, route_table_name, route_name, route)
def update_route(instance, address_prefix=None, next_hop_type=None, next_hop_ip_address=None):
if address_prefix is not None:
instance.address_prefix = address_prefix
if next_hop_type is not None:
instance.next_hop_type = next_hop_type
if next_hop_ip_address is not None:
instance.next_hop_ip_address = next_hop_ip_address
return instance
# endregion
# region ServiceEndpoints
def create_service_endpoint_policy(cmd, resource_group_name, service_endpoint_policy_name, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policies
ServiceEndpointPolicy = cmd.get_models('ServiceEndpointPolicy')
policy = ServiceEndpointPolicy(tags=tags, location=location)
return client.begin_create_or_update(resource_group_name, service_endpoint_policy_name, policy)
def list_service_endpoint_policies(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policies
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def update_service_endpoint_policy(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
def create_service_endpoint_policy_definition(cmd, resource_group_name, service_endpoint_policy_name,
service_endpoint_policy_definition_name, service, service_resources,
description=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policy_definitions
ServiceEndpointPolicyDefinition = cmd.get_models('ServiceEndpointPolicyDefinition')
policy_def = ServiceEndpointPolicyDefinition(description=description, service=service,
service_resources=service_resources)
return client.begin_create_or_update(resource_group_name, service_endpoint_policy_name,
service_endpoint_policy_definition_name, policy_def)
def update_service_endpoint_policy_definition(instance, service=None, service_resources=None, description=None):
if service is not None:
instance.service = service
if service_resources is not None:
instance.service_resources = service_resources
if description is not None:
instance.description = description
return instance
# endregion
# region TrafficManagers
def list_traffic_manager_profiles(cmd, resource_group_name=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
def create_traffic_manager_profile(cmd, traffic_manager_profile_name, resource_group_name,
routing_method, unique_dns_name, monitor_path=None,
monitor_port=80, monitor_protocol=MonitorProtocol.http.value,
profile_status=ProfileStatus.enabled.value,
ttl=30, tags=None, interval=None, timeout=None, max_failures=None,
monitor_custom_headers=None, status_code_ranges=None, max_return=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.mgmt.trafficmanager.models import Profile, DnsConfig, MonitorConfig
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
if monitor_path is None and monitor_protocol == 'HTTP':
monitor_path = '/'
profile = Profile(location='global', tags=tags, profile_status=profile_status,
traffic_routing_method=routing_method,
dns_config=DnsConfig(relative_name=unique_dns_name, ttl=ttl),
monitor_config=MonitorConfig(protocol=monitor_protocol,
port=monitor_port,
path=monitor_path,
interval_in_seconds=interval,
timeout_in_seconds=timeout,
tolerated_number_of_failures=max_failures,
custom_headers=monitor_custom_headers,
expected_status_code_ranges=status_code_ranges),
max_return=max_return)
return client.create_or_update(resource_group_name, traffic_manager_profile_name, profile)
def update_traffic_manager_profile(instance, profile_status=None, routing_method=None, tags=None,
monitor_protocol=None, monitor_port=None, monitor_path=None,
ttl=None, timeout=None, interval=None, max_failures=None,
monitor_custom_headers=None, status_code_ranges=None, max_return=None):
if tags is not None:
instance.tags = tags
if profile_status is not None:
instance.profile_status = profile_status
if routing_method is not None:
instance.traffic_routing_method = routing_method
if ttl is not None:
instance.dns_config.ttl = ttl
if monitor_protocol is not None:
instance.monitor_config.protocol = monitor_protocol
if monitor_port is not None:
instance.monitor_config.port = monitor_port
if monitor_path == '':
instance.monitor_config.path = None
elif monitor_path is not None:
instance.monitor_config.path = monitor_path
if interval is not None:
instance.monitor_config.interval_in_seconds = interval
if timeout is not None:
instance.monitor_config.timeout_in_seconds = timeout
if max_failures is not None:
instance.monitor_config.tolerated_number_of_failures = max_failures
if monitor_custom_headers is not None:
instance.monitor_config.custom_headers = monitor_custom_headers
if status_code_ranges is not None:
instance.monitor_config.expected_status_code_ranges = status_code_ranges
if max_return is not None:
instance.max_return = max_return
# TODO: Remove workaround after https://github.com/Azure/azure-rest-api-specs/issues/1940 fixed
for endpoint in instance.endpoints:
endpoint._validation = { # pylint: disable=protected-access
'name': {'readonly': False},
'type': {'readonly': False},
}
return instance
def create_traffic_manager_endpoint(cmd, resource_group_name, profile_name, endpoint_type, endpoint_name,
target_resource_id=None, target=None,
endpoint_status=None, weight=None, priority=None,
endpoint_location=None, endpoint_monitor_status=None,
min_child_endpoints=None, geo_mapping=None,
monitor_custom_headers=None, subnets=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.mgmt.trafficmanager.models import Endpoint
ncf = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).endpoints
endpoint = Endpoint(target_resource_id=target_resource_id, target=target,
endpoint_status=endpoint_status, weight=weight, priority=priority,
endpoint_location=endpoint_location,
endpoint_monitor_status=endpoint_monitor_status,
min_child_endpoints=min_child_endpoints,
geo_mapping=geo_mapping,
subnets=subnets,
custom_headers=monitor_custom_headers)
return ncf.create_or_update(resource_group_name, profile_name, endpoint_type, endpoint_name,
endpoint)
def update_traffic_manager_endpoint(instance, endpoint_type=None, endpoint_location=None,
endpoint_status=None, endpoint_monitor_status=None,
priority=None, target=None, target_resource_id=None,
weight=None, min_child_endpoints=None, geo_mapping=None,
subnets=None, monitor_custom_headers=None):
if endpoint_location is not None:
instance.endpoint_location = endpoint_location
if endpoint_status is not None:
instance.endpoint_status = endpoint_status
if endpoint_monitor_status is not None:
instance.endpoint_monitor_status = endpoint_monitor_status
if priority is not None:
instance.priority = priority
if target is not None:
instance.target = target
if target_resource_id is not None:
instance.target_resource_id = target_resource_id
if weight is not None:
instance.weight = weight
if min_child_endpoints is not None:
instance.min_child_endpoints = min_child_endpoints
if geo_mapping is not None:
instance.geo_mapping = geo_mapping
if subnets is not None:
instance.subnets = subnets
if monitor_custom_headers:
instance.custom_headers = monitor_custom_headers
return instance
def list_traffic_manager_endpoints(cmd, resource_group_name, profile_name, endpoint_type=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
profile = client.get(resource_group_name, profile_name)
return [e for e in profile.endpoints if not endpoint_type or e.type.endswith(endpoint_type)]
# endregion
# region VirtualNetworks
# pylint: disable=too-many-locals
def create_vnet(cmd, resource_group_name, vnet_name, vnet_prefixes='10.0.0.0/16',
subnet_name=None, subnet_prefix=None, dns_servers=None,
location=None, tags=None, vm_protection=None, ddos_protection=None,
ddos_protection_plan=None, network_security_group=None, edge_zone=None, flowtimeout=None):
AddressSpace, DhcpOptions, Subnet, VirtualNetwork, SubResource, NetworkSecurityGroup = \
cmd.get_models('AddressSpace', 'DhcpOptions', 'Subnet', 'VirtualNetwork',
'SubResource', 'NetworkSecurityGroup')
client = network_client_factory(cmd.cli_ctx).virtual_networks
tags = tags or {}
vnet = VirtualNetwork(
location=location, tags=tags,
dhcp_options=DhcpOptions(dns_servers=dns_servers),
address_space=AddressSpace(address_prefixes=(vnet_prefixes if isinstance(vnet_prefixes, list) else [vnet_prefixes]))) # pylint: disable=line-too-long
if subnet_name:
if cmd.supported_api_version(min_api='2018-08-01'):
vnet.subnets = [Subnet(name=subnet_name,
address_prefix=subnet_prefix[0] if len(subnet_prefix) == 1 else None,
address_prefixes=subnet_prefix if len(subnet_prefix) > 1 else None,
network_security_group=NetworkSecurityGroup(id=network_security_group)
if network_security_group else None)]
else:
vnet.subnets = [Subnet(name=subnet_name, address_prefix=subnet_prefix)]
if cmd.supported_api_version(min_api='2017-09-01'):
vnet.enable_ddos_protection = ddos_protection
vnet.enable_vm_protection = vm_protection
if cmd.supported_api_version(min_api='2018-02-01'):
vnet.ddos_protection_plan = SubResource(id=ddos_protection_plan) if ddos_protection_plan else None
if edge_zone:
vnet.extended_location = _edge_zone_model(cmd, edge_zone)
if flowtimeout is not None:
vnet.flow_timeout_in_minutes = flowtimeout
return cached_put(cmd, client.begin_create_or_update, vnet, resource_group_name, vnet_name)
def update_vnet(cmd, instance, vnet_prefixes=None, dns_servers=None, ddos_protection=None, vm_protection=None,
ddos_protection_plan=None, flowtimeout=None):
    # server-side validation reports a clear error message for an invalid CIDR,
    # so we don't validate on the client side
AddressSpace, DhcpOptions, SubResource = cmd.get_models('AddressSpace', 'DhcpOptions', 'SubResource')
if vnet_prefixes and instance.address_space:
instance.address_space.address_prefixes = vnet_prefixes
elif vnet_prefixes:
instance.address_space = AddressSpace(address_prefixes=vnet_prefixes)
if dns_servers == ['']:
instance.dhcp_options.dns_servers = None
elif dns_servers and instance.dhcp_options:
instance.dhcp_options.dns_servers = dns_servers
elif dns_servers:
instance.dhcp_options = DhcpOptions(dns_servers=dns_servers)
if ddos_protection is not None:
instance.enable_ddos_protection = ddos_protection
if vm_protection is not None:
instance.enable_vm_protection = vm_protection
if ddos_protection_plan == '':
instance.ddos_protection_plan = None
elif ddos_protection_plan is not None:
instance.ddos_protection_plan = SubResource(id=ddos_protection_plan)
if flowtimeout is not None:
instance.flow_timeout_in_minutes = flowtimeout
return instance
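# Helper shared by subnet create/update: the route table may be given as a full resource ID or as a
# name in the given resource group, and an empty string clears the association.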
def _set_route_table(ncf, resource_group_name, route_table, subnet):
if route_table:
is_id = is_valid_resource_id(route_table)
rt = None
if is_id:
res_id = parse_resource_id(route_table)
rt = ncf.route_tables.get(res_id['resource_group'], res_id['name'])
else:
rt = ncf.route_tables.get(resource_group_name, route_table)
subnet.route_table = rt
elif route_table == '':
subnet.route_table = None
def create_subnet(cmd, resource_group_name, virtual_network_name, subnet_name,
address_prefix, network_security_group=None,
route_table=None, service_endpoints=None, service_endpoint_policy=None,
delegations=None, nat_gateway=None,
disable_private_endpoint_network_policies=None,
disable_private_link_service_network_policies=None):
NetworkSecurityGroup, ServiceEndpoint, Subnet, SubResource = cmd.get_models(
'NetworkSecurityGroup', 'ServiceEndpointPropertiesFormat', 'Subnet', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
if cmd.supported_api_version(min_api='2018-08-01'):
subnet = Subnet(
name=subnet_name,
address_prefixes=address_prefix if len(address_prefix) > 1 else None,
address_prefix=address_prefix[0] if len(address_prefix) == 1 else None
)
if cmd.supported_api_version(min_api='2019-02-01') and nat_gateway:
subnet.nat_gateway = SubResource(id=nat_gateway)
else:
subnet = Subnet(name=subnet_name, address_prefix=address_prefix)
if network_security_group:
subnet.network_security_group = NetworkSecurityGroup(id=network_security_group)
_set_route_table(ncf, resource_group_name, route_table, subnet)
if service_endpoints:
subnet.service_endpoints = []
for service in service_endpoints:
subnet.service_endpoints.append(ServiceEndpoint(service=service))
if service_endpoint_policy:
subnet.service_endpoint_policies = []
for policy in service_endpoint_policy:
subnet.service_endpoint_policies.append(SubResource(id=policy))
if delegations:
subnet.delegations = delegations
if disable_private_endpoint_network_policies is True:
subnet.private_endpoint_network_policies = "Disabled"
if disable_private_endpoint_network_policies is False:
subnet.private_endpoint_network_policies = "Enabled"
if disable_private_link_service_network_policies is True:
subnet.private_link_service_network_policies = "Disabled"
if disable_private_link_service_network_policies is False:
subnet.private_link_service_network_policies = "Enabled"
vnet = cached_get(cmd, ncf.virtual_networks.get, resource_group_name, virtual_network_name)
upsert_to_collection(vnet, 'subnets', subnet, 'name')
vnet = cached_put(
cmd, ncf.virtual_networks.begin_create_or_update, vnet, resource_group_name, virtual_network_name).result()
return get_property(vnet.subnets, subnet_name)
def update_subnet(cmd, instance, resource_group_name, address_prefix=None, network_security_group=None,
route_table=None, service_endpoints=None, delegations=None, nat_gateway=None,
service_endpoint_policy=None, disable_private_endpoint_network_policies=None,
disable_private_link_service_network_policies=None):
NetworkSecurityGroup, ServiceEndpoint, SubResource = cmd.get_models(
'NetworkSecurityGroup', 'ServiceEndpointPropertiesFormat', 'SubResource')
if address_prefix:
if cmd.supported_api_version(min_api='2018-08-01'):
instance.address_prefixes = address_prefix if len(address_prefix) > 1 else None
instance.address_prefix = address_prefix[0] if len(address_prefix) == 1 else None
else:
instance.address_prefix = address_prefix
if cmd.supported_api_version(min_api='2019-02-01') and nat_gateway:
instance.nat_gateway = SubResource(id=nat_gateway)
elif nat_gateway == '':
instance.nat_gateway = None
if network_security_group:
instance.network_security_group = NetworkSecurityGroup(id=network_security_group)
elif network_security_group == '': # clear it
instance.network_security_group = None
_set_route_table(network_client_factory(cmd.cli_ctx), resource_group_name, route_table, instance)
if service_endpoints == ['']:
instance.service_endpoints = None
elif service_endpoints:
instance.service_endpoints = []
for service in service_endpoints:
instance.service_endpoints.append(ServiceEndpoint(service=service))
if service_endpoint_policy == '':
instance.service_endpoint_policies = None
elif service_endpoint_policy:
instance.service_endpoint_policies = []
for policy in service_endpoint_policy:
instance.service_endpoint_policies.append(SubResource(id=policy))
if delegations:
instance.delegations = delegations
if disable_private_endpoint_network_policies:
instance.private_endpoint_network_policies = "Disabled"
elif disable_private_endpoint_network_policies is not None:
instance.private_endpoint_network_policies = "Enabled"
if disable_private_link_service_network_policies:
instance.private_link_service_network_policies = "Disabled"
elif disable_private_link_service_network_policies is not None:
instance.private_link_service_network_policies = "Enabled"
return instance
def list_avail_subnet_delegations(cmd, resource_group_name=None, location=None):
client = network_client_factory(cmd.cli_ctx)
if resource_group_name:
return client.available_resource_group_delegations.list(location, resource_group_name)
return client.available_delegations.list(location)
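# A bare name for the remote virtual network is expanded to a full resource ID in the caller's
# subscription and resource group; the peering client is created with the remote subscription as an
# auxiliary subscription so cross-subscription peering works.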
def create_vnet_peering(cmd, resource_group_name, virtual_network_name, virtual_network_peering_name,
remote_virtual_network, allow_virtual_network_access=False,
allow_forwarded_traffic=False, allow_gateway_transit=False,
use_remote_gateways=False):
if not is_valid_resource_id(remote_virtual_network):
remote_virtual_network = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=remote_virtual_network
)
SubResource, VirtualNetworkPeering = cmd.get_models('SubResource', 'VirtualNetworkPeering')
peering = VirtualNetworkPeering(
id=resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=virtual_network_name),
name=virtual_network_peering_name,
remote_virtual_network=SubResource(id=remote_virtual_network),
allow_virtual_network_access=allow_virtual_network_access,
allow_gateway_transit=allow_gateway_transit,
allow_forwarded_traffic=allow_forwarded_traffic,
use_remote_gateways=use_remote_gateways)
aux_subscription = parse_resource_id(remote_virtual_network)['subscription']
ncf = network_client_factory(cmd.cli_ctx, aux_subscriptions=[aux_subscription])
return ncf.virtual_network_peerings.begin_create_or_update(
resource_group_name, virtual_network_name, virtual_network_peering_name, peering)
def update_vnet_peering(cmd, resource_group_name, virtual_network_name, virtual_network_peering_name, **kwargs):
peering = kwargs['parameters']
aux_subscription = parse_resource_id(peering.remote_virtual_network.id)['subscription']
ncf = network_client_factory(cmd.cli_ctx, aux_subscriptions=[aux_subscription])
return ncf.virtual_network_peerings.begin_create_or_update(
resource_group_name, virtual_network_name, virtual_network_peering_name, peering)
def list_available_ips(cmd, resource_group_name, virtual_network_name):
client = network_client_factory(cmd.cli_ctx).virtual_networks
vnet = client.get(resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name)
start_ip = vnet.address_space.address_prefixes[0].split('/')[0]
available_ips = client.check_ip_address_availability(resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
ip_address=start_ip)
return available_ips.available_ip_addresses
# endregion
# region VirtualNetworkGateways
def create_vnet_gateway_root_cert(cmd, resource_group_name, gateway_name, public_cert_data, cert_name):
VpnClientRootCertificate = cmd.get_models('VpnClientRootCertificate')
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if not gateway.vpn_client_configuration:
raise CLIError("Must add address prefixes to gateway '{}' prior to adding a root cert."
.format(gateway_name))
config = gateway.vpn_client_configuration
if config.vpn_client_root_certificates is None:
config.vpn_client_root_certificates = []
cert = VpnClientRootCertificate(name=cert_name, public_cert_data=public_cert_data)
upsert_to_collection(config, 'vpn_client_root_certificates', cert, 'name')
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def delete_vnet_gateway_root_cert(cmd, resource_group_name, gateway_name, cert_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
config = gateway.vpn_client_configuration
try:
cert = next(c for c in config.vpn_client_root_certificates if c.name == cert_name)
except (AttributeError, StopIteration):
raise CLIError('Certificate "{}" not found in gateway "{}"'.format(cert_name, gateway_name))
config.vpn_client_root_certificates.remove(cert)
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def create_vnet_gateway_revoked_cert(cmd, resource_group_name, gateway_name, thumbprint, cert_name):
VpnClientRevokedCertificate = cmd.get_models('VpnClientRevokedCertificate')
config, gateway, ncf = _prep_cert_create(cmd, gateway_name, resource_group_name)
cert = VpnClientRevokedCertificate(name=cert_name, thumbprint=thumbprint)
upsert_to_collection(config, 'vpn_client_revoked_certificates', cert, 'name')
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def delete_vnet_gateway_revoked_cert(cmd, resource_group_name, gateway_name, cert_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
config = gateway.vpn_client_configuration
try:
cert = next(c for c in config.vpn_client_revoked_certificates if c.name == cert_name)
except (AttributeError, StopIteration):
raise CLIError('Certificate "{}" not found in gateway "{}"'.format(cert_name, gateway_name))
config.vpn_client_revoked_certificates.remove(cert)
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def _prep_cert_create(cmd, gateway_name, resource_group_name):
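    # Fetch the gateway, require an existing VPN client address pool, and initialize the
    # root/revoked certificate lists so callers can append to them.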
VpnClientConfiguration = cmd.get_models('VpnClientConfiguration')
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if not gateway.vpn_client_configuration:
gateway.vpn_client_configuration = VpnClientConfiguration()
config = gateway.vpn_client_configuration
if not config.vpn_client_address_pool or not config.vpn_client_address_pool.address_prefixes:
raise CLIError('Address prefixes must be set on VPN gateways before adding'
' certificates. Please use "update" with --address-prefixes first.')
if config.vpn_client_revoked_certificates is None:
config.vpn_client_revoked_certificates = []
if config.vpn_client_root_certificates is None:
config.vpn_client_root_certificates = []
return config, gateway, ncf
def create_vnet_gateway(cmd, resource_group_name, virtual_network_gateway_name, public_ip_address,
virtual_network, location=None, tags=None,
no_wait=False, gateway_type=None, sku=None, vpn_type=None, vpn_gateway_generation=None,
asn=None, bgp_peering_address=None, peer_weight=None,
address_prefixes=None, radius_server=None, radius_secret=None, client_protocol=None,
gateway_default_site=None, custom_routes=None, aad_tenant=None, aad_audience=None,
aad_issuer=None, root_cert_data=None, root_cert_name=None, vpn_auth_type=None, edge_zone=None,
nat_rule=None):
(VirtualNetworkGateway, BgpSettings, SubResource, VirtualNetworkGatewayIPConfiguration, VirtualNetworkGatewaySku,
VpnClientConfiguration, AddressSpace, VpnClientRootCertificate, VirtualNetworkGatewayNatRule,
VpnNatRuleMapping) = cmd.get_models(
'VirtualNetworkGateway', 'BgpSettings', 'SubResource', 'VirtualNetworkGatewayIPConfiguration',
'VirtualNetworkGatewaySku', 'VpnClientConfiguration', 'AddressSpace', 'VpnClientRootCertificate',
'VirtualNetworkGatewayNatRule', 'VpnNatRuleMapping')
client = network_client_factory(cmd.cli_ctx).virtual_network_gateways
subnet = virtual_network + '/subnets/GatewaySubnet'
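    # Two public IP addresses mean the gateway is deployed in active-active mode.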
active = len(public_ip_address) == 2
vnet_gateway = VirtualNetworkGateway(
gateway_type=gateway_type, vpn_type=vpn_type, vpn_gateway_generation=vpn_gateway_generation, location=location,
tags=tags, sku=VirtualNetworkGatewaySku(name=sku, tier=sku), active=active, ip_configurations=[],
gateway_default_site=SubResource(id=gateway_default_site) if gateway_default_site else None)
for i, public_ip in enumerate(public_ip_address):
ip_configuration = VirtualNetworkGatewayIPConfiguration(
subnet=SubResource(id=subnet),
public_ip_address=SubResource(id=public_ip),
private_ip_allocation_method='Dynamic',
name='vnetGatewayConfig{}'.format(i)
)
vnet_gateway.ip_configurations.append(ip_configuration)
if asn or bgp_peering_address or peer_weight:
vnet_gateway.enable_bgp = True
vnet_gateway.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address,
peer_weight=peer_weight)
if any((address_prefixes, client_protocol)):
vnet_gateway.vpn_client_configuration = VpnClientConfiguration()
vnet_gateway.vpn_client_configuration.vpn_client_address_pool = AddressSpace()
vnet_gateway.vpn_client_configuration.vpn_client_address_pool.address_prefixes = address_prefixes
vnet_gateway.vpn_client_configuration.vpn_client_protocols = client_protocol
if any((radius_secret, radius_server)) and cmd.supported_api_version(min_api='2017-06-01'):
vnet_gateway.vpn_client_configuration.radius_server_address = radius_server
vnet_gateway.vpn_client_configuration.radius_server_secret = radius_secret
# multi authentication
if cmd.supported_api_version(min_api='2020-11-01'):
vnet_gateway.vpn_client_configuration.vpn_authentication_types = vpn_auth_type
vnet_gateway.vpn_client_configuration.aad_tenant = aad_tenant
vnet_gateway.vpn_client_configuration.aad_issuer = aad_issuer
vnet_gateway.vpn_client_configuration.aad_audience = aad_audience
vnet_gateway.vpn_client_configuration.vpn_client_root_certificates = [
VpnClientRootCertificate(name=root_cert_name,
public_cert_data=root_cert_data)] if root_cert_data else None
if custom_routes and cmd.supported_api_version(min_api='2019-02-01'):
vnet_gateway.custom_routes = AddressSpace()
vnet_gateway.custom_routes.address_prefixes = custom_routes
if edge_zone:
vnet_gateway.extended_location = _edge_zone_model(cmd, edge_zone)
if nat_rule:
vnet_gateway.nat_rules = [
VirtualNetworkGatewayNatRule(type_properties_type=rule.get('type'), mode=rule.get('mode'), name=rule.get('name'),
internal_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in rule.get('internal_mappings')] if rule.get('internal_mappings') else None,
external_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in rule.get('external_mappings')] if rule.get('external_mappings') else None,
ip_configuration_id=rule.get('ip_config_id')) for rule in nat_rule]
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, virtual_network_gateway_name, vnet_gateway)
def update_vnet_gateway(cmd, instance, sku=None, vpn_type=None, tags=None,
public_ip_address=None, gateway_type=None, enable_bgp=None,
asn=None, bgp_peering_address=None, peer_weight=None, virtual_network=None,
address_prefixes=None, radius_server=None, radius_secret=None, client_protocol=None,
gateway_default_site=None, custom_routes=None, aad_tenant=None, aad_audience=None,
aad_issuer=None, root_cert_data=None, root_cert_name=None, vpn_auth_type=None):
(AddressSpace, SubResource, VirtualNetworkGatewayIPConfiguration, VpnClientConfiguration,
VpnClientRootCertificate) = cmd.get_models('AddressSpace', 'SubResource', 'VirtualNetworkGatewayIPConfiguration',
'VpnClientConfiguration', 'VpnClientRootCertificate')
if any((address_prefixes, radius_server, radius_secret, client_protocol)) and not instance.vpn_client_configuration:
instance.vpn_client_configuration = VpnClientConfiguration()
if address_prefixes is not None:
if not instance.vpn_client_configuration.vpn_client_address_pool:
instance.vpn_client_configuration.vpn_client_address_pool = AddressSpace()
if not instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes:
instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes = []
instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes = address_prefixes
with cmd.update_context(instance.vpn_client_configuration) as c:
c.set_param('vpn_client_protocols', client_protocol)
c.set_param('radius_server_address', radius_server)
c.set_param('radius_server_secret', radius_secret)
if cmd.supported_api_version(min_api='2020-11-01'):
c.set_param('aad_tenant', aad_tenant)
c.set_param('aad_audience', aad_audience)
c.set_param('aad_issuer', aad_issuer)
c.set_param('vpn_authentication_types', vpn_auth_type)
if root_cert_data and cmd.supported_api_version(min_api='2020-11-01'):
upsert_to_collection(instance.vpn_client_configuration, 'vpn_client_root_certificates',
VpnClientRootCertificate(name=root_cert_name, public_cert_data=root_cert_data), 'name')
with cmd.update_context(instance.sku) as c:
c.set_param('name', sku)
c.set_param('tier', sku)
with cmd.update_context(instance) as c:
c.set_param('gateway_default_site', SubResource(id=gateway_default_site) if gateway_default_site else None)
c.set_param('vpn_type', vpn_type)
c.set_param('tags', tags)
subnet_id = '{}/subnets/GatewaySubnet'.format(virtual_network) if virtual_network else \
instance.ip_configurations[0].subnet.id
if virtual_network is not None:
for config in instance.ip_configurations:
config.subnet.id = subnet_id
if public_ip_address is not None:
instance.ip_configurations = []
for i, public_ip in enumerate(public_ip_address):
ip_configuration = VirtualNetworkGatewayIPConfiguration(
subnet=SubResource(id=subnet_id),
public_ip_address=SubResource(id=public_ip),
private_ip_allocation_method='Dynamic', name='vnetGatewayConfig{}'.format(i))
instance.ip_configurations.append(ip_configuration)
# Update active-active/active-standby status
active = len(public_ip_address) == 2
if instance.active and not active:
logger.info('Placing gateway in active-standby mode.')
elif not instance.active and active:
logger.info('Placing gateway in active-active mode.')
instance.active = active
if gateway_type is not None:
instance.gateway_type = gateway_type
if enable_bgp is not None:
instance.enable_bgp = enable_bgp.lower() == 'true'
if custom_routes and cmd.supported_api_version(min_api='2019-02-01'):
if not instance.custom_routes:
instance.custom_routes = AddressSpace()
instance.custom_routes.address_prefixes = custom_routes
_validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight)
return instance
def start_vnet_gateway_package_capture(cmd, client, resource_group_name, virtual_network_gateway_name,
filter_data=None, no_wait=False):
VpnPacketCaptureStartParameters = cmd.get_models('VpnPacketCaptureStartParameters')
parameters = VpnPacketCaptureStartParameters(filter_data=filter_data)
return sdk_no_wait(no_wait, client.begin_start_packet_capture, resource_group_name,
virtual_network_gateway_name, parameters=parameters)
def stop_vnet_gateway_package_capture(cmd, client, resource_group_name, virtual_network_gateway_name,
sas_url, no_wait=False):
VpnPacketCaptureStopParameters = cmd.get_models('VpnPacketCaptureStopParameters')
parameters = VpnPacketCaptureStopParameters(sas_url=sas_url)
return sdk_no_wait(no_wait, client.begin_stop_packet_capture, resource_group_name,
virtual_network_gateway_name, parameters=parameters)
def generate_vpn_client(cmd, client, resource_group_name, virtual_network_gateway_name, processor_architecture=None,
authentication_method=None, radius_server_auth_certificate=None, client_root_certificates=None,
use_legacy=False):
params = cmd.get_models('VpnClientParameters')(
processor_architecture=processor_architecture
)
if cmd.supported_api_version(min_api='2017-06-01') and not use_legacy:
params.authentication_method = authentication_method
params.radius_server_auth_certificate = radius_server_auth_certificate
params.client_root_certificates = client_root_certificates
return client.begin_generate_vpn_profile(resource_group_name, virtual_network_gateway_name, params)
# legacy implementation
return client.begin_generatevpnclientpackage(resource_group_name, virtual_network_gateway_name, params)
def set_vpn_client_ipsec_policy(cmd, client, resource_group_name, virtual_network_gateway_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
VpnClientIPsecParameters = cmd.get_models('VpnClientIPsecParameters')
vpnclient_ipsec_params = VpnClientIPsecParameters(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
return sdk_no_wait(no_wait, client.begin_set_vpnclient_ipsec_parameters, resource_group_name,
virtual_network_gateway_name, vpnclient_ipsec_params)
def disconnect_vnet_gateway_vpn_connections(cmd, client, resource_group_name, virtual_network_gateway_name,
vpn_connection_ids, no_wait=False):
P2SVpnConnectionRequest = cmd.get_models('P2SVpnConnectionRequest')
request = P2SVpnConnectionRequest(vpn_connection_ids=vpn_connection_ids)
return sdk_no_wait(no_wait, client.begin_disconnect_virtual_network_gateway_vpn_connections,
resource_group_name, virtual_network_gateway_name, request)
# endregion
# region VirtualNetworkGatewayConnections
# pylint: disable=too-many-locals
def create_vpn_connection(cmd, resource_group_name, connection_name, vnet_gateway1,
location=None, tags=None, no_wait=False, validate=False,
vnet_gateway2=None, express_route_circuit2=None, local_gateway2=None,
authorization_key=None, enable_bgp=False, routing_weight=10,
connection_type=None, shared_key=None,
use_policy_based_traffic_selectors=False,
express_route_gateway_bypass=None, ingress_nat_rule=None, egress_nat_rule=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import build_vpn_connection_resource
client = network_client_factory(cmd.cli_ctx).virtual_network_gateway_connections
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
tags = tags or {}
# Build up the ARM template
master_template = ArmTemplateBuilder()
vpn_connection_resource = build_vpn_connection_resource(
cmd, connection_name, location, tags, vnet_gateway1,
vnet_gateway2 or local_gateway2 or express_route_circuit2,
connection_type, authorization_key, enable_bgp, routing_weight, shared_key,
use_policy_based_traffic_selectors, express_route_gateway_bypass, ingress_nat_rule, egress_nat_rule)
master_template.add_resource(vpn_connection_resource)
master_template.add_output('resource', connection_name, output_type='object')
if shared_key:
master_template.add_secure_parameter('sharedKey', shared_key)
if authorization_key:
master_template.add_secure_parameter('authorizationKey', authorization_key)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'vpn_connection_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def update_vpn_connection(cmd, instance, routing_weight=None, shared_key=None, tags=None,
enable_bgp=None, use_policy_based_traffic_selectors=None,
express_route_gateway_bypass=None):
with cmd.update_context(instance) as c:
c.set_param('routing_weight', routing_weight)
c.set_param('shared_key', shared_key)
c.set_param('tags', tags)
c.set_param('enable_bgp', enable_bgp)
c.set_param('express_route_gateway_bypass', express_route_gateway_bypass)
c.set_param('use_policy_based_traffic_selectors', use_policy_based_traffic_selectors)
# TODO: Remove these when issue #1615 is fixed
gateway1_id = parse_resource_id(instance.virtual_network_gateway1.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway1_id['subscription'])
instance.virtual_network_gateway1 = ncf.virtual_network_gateways.get(
gateway1_id['resource_group'], gateway1_id['name'])
if instance.virtual_network_gateway2:
gateway2_id = parse_resource_id(instance.virtual_network_gateway2.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway2_id['subscription'])
instance.virtual_network_gateway2 = ncf.virtual_network_gateways.get(
gateway2_id['resource_group'], gateway2_id['name'])
if instance.local_network_gateway2:
gateway2_id = parse_resource_id(instance.local_network_gateway2.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway2_id['subscription'])
instance.local_network_gateway2 = ncf.local_network_gateways.get(
gateway2_id['resource_group'], gateway2_id['name'])
return instance
def list_vpn_connections(cmd, resource_group_name, virtual_network_gateway_name=None):
if virtual_network_gateway_name:
client = network_client_factory(cmd.cli_ctx).virtual_network_gateways
return client.list_connections(resource_group_name, virtual_network_gateway_name)
client = network_client_factory(cmd.cli_ctx).virtual_network_gateway_connections
return client.list(resource_group_name)
def start_vpn_conn_package_capture(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
filter_data=None, no_wait=False):
VpnPacketCaptureStartParameters = cmd.get_models('VpnPacketCaptureStartParameters')
parameters = VpnPacketCaptureStartParameters(filter_data=filter_data)
return sdk_no_wait(no_wait, client.begin_start_packet_capture, resource_group_name,
virtual_network_gateway_connection_name, parameters=parameters)
def stop_vpn_conn_package_capture(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
sas_url, no_wait=False):
VpnPacketCaptureStopParameters = cmd.get_models('VpnPacketCaptureStopParameters')
parameters = VpnPacketCaptureStopParameters(sas_url=sas_url)
return sdk_no_wait(no_wait, client.begin_stop_packet_capture, resource_group_name,
virtual_network_gateway_connection_name, parameters=parameters)
def show_vpn_connection_device_config_script(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
vendor, device_family, firmware_version):
VpnDeviceScriptParameters = cmd.get_models('VpnDeviceScriptParameters')
parameters = VpnDeviceScriptParameters(
vendor=vendor,
device_family=device_family,
firmware_version=firmware_version
)
return client.vpn_device_configuration_script(resource_group_name, virtual_network_gateway_connection_name,
parameters=parameters)
# endregion
# region IPSec Policy Commands
def add_vnet_gateway_ipsec_policy(cmd, resource_group_name, gateway_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
IpsecPolicy = cmd.get_models('IpsecPolicy')
new_policy = IpsecPolicy(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
try:
if gateway.vpn_client_configuration.vpn_client_ipsec_policies:
gateway.vpn_client_configuration.vpn_client_ipsec_policies.append(new_policy)
else:
gateway.vpn_client_configuration.vpn_client_ipsec_policies = [new_policy]
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def clear_vnet_gateway_ipsec_policies(cmd, resource_group_name, gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
try:
gateway.vpn_client_configuration.vpn_client_ipsec_policies = None
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
from azure.cli.core.commands import LongRunningOperation
poller = sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
return LongRunningOperation(cmd.cli_ctx)(poller).vpn_client_configuration.vpn_client_ipsec_policies
def list_vnet_gateway_ipsec_policies(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
try:
return ncf.get(resource_group_name, gateway_name).vpn_client_configuration.vpn_client_ipsec_policies
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
def add_vpn_conn_ipsec_policy(cmd, client, resource_group_name, connection_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
IpsecPolicy = cmd.get_models('IpsecPolicy')
new_policy = IpsecPolicy(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
conn = client.get(resource_group_name, connection_name)
if conn.ipsec_policies:
conn.ipsec_policies.append(new_policy)
else:
conn.ipsec_policies = [new_policy]
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
def clear_vpn_conn_ipsec_policies(cmd, client, resource_group_name, connection_name, no_wait=False):
conn = client.get(resource_group_name, connection_name)
conn.ipsec_policies = None
conn.use_policy_based_traffic_selectors = False
if no_wait:
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
from azure.cli.core.commands import LongRunningOperation
poller = sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
return LongRunningOperation(cmd.cli_ctx)(poller).ipsec_policies
def list_vpn_conn_ipsec_policies(cmd, client, resource_group_name, connection_name):
return client.get(resource_group_name, connection_name).ipsec_policies
def assign_vnet_gateway_aad(cmd, resource_group_name, gateway_name,
aad_tenant, aad_audience, aad_issuer, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
gateway.vpn_client_configuration.aad_tenant = aad_tenant
gateway.vpn_client_configuration.aad_audience = aad_audience
gateway.vpn_client_configuration.aad_issuer = aad_issuer
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def show_vnet_gateway_aad(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
return gateway.vpn_client_configuration
def remove_vnet_gateway_aad(cmd, resource_group_name, gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
gateway.vpn_client_configuration.aad_tenant = None
gateway.vpn_client_configuration.aad_audience = None
gateway.vpn_client_configuration.aad_issuer = None
if cmd.supported_api_version(min_api='2020-11-01'):
gateway.vpn_client_configuration.vpn_authentication_types = None
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def add_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name, name, internal_mappings, external_mappings,
rule_type=None, mode=None, ip_config_id=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
VirtualNetworkGatewayNatRule, VpnNatRuleMapping = cmd.get_models('VirtualNetworkGatewayNatRule',
'VpnNatRuleMapping')
gateway.nat_rules.append(
VirtualNetworkGatewayNatRule(type_properties_type=rule_type, mode=mode, name=name,
internal_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in internal_mappings] if internal_mappings else None,
external_mappings=[VpnNatRuleMapping(address_space=e_map) for e_map in external_mappings] if external_mappings else None,
ip_configuration_id=ip_config_id))
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def show_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
return gateway.nat_rules
def remove_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name, name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
for rule in gateway.nat_rules:
if name == rule.name:
gateway.nat_rules.remove(rule)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
    raise UnrecognizedArgumentError(f'NAT rule "{name}" was not found on gateway "{gateway_name}".')
# endregion
# region VirtualHub
def create_virtual_hub(cmd, client,
resource_group_name,
virtual_hub_name,
hosted_subnet,
public_ip_address=None,
location=None,
tags=None):
from azure.core.exceptions import HttpResponseError
from azure.cli.core.commands import LongRunningOperation
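    # Fail early if a virtual hub with this name already exists in the resource group.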
try:
client.get(resource_group_name, virtual_hub_name)
        raise CLIError('The VirtualHub "{}" under resource group "{}" already exists'.format(
            virtual_hub_name, resource_group_name))
except HttpResponseError:
pass
SubResource = cmd.get_models('SubResource')
VirtualHub, HubIpConfiguration = cmd.get_models('VirtualHub', 'HubIpConfiguration')
hub = VirtualHub(tags=tags, location=location,
virtual_wan=None,
sku='Standard')
vhub_poller = client.begin_create_or_update(resource_group_name, virtual_hub_name, hub)
LongRunningOperation(cmd.cli_ctx)(vhub_poller)
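    # With the hub provisioned, attach its IP configuration; roll everything back if that step fails.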
ip_config = HubIpConfiguration(
subnet=SubResource(id=hosted_subnet),
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
)
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
try:
vhub_ip_poller = vhub_ip_config_client.begin_create_or_update(
resource_group_name, virtual_hub_name, 'Default', ip_config)
LongRunningOperation(cmd.cli_ctx)(vhub_ip_poller)
except Exception as ex:
logger.error(ex)
try:
vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
except HttpResponseError:
pass
client.begin_delete(resource_group_name, virtual_hub_name)
raise ex
return client.get(resource_group_name, virtual_hub_name)
def virtual_hub_update_setter(client, resource_group_name, virtual_hub_name, parameters):
return client.begin_create_or_update(resource_group_name, virtual_hub_name, parameters)
def update_virtual_hub(cmd, instance,
tags=None,
allow_branch_to_branch_traffic=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
c.set_param('allow_branch_to_branch_traffic', allow_branch_to_branch_traffic)
return instance
def delete_virtual_hub(cmd, client, resource_group_name, virtual_hub_name, no_wait=False):
from azure.cli.core.commands import LongRunningOperation
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
ip_configs = list(vhub_ip_config_client.list(resource_group_name, virtual_hub_name))
if ip_configs:
ip_config = ip_configs[0] # There will always be only 1
poller = vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, ip_config.name)
LongRunningOperation(cmd.cli_ctx)(poller)
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, virtual_hub_name)
def list_virtual_hub(client, resource_group_name=None):
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name)
return client.list()
def create_virtual_hub_bgp_connection(cmd, client, resource_group_name, virtual_hub_name, connection_name,
peer_asn, peer_ip, no_wait=False):
BgpConnection = cmd.get_models('BgpConnection')
vhub_bgp_conn = BgpConnection(name=connection_name, peer_asn=peer_asn, peer_ip=peer_ip)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name,
virtual_hub_name, connection_name, vhub_bgp_conn)
def virtual_hub_bgp_connection_update_setter(client, resource_group_name,
virtual_hub_name, connection_name,
parameters):
return client.begin_create_or_update(resource_group_name, virtual_hub_name, connection_name, parameters)
def update_virtual_hub_bgp_connection(cmd, instance, peer_asn=None, peer_ip=None):
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('peer_ip', peer_ip)
return instance
def delete_virtual_hub_bgp_connection(client, resource_group_name,
virtual_hub_name, connection_name, no_wait=False):
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, virtual_hub_name, connection_name)
def list_virtual_hub_bgp_connection_learned_routes(client, resource_group_name, virtual_hub_name, connection_name):
return client.begin_list_learned_routes(resource_group_name, virtual_hub_name, connection_name)
def list_virtual_hub_bgp_connection_advertised_routes(client, resource_group_name, virtual_hub_name, connection_name):
return client.begin_list_advertised_routes(resource_group_name, virtual_hub_name, connection_name)
# endregion
# region VirtualRouter
def create_virtual_router(cmd,
resource_group_name,
virtual_router_name,
hosted_gateway=None,
hosted_subnet=None,
location=None,
tags=None):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
from azure.core.exceptions import HttpResponseError
try:
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
virtual_hub_name = virtual_router_name
try:
vhub_client.get(resource_group_name, virtual_hub_name)
        raise CLIError('The VirtualRouter "{}" under resource group "{}" already exists'.format(
            virtual_hub_name, resource_group_name))
except HttpResponseError:
pass
SubResource = cmd.get_models('SubResource')
# for old VirtualRouter
if hosted_gateway is not None:
VirtualRouter = cmd.get_models('VirtualRouter')
virtual_router = VirtualRouter(virtual_router_asn=None,
virtual_router_ips=[],
hosted_subnet=None,
hosted_gateway=SubResource(id=hosted_gateway),
location=location,
tags=tags)
return vrouter_client.begin_create_or_update(resource_group_name, virtual_router_name, virtual_router)
# for VirtualHub
VirtualHub, HubIpConfiguration = cmd.get_models('VirtualHub', 'HubIpConfiguration')
hub = VirtualHub(tags=tags, location=location, virtual_wan=None, sku='Standard')
ip_config = HubIpConfiguration(subnet=SubResource(id=hosted_subnet))
from azure.cli.core.commands import LongRunningOperation
vhub_poller = vhub_client.begin_create_or_update(resource_group_name, virtual_hub_name, hub)
LongRunningOperation(cmd.cli_ctx)(vhub_poller)
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
try:
vhub_ip_poller = vhub_ip_config_client.begin_create_or_update(resource_group_name,
virtual_hub_name,
'Default',
ip_config)
LongRunningOperation(cmd.cli_ctx)(vhub_ip_poller)
except Exception as ex:
logger.error(ex)
vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
vhub_client.begin_delete(resource_group_name, virtual_hub_name)
raise ex
return vhub_client.get(resource_group_name, virtual_hub_name)
def virtual_router_update_getter(cmd, resource_group_name, virtual_router_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
return vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError: # 404
pass
virtual_hub_name = virtual_router_name
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
return vhub_client.get(resource_group_name, virtual_hub_name)
def virtual_router_update_setter(cmd, resource_group_name, virtual_router_name, parameters):
if parameters.type == 'Microsoft.Network/virtualHubs':
client = network_client_factory(cmd.cli_ctx).virtual_hubs
else:
client = network_client_factory(cmd.cli_ctx).virtual_routers
# If the client is virtual_hubs,
# the virtual_router_name represents virtual_hub_name and
# the parameters represents VirtualHub
return client.begin_create_or_update(resource_group_name, virtual_router_name, parameters)
def update_virtual_router(cmd, instance, tags=None):
# both VirtualHub and VirtualRouter own those properties
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
return instance
def list_virtual_router(cmd, resource_group_name=None):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
if resource_group_name is not None:
vrouters = vrouter_client.list_by_resource_group(resource_group_name)
vhubs = vhub_client.list_by_resource_group(resource_group_name)
else:
vrouters = vrouter_client.list()
vhubs = vhub_client.list()
return list(vrouters) + list(vhubs)
def show_virtual_router(cmd, resource_group_name, virtual_router_name):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
from azure.core.exceptions import HttpResponseError
try:
item = vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
virtual_hub_name = virtual_router_name
item = vhub_client.get(resource_group_name, virtual_hub_name)
return item
def delete_virtual_router(cmd, resource_group_name, virtual_router_name):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
from azure.core.exceptions import HttpResponseError
try:
vrouter_client.get(resource_group_name, virtual_router_name)
item = vrouter_client.begin_delete(resource_group_name, virtual_router_name)
except HttpResponseError:
from azure.cli.core.commands import LongRunningOperation
virtual_hub_name = virtual_router_name
poller = vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
LongRunningOperation(cmd.cli_ctx)(poller)
item = vhub_client.begin_delete(resource_group_name, virtual_hub_name)
return item
def create_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name, peer_asn, peer_ip):
# try VirtualRouter first
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
VirtualRouterPeering = cmd.get_models('VirtualRouterPeering')
virtual_router_peering = VirtualRouterPeering(peer_asn=peer_asn, peer_ip=peer_ip)
return vrouter_peering_client.begin_create_or_update(resource_group_name,
virtual_router_name,
peering_name,
virtual_router_peering)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# try VirtualHub then if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
BgpConnection = cmd.get_models('BgpConnection')
vhub_bgp_conn = BgpConnection(name=peering_name, peer_asn=peer_asn, peer_ip=peer_ip)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.begin_create_or_update(resource_group_name, virtual_hub_name,
bgp_conn_name, vhub_bgp_conn)
def virtual_router_peering_update_getter(cmd, resource_group_name, virtual_router_name, peering_name):
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
from azure.core.exceptions import HttpResponseError
try:
return vrouter_peering_client.get(resource_group_name, virtual_router_name, peering_name)
except HttpResponseError: # 404
pass
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.get(resource_group_name, virtual_hub_name, bgp_conn_name)
def virtual_router_peering_update_setter(cmd, resource_group_name, virtual_router_name, peering_name, parameters):
if parameters.type == 'Microsoft.Network/virtualHubs/bgpConnections':
client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
else:
client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
# if the client is virtual_hub_bgp_connection,
# the virtual_router_name represents virtual_hub_name and
# the peering_name represents bgp_connection_name and
# the parameters represents BgpConnection
return client.begin_create_or_update(resource_group_name, virtual_router_name, peering_name, parameters)
def update_virtual_router_peering(cmd, instance, peer_asn=None, peer_ip=None):
# both VirtualHub and VirtualRouter own those properties
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('peer_ip', peer_ip)
return instance
def list_virtual_router_peering(cmd, resource_group_name, virtual_router_name):
virtual_hub_name = virtual_router_name
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
try:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
vrouter_peerings = list(vrouter_peering_client.list(resource_group_name, virtual_router_name))
except HttpResponseError:
vrouter_peerings = []
virtual_hub_name = virtual_router_name
try:
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connections
vhub_bgp_connections = list(vhub_bgp_conn_client.list(resource_group_name, virtual_hub_name))
except HttpResponseError:
vhub_bgp_connections = []
return list(vrouter_peerings) + list(vhub_bgp_connections)
def show_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
return vrouter_peering_client.get(resource_group_name, virtual_router_name, peering_name)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# try VirtualHub then if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.get(resource_group_name, virtual_hub_name, bgp_conn_name)
def delete_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except: # pylint: disable=bare-except
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
return vrouter_peering_client.begin_delete(resource_group_name, virtual_router_name, peering_name)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# try VirtualHub then if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.begin_delete(resource_group_name, virtual_hub_name, bgp_conn_name)
# endregion
# region service aliases
def list_service_aliases(cmd, location, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).available_service_aliases
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name, location=location)
return client.list(location=location)
# endregion
# region bastion
def create_bastion_host(cmd, resource_group_name, bastion_host_name, virtual_network_name,
public_ip_address, location=None, subnet='AzureBastionSubnet'):
client = network_client_factory(cmd.cli_ctx).bastion_hosts
(BastionHost,
BastionHostIPConfiguration,
SubResource) = cmd.get_models('BastionHost',
'BastionHostIPConfiguration',
'SubResource')
ip_config_name = "bastion_ip_config"
ip_configuration = BastionHostIPConfiguration(name=ip_config_name,
subnet=SubResource(id=subnet),
public_ip_address=SubResource(id=public_ip_address))
bastion_host = BastionHost(ip_configurations=[ip_configuration],
location=location)
return client.begin_create_or_update(resource_group_name=resource_group_name,
bastion_host_name=bastion_host_name,
parameters=bastion_host)
def list_bastion_host(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).bastion_hosts
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
SSH_EXTENSION_NAME = 'ssh'
SSH_EXTENSION_MODULE = 'azext_ssh.custom'
SSH_EXTENSION_VERSION = '0.1.3'
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _test_extension(extension_name):
from azure.cli.core.extension import (get_extension)
from pkg_resources import parse_version
ext = get_extension(extension_name)
if parse_version(ext.version) < parse_version(SSH_EXTENSION_VERSION):
raise CLIError('SSH Extension (version >= "{}") must be installed'.format(SSH_EXTENSION_VERSION))
def _get_ssh_path(ssh_command="ssh"):
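    # Resolve the ssh executable; on Windows the bundled OpenSSH client under System32/SysNative is used.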
import os
ssh_path = ssh_command
if platform.system() == 'Windows':
arch_data = platform.architecture()
is_32bit = arch_data[0] == '32bit'
sys_path = 'SysNative' if is_32bit else 'System32'
system_root = os.environ['SystemRoot']
system32_path = os.path.join(system_root, sys_path)
ssh_path = os.path.join(system32_path, "openSSH", (ssh_command + ".exe"))
logger.debug("Platform architecture: %s", str(arch_data))
logger.debug("System Root: %s", system_root)
logger.debug("Attempting to run ssh from path %s", ssh_path)
if not os.path.isfile(ssh_path):
raise CLIError("Could not find " + ssh_command + ".exe. Is the OpenSSH client installed?")
else:
raise UnrecognizedArgumentError("Platform is not supported for thie command. Supported platforms: Windows")
return ssh_path
def _get_rdp_path(rdp_command="mstsc"):
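    # Resolve the mstsc executable from the Windows System32 directory.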
import os
rdp_path = rdp_command
if platform.system() == 'Windows':
arch_data = platform.architecture()
sys_path = 'System32'
system_root = os.environ['SystemRoot']
system32_path = os.path.join(system_root, sys_path)
rdp_path = os.path.join(system32_path, (rdp_command + ".exe"))
logger.debug("Platform architecture: %s", str(arch_data))
logger.debug("System Root: %s", system_root)
logger.debug("Attempting to run rdp from path %s", rdp_path)
if not os.path.isfile(rdp_path):
raise CLIError("Could not find " + rdp_command + ".exe. Is the rdp client installed?")
else:
raise UnrecognizedArgumentError("Platform is not supported for thie command. Supported platforms: Windows")
return rdp_path
def _get_host(username, ip):
return username + "@" + ip
def _build_args(cert_file, private_key_file):
private_key = []
certificate = []
if private_key_file:
private_key = ["-i", private_key_file]
if cert_file:
certificate = ["-o", "CertificateFile=" + cert_file]
return private_key + certificate
def ssh_bastion_host(cmd, auth_type, target_resource_id, resource_group_name, bastion_host_name, resource_port=None, username=None, ssh_key=None):
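    # Open a local tunnel to the target VM through the Bastion host, then launch the ssh
    # client against localhost with the chosen authentication type.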
_test_extension(SSH_EXTENSION_NAME)
if not resource_port:
resource_port = 22
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
if auth_type.lower() == 'password':
if username is None:
raise RequiredArgumentMissingError("Please enter username with --username.")
command = [_get_ssh_path(), _get_host(username, 'localhost')]
elif auth_type.lower() == 'aad':
azssh = _get_azext_module(SSH_EXTENSION_NAME, SSH_EXTENSION_MODULE)
public_key_file, private_key_file = azssh._check_or_create_public_private_files(None, None) # pylint: disable=protected-access
cert_file, username = azssh._get_and_write_certificate(cmd, public_key_file, private_key_file + '-cert.pub') # pylint: disable=protected-access
command = [_get_ssh_path(), _get_host(username, 'localhost')]
command = command + _build_args(cert_file, private_key_file)
elif auth_type.lower() == 'ssh-key':
if username is None or ssh_key is None:
raise RequiredArgumentMissingError("Please enter username --username and ssh cert location --ssh-key.")
command = [_get_ssh_path(), _get_host(username, 'localhost')]
command = command + _build_args(None, ssh_key)
else:
raise UnrecognizedArgumentError("Unknown auth type. Use one of password, aad or ssh-key.")
command = command + ["-p", str(tunnel_server.local_port)]
command = command + ['-o', "StrictHostKeyChecking=no", '-o', "UserKnownHostsFile=/dev/null"]
command = command + ['-o', "LogLevel=Error"]
logger.debug("Running ssh command %s", ' '.join(command))
try:
subprocess.call(command, shell=platform.system() == 'Windows')
except Exception as ex:
raise CLIInternalError(ex)
def rdp_bastion_host(cmd, target_resource_id, resource_group_name, bastion_host_name, resource_port=None):
if not resource_port:
resource_port = 3389
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
command = [_get_rdp_path(), "/v:localhost:{0}".format(tunnel_server.local_port)]
logger.debug("Running rdp command %s", ' '.join(command))
subprocess.call(command, shell=platform.system() == 'Windows')
tunnel_server.cleanup()
def get_tunnel(cmd, resource_group_name, name, vm_id, resource_port, port=None):
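    # Build a TunnelServer that forwards a local port to the target resource through the Bastion host.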
from .tunnel import TunnelServer
client = network_client_factory(cmd.cli_ctx).bastion_hosts
bastion = client.get(resource_group_name, name)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
tunnel_server = TunnelServer(cmd.cli_ctx, 'localhost', port, bastion, vm_id, resource_port)
return tunnel_server
def create_bastion_tunnel(cmd, target_resource_id, resource_group_name, bastion_host_name, resource_port, port, timeout=None):
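    # Start the tunnel on a background thread and keep the command alive until timeout or Ctrl+C.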
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port, port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
# endregion
# region security partner provider
def create_security_partner_provider(cmd, resource_group_name, security_partner_provider_name,
security_provider_name, virtual_hub, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).security_partner_providers
SecurityPartnerProvider, SubResource = cmd.get_models('SecurityPartnerProvider', 'SubResource')
security_partner_provider = SecurityPartnerProvider(security_provider_name=security_provider_name,
virtual_hub=SubResource(id=virtual_hub),
location=location,
tags=tags)
return client.begin_create_or_update(resource_group_name=resource_group_name,
security_partner_provider_name=security_partner_provider_name,
parameters=security_partner_provider)
def update_security_partner_provider(instance, cmd, security_provider_name=None, virtual_hub=None, tags=None):
with cmd.update_context(instance) as c:
c.set_param('security_provider_name', security_provider_name)
c.set_param('virtual_hub', virtual_hub)
c.set_param('tags', tags)
return instance
def list_security_partner_provider(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).security_partner_providers
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
# endregion
# region network gateway connection
def reset_shared_key(cmd, client, virtual_network_gateway_connection_name, key_length, resource_group_name=None):
ConnectionResetSharedKey = cmd.get_models('ConnectionResetSharedKey')
shared_key = ConnectionResetSharedKey(key_length=key_length)
return client.begin_reset_shared_key(resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name, # pylint: disable=line-too-long
parameters=shared_key)
def update_shared_key(cmd, instance, value):
with cmd.update_context(instance) as c:
c.set_param('value', value)
return instance
# endregion
# region network virtual appliance
def create_network_virtual_appliance(cmd, client, resource_group_name, network_virtual_appliance_name,
vendor, bundled_scale_unit, market_place_version,
virtual_hub, boot_strap_configuration_blobs=None,
cloud_init_configuration_blobs=None,
cloud_init_configuration=None, asn=None,
location=None, tags=None, no_wait=False):
(NetworkVirtualAppliance,
SubResource,
VirtualApplianceSkuProperties) = cmd.get_models('NetworkVirtualAppliance',
'SubResource',
'VirtualApplianceSkuProperties')
virtual_appliance = NetworkVirtualAppliance(boot_strap_configuration_blobs=boot_strap_configuration_blobs,
cloud_init_configuration_blobs=cloud_init_configuration_blobs,
cloud_init_configuration=cloud_init_configuration,
virtual_appliance_asn=asn,
virtual_hub=SubResource(id=virtual_hub),
nva_sku=VirtualApplianceSkuProperties(
vendor=vendor,
bundled_scale_unit=bundled_scale_unit,
market_place_version=market_place_version
),
location=location,
tags=tags)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, network_virtual_appliance_name, virtual_appliance)
def update_network_virtual_appliance(instance, cmd, cloud_init_configuration=None, asn=None):
with cmd.update_context(instance) as c:
c.set_param('virtual_appliance_asn', asn)
c.set_param('cloud_init_configuration', cloud_init_configuration)
return instance
def list_network_virtual_appliance(cmd, client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
def create_network_virtual_appliance_site(cmd, client, resource_group_name, network_virtual_appliance_name,
site_name, address_prefix, allow=None, optimize=None, default=None,
no_wait=False):
(BreakOutCategoryPolicies,
Office365PolicyProperties,
VirtualApplianceSite) = cmd.get_models('BreakOutCategoryPolicies',
'Office365PolicyProperties',
'VirtualApplianceSite')
virtual_appliance_site = VirtualApplianceSite(address_prefix=address_prefix,
o365_policy=Office365PolicyProperties(
break_out_categories=BreakOutCategoryPolicies(
allow=allow,
optimize=optimize,
default=default
)))
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, network_virtual_appliance_name, site_name, virtual_appliance_site)
def update_network_virtual_appliance_site(instance, cmd, address_prefix, allow=None, optimize=None, default=None):
with cmd.update_context(instance) as c:
c.set_param('address_prefix', address_prefix)
c.set_param('o365_policy.break_out_categories.allow', allow)
c.set_param('o365_policy.break_out_categories.optimize', optimize)
c.set_param('o365_policy.break_out_categories.default', default)
return instance
# endregion
|
[] |
[] |
[
"SystemRoot"
] |
[]
|
["SystemRoot"]
|
python
| 1 | 0 | |
test/com/facebook/buck/java/MissingSymbolsHandlerIntegrationTest.java
|
/*
* Copyright 2014-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.java;
import static com.facebook.buck.java.JavaCompilationConstants.DEFAULT_JAVAC_OPTIONS;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
import static org.junit.Assert.assertEquals;
import com.facebook.buck.cli.BuckConfig;
import com.facebook.buck.cli.MissingSymbolsHandler;
import com.facebook.buck.event.BuckEventBus;
import com.facebook.buck.event.BuckEventBusFactory;
import com.facebook.buck.event.MissingSymbolEvent;
import com.facebook.buck.io.ProjectFilesystem;
import com.facebook.buck.model.BuildTarget;
import com.facebook.buck.rules.DefaultKnownBuildRuleTypes;
import com.facebook.buck.rules.Description;
import com.facebook.buck.testutil.TestConsole;
import com.facebook.buck.testutil.integration.DebuggableTemporaryFolder;
import com.facebook.buck.testutil.integration.ProjectWorkspace;
import com.facebook.buck.testutil.integration.TestDataHelper;
import com.facebook.buck.util.environment.Platform;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSetMultimap;
import org.junit.Rule;
import org.junit.Test;
import java.io.IOException;
import java.nio.file.Paths;
public class MissingSymbolsHandlerIntegrationTest {
@Rule
public DebuggableTemporaryFolder temporaryFolder = new DebuggableTemporaryFolder();
@Test
public void shouldFindNeededDependenciesFromSymbols() throws IOException, InterruptedException {
ProjectWorkspace workspace = TestDataHelper.createProjectWorkspaceForScenario(
this, "symbol_finder", temporaryFolder);
workspace.setUp();
ProjectFilesystem projectFilesystem = new ProjectFilesystem(temporaryFolder.getRootPath());
ImmutableMap<String, String> environment = ImmutableMap.copyOf(System.getenv());
BuckConfig config = BuckConfig.createFromFiles(
projectFilesystem,
ImmutableList.of(projectFilesystem.getFileForRelativePath(".buckconfig")),
Platform.detect(),
environment);
ImmutableSet<Description<?>> allDescriptions =
DefaultKnownBuildRuleTypes
.getDefaultKnownBuildRuleTypes(projectFilesystem)
.getAllDescriptions();
BuckEventBus buckEventBus = BuckEventBusFactory.newInstance();
MissingSymbolsHandler missingSymbolsHandler = MissingSymbolsHandler.create(
projectFilesystem,
allDescriptions,
config,
buckEventBus,
new TestConsole(),
DEFAULT_JAVAC_OPTIONS,
environment);
MissingSymbolEvent missingSymbolEvent = MissingSymbolEvent.create(
BuildTarget.builder("//java/com/example/b", "b").build(),
"com.example.a.A",
MissingSymbolEvent.SymbolType.Java);
ImmutableSetMultimap<BuildTarget, BuildTarget> neededDeps =
missingSymbolsHandler.getNeededDependencies(ImmutableList.of(missingSymbolEvent));
    assertEquals(
        "MissingSymbolsHandler failed to find the needed dependency.",
        ImmutableSetMultimap.of(
            BuildTarget.builder("//java/com/example/b", "b").build(),
            BuildTarget.builder("//java/com/example/a", "a").build()),
        neededDeps);
}
@Test
public void shouldPrintNeededSymbolsFromBuild() throws IOException {
ProjectWorkspace workspace = TestDataHelper.createProjectWorkspaceForScenario(
this, "symbol_finder", temporaryFolder);
workspace.setUp();
ProjectWorkspace.ProcessResult processResult = workspace.runBuckBuild("//java/com/example/b:b");
processResult.assertFailure("Build with missing dependencies should fail.");
String expectedDependencyOutput = String.format(
"%s (:b) is missing deps:\n" +
" ':moreb',\n" +
" '//java/com/example/a:a',\n",
Paths.get("java/com/example/b/BUCK"));
assertThat(
"Output should describe the missing dependency.",
processResult.getStdout(),
containsString(expectedDependencyOutput));
}
@Test
public void shouldPrintNeededSymbolsFromTest() throws IOException {
ProjectWorkspace workspace = TestDataHelper.createProjectWorkspaceForScenario(
this, "symbol_finder", temporaryFolder);
workspace.setUp();
ProjectWorkspace.ProcessResult processResult = workspace.runBuckCommand(
"test",
"//java/com/example/b:test");
processResult.assertFailure("Test with missing dependencies should fail.");
String expectedDependencyOutput = String.format(
"%s (:test) is missing deps:\n" +
" ':moreb',\n" +
" '//java/com/example/a:a',\n",
Paths.get("java/com/example/b/BUCK"));
assertThat(
"Output should describe the missing dependency.",
processResult.getStdout(),
containsString(expectedDependencyOutput));
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
examples/cifar/main.py
|
# Copyright 2020 Lorna Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Evaluate on CIFAR. Note that, at the moment, training is not implemented (I am working on it).
That being said, evaluation is working.
"""
import argparse
import os
import random
import shutil
import time
import warnings
import PIL
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from apex import amp
from vggnet import VGGNet
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR', default='data',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
help='model architecture (default: resnet18)')
parser.add_argument('-j', '--workers', default=0, type=int, metavar='N',
help='number of data loading workers (default: 0)')
parser.add_argument('--epochs', default=300, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.05, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--opt_level', default="O1", type=str,
help="Choose which accuracy to train. (default: 'O1')")
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://172.168.1.1:11111', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--image_size', default=32, type=int,
help='image size')
parser.add_argument('--num_classes', type=int, default=10,
help="number of dataset category.")
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
best_acc1 = 0
args = parser.parse_args()
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
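# Illustrative invocation (the data path is an assumption, not from the original
# repo):
#   python main.py ./data --arch resnet18 --pretrained --evaluate
# downloads CIFAR-10 into ./data, runs validate() on the pretrained torchvision
# resnet18, and writes the resulting Acc@1/Acc@5 to res.txt.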
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if 'vgg' in args.arch: # NEW
if args.pretrained:
model = VGGNet.from_pretrained(args.arch, num_classes=args.num_classes)
print("=> using pre-trained model '{}'".format(args.arch))
else:
print("=> creating model '{}'".format(args.arch))
model = VGGNet.from_name(args.arch)
else:
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int(args.workers / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
amp.load_state_dict(checkpoint['amp'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.CIFAR10(
root=args.data,
train=True,
download=True,
transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
test_dataset = datasets.CIFAR10(
root=args.data,
train=False,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]))
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
top1, top5 = validate(test_loader, model, criterion, args)
with open("res.txt", "w") as f:
print(f"Acc@1: {top1}\tAcc@5: {top5}", file=f)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1, _ = validate(test_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
'amp': amp.state_dict(),
}, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.6f')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1,
top5, prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.print(i)
def validate(test_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.6f')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(test_loader), batch_time, losses, top1, top5,
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(test_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.print(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, top5.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth")
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, *meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def print(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.5 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
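# Worked example of the schedule above: with the default --lr 0.05, epochs 0-29
# train at 0.05, epochs 30-59 at 0.025, epochs 60-89 at 0.0125, and so on
# (the rate is halved every 30 epochs).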
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def get_parameter_number(model, image_size, channels=3):
from torchstat import stat
stat(model, (channels, image_size, image_size))
def print_state_dict(model):
print("----------------------------------------------------------")
print("| state dict pram |")
print("----------------------------------------------------------")
for param_tensor in model.state_dict():
print(param_tensor, '\t', model.state_dict()[param_tensor].size())
print("----------------------------------------------------------")
if __name__ == '__main__':
main()
|
[] |
[] |
[
"RANK",
"WORLD_SIZE"
] |
[]
|
["RANK", "WORLD_SIZE"]
|
python
| 2 | 0 | |
repldex/backend/database.py
|
from datetime import datetime
import motor.motor_asyncio
import uuid
import dns # noqa: F401 (required so replit always installs it)
import os
connection_uri = os.getenv('dburi')
client = motor.motor_asyncio.AsyncIOMotorClient(connection_uri)
db = client['repldex']
entries_coll = db['entries']
sessions_coll = db['sessions']
users_coll = db['users']
from repldex.discordbot import bot as discordbot
from repldex import utils
from repldex.backend import images
async def fix_entry(data):
if data is None:
return
original_data = dict(data)
if data.get('image') and isinstance(data['image'], str):
data['image'] = data['image'].replace('imag.cf', 'i.matdoes.dev')
if data.get('image') and isinstance(data.get('image'), str):
data['image'] = await images.get_data(data['image'])
elif data.get('image') and not data['image'].get('thumbnail_b64'):
data['image'] = await images.get_data(data['image']['src'])
if data != original_data:
# print('updated', data['_id'])
await entries_coll.update_one({'_id': data['_id']}, {'$set': data})
data['content'] = utils.fix_html(data['content'])
if 'nohtml_content' not in data:
data['nohtml_content'] = utils.remove_html(data['content'])
return data
async def delete_entry(title, content, entry_id, editor=None):
t = datetime.now()
await discordbot.log_delete(title, t, content)
await entries_coll.delete_one({'_id': entry_id})
async def edit_entry(
title, content, editor=None, unlisted=False, entry_id=None, image=None, editor_real=None, impersonate=None
):
t = datetime.now()
title = title.strip()
await discordbot.log_edit(editor, title, t)
content = utils.fix_html(content)
nohtml_content = utils.remove_html(content)
new_data = {'title': title, 'content': content, 'last_edited': t, 'nohtml_content': nohtml_content}
if unlisted is not None:
new_data['unlisted'] = unlisted
if image is not None:
new_data['image'] = {'src': image}
if not entry_id:
entry_id = str(uuid.uuid4())
new_history_data = {
'author': editor,
'content': content,
'title': title,
'time': t,
'unlisted': unlisted,
}
if image is not None:
new_history_data['image'] = {'src': image}
await entries_coll.update_one({'_id': entry_id}, {'$set': new_data, '$push': {'history': new_history_data}}, upsert=True)
return entry_id
async def get_entry(entry_id=None, name=None, search_id=True, owner=None):
if not entry_id and name:
entries = await search_entries(name, limit=1, search_id=search_id)
if not entries:
return
return entries[0]
elif owner:
# print(type(owner), owner)
found = await entries_coll.find_one({'owner_id': owner})
else:
found = await entries_coll.find_one({'_id': entry_id})
found = await fix_entry(found)
return found
async def new_editor_session(discord_id):
sid = str(uuid.uuid4())
await sessions_coll.insert_one({'_id': sid, 'discord': discord_id, 'time': datetime.now()})
return sid
async def get_editor_session(sid):
if not hasattr(get_editor_session, 'cache'):
get_editor_session.cache = {}
if sid in get_editor_session.cache:
found = get_editor_session.cache[sid]
else:
found = await sessions_coll.find_one({'_id': sid})
get_editor_session.cache[sid] = found
if found is None:
return
return found['discord']
async def search_entries(query, limit=10, search_id=True, page=0, discord_id=None, unlisted=False):
print('searching')
found = []
match = {'$match': {'unlisted': {'$ne': True}}}
if unlisted:
match = {'$match': {'unlisted': {'$eq': True}}}
async for doc in entries_coll.aggregate([
{'$searchBeta': {'compound': {'should': [
{'search': {'query': query, 'path': 'nohtml_content'}},
{'search': {'query': query, 'path': 'title', 'score': {'boost': {'value': 20}}}},
]}}},
match,
{'$addFields': {'score': {'$meta': 'searchScore'}}},
{'$sort': {'score': -1}},
{'$skip': page * limit},
{'$limit': limit},
]):
found.append(await fix_entry(doc))
if len(found) == 0 and search_id:
found = await get_entry(query)
if found:
return [found]
if found is None:
found = []
if len(found) == 0:
searched = await entries_coll.find_one({'title': query, 'unlisted': {'$ne': True}})
if searched:
found = [searched]
if len(found) == 0:
if query.startswith('<@') and query.endswith('>'):
entry_owner_id = query[2:-1]
if entry_owner_id[0] == '!':
entry_owner_id = entry_owner_id[1:]
entry_owner_id = int(entry_owner_id)
owned_entry = await entries_coll.find_one({'owner_id': entry_owner_id})
			if owned_entry:
found = [owned_entry]
print(found)
return found
# Query is only if sort == relevant
async def get_entries(sort, limit=20, page=0, query=None, discord_id=None, unlisted=False):
# match = {'$match': {'unlisted': {'$ne': True}}}
# if discord_id is not None:
# if discord_id in ADMIN_IDS:
# match = {}
if sort == 'relevant' and query:
found = await search_entries(query, limit=limit, page=page, discord_id=discord_id, unlisted=unlisted)
return found
cursor = entries_coll.find({'unlisted': {'$ne': True}})
cursor = cursor.sort(sort, -1)
cursor = cursor.skip(page * limit)
cursor = cursor.limit(limit)
found = []
async for entry in cursor:
entry = await fix_entry(entry)
found.append(entry)
return found
async def set_personal_entry(discord_id, entry_id):
user_data = {
'personal_entry': entry_id,
}
await users_coll.update_one({'_id': discord_id}, {'$set': user_data}, upsert=True)
async for entry in entries_coll.find({'owner_id': discord_id}):
await entries_coll.update_one({'_id': entry['_id']}, {'$set': {'owner_id': None}})
await entries_coll.update_one({'_id': entry_id}, {'$set': {'owner_id': discord_id}})
try:
if hasattr(get_personal_entry, 'cache'):
get_personal_entry.cache[discord_id] = user_data
except Exception as e:
print('BRUH MOMENT', e)
async def get_personal_entry(discord_id):
if not hasattr(get_personal_entry, 'cache'):
get_personal_entry.cache = {}
if discord_id in get_personal_entry.cache:
found = get_personal_entry.cache[discord_id]
else:
found = await users_coll.find_one({'_id': discord_id})
get_personal_entry.cache[discord_id] = found
if found is None:
return
return found.get('personal_entry')
async def count_entries():
count = await entries_coll.count_documents({'unlisted': {'$ne': True}})
return count
async def get_random_entry():
cursor = entries_coll.aggregate([{'$match': {'unlisted': {'$ne': True}}}, {'$sample': {'size': 1}}])
found = []
async for entry in cursor:
found.append(entry)
return found[0]
|
[] |
[] |
[
"dburi"
] |
[]
|
["dburi"]
|
python
| 1 | 0 | |
src/runtime/virtcontainers/device/drivers/vhost_user_blk.go
|
// Copyright (c) 2017-2018 Intel Corporation
// Copyright (c) 2018-2019 Huawei Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package drivers
import (
"context"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils"
"github.com/sirupsen/logrus"
)
// VhostUserBlkDevice is a block vhost-user based device
type VhostUserBlkDevice struct {
*GenericDevice
VhostUserDeviceAttrs *config.VhostUserDeviceAttrs
}
// NewVhostUserBlkDevice creates a new vhost-user block device based on DeviceInfo
func NewVhostUserBlkDevice(devInfo *config.DeviceInfo) *VhostUserBlkDevice {
return &VhostUserBlkDevice{
GenericDevice: &GenericDevice{
ID: devInfo.ID,
DeviceInfo: devInfo,
},
}
}
//
// VhostUserBlkDevice's implementation of the device interface:
//
// Attach is the standard api.Device interface method; it is used to add the
// device to a DeviceReceiver.
func (device *VhostUserBlkDevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (err error) {
skip, err := device.bumpAttachCount(true)
if err != nil {
return err
}
if skip {
return nil
}
	// As explained for the attach function in block.go, the block index of a
	// generic block device is needed in some situations.
	// Since vhost-user-blk uses the "vd" prefix in the Linux kernel (not "sd"),
	// the sandbox block index should be updated only if the sandbox default
	// block driver is "virtio-blk"/"virtio-blk-ccw"/"virtio-mmio", which use
	// the "vd" prefix in the Linux kernel.
index := -1
updateBlockIndex := isVirtioBlkBlockDriver(device.DeviceInfo.DriverOptions)
if updateBlockIndex {
index, err = devReceiver.GetAndSetSandboxBlockIndex()
}
defer func() {
if err != nil {
if updateBlockIndex {
devReceiver.UnsetSandboxBlockIndex(index)
}
device.bumpAttachCount(false)
}
}()
if err != nil {
return err
}
vAttrs := &config.VhostUserDeviceAttrs{
DevID: utils.MakeNameID("blk", device.DeviceInfo.ID, maxDevIDSize),
SocketPath: device.DeviceInfo.HostPath,
Type: config.VhostUserBlk,
Index: index,
}
deviceLogger().WithFields(logrus.Fields{
"device": device.DeviceInfo.HostPath,
"SocketPath": vAttrs.SocketPath,
"Type": config.VhostUserBlk,
"Index": index,
}).Info("Attaching device")
device.VhostUserDeviceAttrs = vAttrs
if err = devReceiver.HotplugAddDevice(ctx, device, config.VhostUserBlk); err != nil {
return err
}
return nil
}
func isVirtioBlkBlockDriver(customOptions map[string]string) bool {
var blockDriverOption string
if customOptions == nil {
// User has not chosen a specific block device type
// Default to SCSI
blockDriverOption = config.VirtioSCSI
} else {
blockDriverOption = customOptions[config.BlockDriverOpt]
}
if blockDriverOption == config.VirtioBlock ||
blockDriverOption == config.VirtioBlockCCW ||
blockDriverOption == config.VirtioMmio {
return true
}
return false
}
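// For example, a nil options map falls back to virtio-scsi and returns false,
// while customOptions[config.BlockDriverOpt] set to config.VirtioBlock,
// config.VirtioBlockCCW or config.VirtioMmio makes this return true.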
// Detach is the standard api.Device interface method; it is used to remove the
// device from a DeviceReceiver.
func (device *VhostUserBlkDevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) error {
skip, err := device.bumpAttachCount(false)
if err != nil {
return err
}
if skip {
return nil
}
defer func() {
if err != nil {
device.bumpAttachCount(true)
} else {
updateBlockIndex := isVirtioBlkBlockDriver(device.DeviceInfo.DriverOptions)
if updateBlockIndex {
devReceiver.UnsetSandboxBlockIndex(device.VhostUserDeviceAttrs.Index)
}
}
}()
deviceLogger().WithField("device", device.DeviceInfo.HostPath).Info("Unplugging vhost-user-blk device")
if err = devReceiver.HotplugRemoveDevice(ctx, device, config.VhostUserBlk); err != nil {
deviceLogger().WithError(err).Error("Failed to unplug vhost-user-blk device")
return err
}
return nil
}
// DeviceType is the standard api.Device interface method; it returns the device type.
func (device *VhostUserBlkDevice) DeviceType() config.DeviceType {
return config.VhostUserBlk
}
// GetDeviceInfo returns the device information (vhost-user attributes) for this device.
func (device *VhostUserBlkDevice) GetDeviceInfo() interface{} {
return device.VhostUserDeviceAttrs
}
// Save converts Device to DeviceState
func (device *VhostUserBlkDevice) Save() persistapi.DeviceState {
ds := device.GenericDevice.Save()
ds.Type = string(device.DeviceType())
vAttr := device.VhostUserDeviceAttrs
if vAttr != nil {
ds.VhostUserDev = &persistapi.VhostUserDeviceAttrs{
DevID: vAttr.DevID,
SocketPath: vAttr.SocketPath,
Type: string(vAttr.Type),
PCIPath: vAttr.PCIPath,
Index: vAttr.Index,
}
}
return ds
}
// Load loads DeviceState and converts it to specific device
func (device *VhostUserBlkDevice) Load(ds persistapi.DeviceState) {
device.GenericDevice = &GenericDevice{}
device.GenericDevice.Load(ds)
dev := ds.VhostUserDev
if dev == nil {
return
}
device.VhostUserDeviceAttrs = &config.VhostUserDeviceAttrs{
DevID: dev.DevID,
SocketPath: dev.SocketPath,
Type: config.DeviceType(dev.Type),
PCIPath: dev.PCIPath,
Index: dev.Index,
}
}
// GetAttachCount() and DeviceID() are also required by the api.Device interface;
// they are inherited from the embedded *GenericDevice, so there is no need to
// duplicate that code here.
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
src/main/java/Main.java
|
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpServer;
import io.vertx.ext.web.Router;
import io.vertx.ext.web.handler.BodyHandler;
import io.vertx.ext.web.handler.LoggerFormat;
import io.vertx.ext.web.handler.LoggerHandler;
import org.json.JSONException;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class Main {
private static final Logger logger = LoggerFactory.getLogger(Main.class);
private static KafkaStresser kafkaStresser;
public static void main(String[] args) {
String kafkaServers = System.getenv("KAFKA_SERVERS");
if (kafkaServers == null) {
logger.error("KAFKA_SERVERS is not set");
System.exit(1);
}
kafkaStresser = new KafkaStresser(kafkaServers);
Vertx vertx = Vertx.vertx();
Router api = Router.router(vertx);
api.post("/start").handler(ctx -> {
try {
JSONObject body = new JSONObject(ctx.getBodyAsString());
int topicsCount = body.getInt("topicsCount");
int partitionsPerTopic = body.getInt("partitionsPerTopic");
logger.info("starting a new session with {} topics and {} partitions per topic",
topicsCount, partitionsPerTopic);
kafkaStresser.start(topicsCount, partitionsPerTopic);
ctx.response().setStatusCode(200).end(status(null));
} catch (JSONException ex) {
logger.error("invalid json message format received", ex);
ctx.response().setStatusCode(400).end(status(ex));
} catch (Exception ex) {
logger.error("error", ex);
ctx.response().setStatusCode(500).end(status(ex));
}
});
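        // Illustrative request (values made up): POST /api/start with a JSON body
        // such as {"topicsCount": 10, "partitionsPerTopic": 3} starts a new stress
        // session; POST /api/stop (registered below) tears it down.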
api.post("/stop").handler(ctx -> {
try {
logger.info("stopping session");
kafkaStresser.stop();
ctx.response().setStatusCode(200).end(status(null));
} catch (Exception ex) {
logger.error("error", ex);
ctx.response().setStatusCode(500).end(status(ex));
}
});
Router mainRouter = Router.router(vertx);
mainRouter.route().handler(BodyHandler.create());
mainRouter.route().handler(LoggerHandler.create(LoggerFormat.TINY));
mainRouter.route().consumes("application/json").produces("application/json");
mainRouter.route().handler(event -> {
event.response()
.putHeader("Content-Type", "application/json")
.putHeader("Accept-Encoding", "gzip,deflate");
event.next();
});
mainRouter.mountSubRouter("/api", api);
logger.info("starting http server");
HttpServer server = vertx.createHttpServer().requestHandler(mainRouter);
server.listen(8080);
}
private static String status(Throwable throwable) {
JSONObject root = new JSONObject();
if (throwable == null) {
root.put("status", "success");
} else {
root.put("status", "failure");
root.put("reason", throwable.getMessage());
}
return root.toString(4);
}
}
|
[
"\"KAFKA_SERVERS\""
] |
[] |
[
"KAFKA_SERVERS"
] |
[]
|
["KAFKA_SERVERS"]
|
java
| 1 | 0 | |
test/distributed/test_distributed_fork.py
|
import os
import sys
import tempfile
from functools import wraps
import torch
import torch.cuda
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import TestCase, find_free_port, run_tests
from torch.distributed.distributed_c10d import _get_default_group
from torch.testing._internal.distributed.distributed_test import (
DistributedTest, TestDistBackend
)
torch.backends.cuda.matmul.allow_tf32 = False
CPP_EXTENSIONS_WARNING = """
Ninja (https://ninja-build.org) must be available to run C++ extensions tests,
but it could not be found. Install ninja with `pip install ninja`
or `conda install ninja`.
"""
BACKEND = os.environ["BACKEND"]
INIT_METHOD = os.getenv("INIT_METHOD", "env://")
def skip_if_no_ninja(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
import torch.utils.cpp_extension
torch.utils.cpp_extension.verify_ninja_availability()
except RuntimeError:
print(CPP_EXTENSIONS_WARNING)
return 0
return func(*args, **kwargs)
return wrapper
if BACKEND == "gloo" or BACKEND == "nccl":
class TestDistBackendWithFork(TestDistBackend, DistributedTest._DistTestBase):
def setUp(self):
super().setUp()
self._fork_processes()
torch.backends.cudnn.flags(allow_tf32=False).__enter__()
elif BACKEND == "mpi":
WORLD_SIZE = os.environ["WORLD_SIZE"]
dist.init_process_group(init_method=INIT_METHOD, backend="mpi")
class TestMPIWithFork(TestCase, DistributedTest._DistTestBase):
pass
elif BACKEND == "test":
class TestBackendDynamicLoad(TestCase):
def setUp(self):
super(TestBackendDynamicLoad, self).setUp()
def _load_test_backend(self):
temp_dir = tempfile.mkdtemp()
src = "{}/../cpp_extensions/cpp_c10d_extension.cpp".format(os.path.abspath(os.path.dirname(__file__)))
extension = torch.utils.cpp_extension.load(
name="torch_test",
sources=[src],
build_directory=temp_dir
)
@skip_if_no_ninja
def test_backend_apis(self):
self._load_test_backend()
os.environ['WORLD_SIZE'] = '1'
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = str(find_free_port())
os.environ['RANK'] = '0'
dist.init_process_group(backend='test', init_method='env://', world_size=1, rank=0)
self.assertEqual(dist.get_rank(), 0)
self.assertEqual(dist.get_world_size(), 1)
process_group = _get_default_group()
work = process_group.allreduce([torch.rand(1), torch.rand(1)])
self.assertTrue(work.wait())
self.assertTrue(work.is_completed())
self.assertTrue(work.is_success())
work = process_group.broadcast([torch.rand(1)])
self.assertTrue(work.wait())
self.assertTrue(work.is_completed())
self.assertTrue(work.is_success())
dist.destroy_process_group()
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
[] |
[] |
[
"MASTER_ADDR",
"MASTER_PORT",
"BACKEND",
"RANK",
"INIT_METHOD",
"WORLD_SIZE"
] |
[]
|
["MASTER_ADDR", "MASTER_PORT", "BACKEND", "RANK", "INIT_METHOD", "WORLD_SIZE"]
|
python
| 6 | 0 | |
embedded_struct_test.go
|
package gorm_test
import (
"os"
"testing"
)
type BasePost struct {
Id int64
Title string
URL string
}
type Author struct {
ID string
Name string
Email string
}
type HNPost struct {
BasePost
Author `gorm:"embedded_prefix:user_"` // Embedded struct
Upvotes int32
}
type EngadgetPost struct {
BasePost BasePost `gorm:"embedded"`
Author Author `gorm:"embedded;embedded_prefix:author_"` // Embedded struct
ImageUrl string
}
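// For illustration (inferred from the tags above and the assertions in the tests
// below): HNPost's embedded Author fields are expected to map to the columns
// user_id, user_name and user_email, while EngadgetPost's Author maps to
// author_id, author_name and author_email.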
func TestPrefixColumnNameForEmbeddedStruct(t *testing.T) {
if dialect := os.Getenv("GORM_DIALECT"); dialect == "oracle" {
t.Skip("Skipping this because I do not spend time in the first round :)")
}
dialect := DB.NewScope(&EngadgetPost{}).Dialect()
engadgetPostScope := DB.NewScope(&EngadgetPost{})
if !dialect.HasColumn(engadgetPostScope.TableName(), "author_id") || !dialect.HasColumn(engadgetPostScope.TableName(), "author_name") || !dialect.HasColumn(engadgetPostScope.TableName(), "author_email") {
t.Errorf("should has prefix for embedded columns")
}
if len(engadgetPostScope.PrimaryFields()) != 1 {
t.Errorf("should have only one primary field with embedded struct, but got %v", len(engadgetPostScope.PrimaryFields()))
}
hnScope := DB.NewScope(&HNPost{})
if !dialect.HasColumn(hnScope.TableName(), "user_id") || !dialect.HasColumn(hnScope.TableName(), "user_name") || !dialect.HasColumn(hnScope.TableName(), "user_email") {
t.Errorf("should has prefix for embedded columns")
}
}
func TestSaveAndQueryEmbeddedStruct(t *testing.T) {
DB.Save(&HNPost{BasePost: BasePost{Id: 1, Title: "news"}})
DB.Save(&HNPost{BasePost: BasePost{Id: 2, Title: "hn_news"}})
var news HNPost
if err := DB.First(&news, "title = ?", "hn_news").Error; err != nil {
t.Errorf("no error should happen when query with embedded struct, but got %v", err)
} else if news.Title != "hn_news" {
t.Errorf("embedded struct's value should be scanned correctly")
}
DB.Save(&EngadgetPost{BasePost: BasePost{Id: 3, Title: "engadget_news"}})
var egNews EngadgetPost
if err := DB.First(&egNews, "title = ?", "engadget_news").Error; err != nil {
t.Errorf("no error should happen when query with embedded struct, but got %v", err)
} else if egNews.BasePost.Title != "engadget_news" {
t.Errorf("embedded struct's value should be scanned correctly")
}
if DB.NewScope(&HNPost{}).PrimaryField() == nil {
t.Errorf("primary key with embedded struct should works")
}
for _, field := range DB.NewScope(&HNPost{}).Fields() {
if field.Name == "BasePost" {
t.Errorf("scope Fields should not contain embedded struct")
}
}
}
|
[
"\"GORM_DIALECT\""
] |
[] |
[
"GORM_DIALECT"
] |
[]
|
["GORM_DIALECT"]
|
go
| 1 | 0 | |
java/src/main/java/io/metaparticle/Metaparticle.java
|
package io.metaparticle;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Array;
import java.lang.reflect.Method;
import java.nio.file.Files;
import java.nio.file.Paths;
import io.metaparticle.annotations.Package;
import io.metaparticle.annotations.Runtime;
import static io.metaparticle.Util.handleErrorExec;
import static io.metaparticle.Util.once;
public class Metaparticle {
public static boolean inDockerContainer() {
switch (System.getenv("METAPARTICLE_IN_CONTAINER")) {
case "true":
case "1":
return true;
case "false":
case "0":
return false;
}
File f = new File("/proc/1/cgroup");
if (f.exists()) {
try {
String s = new String(Files.readAllBytes(f.toPath()), "UTF-8");
return (s.indexOf("docker") != -1 || s.indexOf("kubepods") != -1);
} catch (IOException ex) {
throw new IllegalStateException(ex);
}
}
return false;
}
public static Executor getExecutor(Runtime cfg) {
if (cfg == null) {
return new DockerImpl();
}
switch (cfg.executor()) {
case "docker":
return new DockerImpl();
case "aci":
return new AciExecutor();
case "metaparticle":
return new MetaparticleExecutor();
default:
throw new IllegalStateException("Unknown executor: " + cfg.executor());
}
}
public static Builder getBuilder(Package pkg) {
if (pkg == null) {
return new DockerImpl();
}
switch (pkg.builder()) {
case "docker":
return new DockerImpl();
default:
throw new IllegalStateException("Unknown builder: " + pkg.builder());
}
}
public static void writeDockerfile(String className, Package p, String projectName) throws IOException {
String contents =
"FROM openjdk:8-jre-alpine\n" +
"COPY %s /main.jar\n" +
"CMD java -classpath /main.jar %s";
byte[] output =
String.format(contents, p.jarFile(), className).getBytes();
Files.write(Paths.get("Dockerfile"), output);
}
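    // For example (illustrative class and jar names only), calling
    // writeDockerfile("com.example.Main", p, ...) with p.jarFile() == "target/app.jar"
    // writes a Dockerfile containing:
    //   FROM openjdk:8-jre-alpine
    //   COPY target/app.jar /main.jar
    //   CMD java -classpath /main.jar com.example.Main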
public static void Containerize(Runnable fn) {
if (inDockerContainer()) {
fn.run();
} else {
File f = new File("pom.xml");
try {
if (!f.exists()) {
throw new IllegalStateException("Can not find: " + f.getCanonicalPath());
}
} catch (IOException ex) {
throw new IllegalStateException(ex);
}
StackTraceElement[] traces = Thread.currentThread().getStackTrace();
String className = traces[2].getClassName();
String methodName = traces[2].getMethodName();
String name = "web";
String image = "test";
try {
Class clazz = Class.forName(className);
Method m = clazz.getMethod(methodName, String[].class);
Package p = m.getAnnotation(Package.class);
Runtime r = m.getAnnotation(Runtime.class);
if (p.repository().length() != 0) {
image = p.repository() + "/" + image;
}
Executor exec = getExecutor(r);
Builder builder = getBuilder(p);
OutputStream stdout = p.verbose() ? System.out : null;
OutputStream stderr = p.quiet() ? null : System.err;
writeDockerfile(className, p, "metaparticle-package");
handleErrorExec(new String[] {"mvn", "package"}, System.out, System.err);
builder.build(".", image, stdout, stderr);
builder.push(image, stdout, stderr);
Runnable cancel = once(() -> exec.cancel(name));
java.lang.Runtime.getRuntime().addShutdownHook(new Thread(cancel));
exec.run(image, name, r, stdout, stderr);
exec.logs(name, System.out, System.err);
cancel.run();
} catch (NoSuchMethodException | ClassNotFoundException | IOException ex) {
// This should really never happen.
throw new IllegalStateException(ex);
}
}
}
}
|
[
"\"METAPARTICLE_IN_CONTAINER\""
] |
[] |
[
"METAPARTICLE_IN_CONTAINER"
] |
[]
|
["METAPARTICLE_IN_CONTAINER"]
|
java
| 1 | 0 | |
modules/example/src/example.py
|
import os
import time
import shutil
import datetime
import sys
from lib import handler
# Environment variables:
# - HANDLER_BASE_DIR [OPTIONAL]
base_dir = "/ion/"
if "HANDLER_BASE_DIR" in os.environ:
base_dir = os.environ["HANDLER_BASE_DIR"]
else:
print("HANDLER_BASE_DIR not set, defaulting to /ion/")
print("module starting")
# This module has no input files as it
# is the first in its graph. However,
# if it was a subsequent module it could
# read files from `{base_dir}/in/data`
# and structured data from the JSON file
# `{base_dir}/in/meta.json`
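# Minimal sketch (an assumption about downstream modules, not part of the
# original example): a subsequent module could load that structured input as
# shown here. The guard keeps this a no-op for this first-in-graph module.
import json
meta_path = os.path.join(base_dir, "in", "meta.json")
if os.path.exists(meta_path):
    with open(meta_path) as meta_file:
        upstream_meta = json.load(meta_file)
        print("loaded upstream metadata: {} entries".format(len(upstream_meta)))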
# Do some processing, this could be anything
print("fake doing some work...")
spinner = handler.spinning_cursor()
for _ in range(50):
sys.stdout.write(next(spinner))
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\b')
# Write some output files
for i in range(0, 5):
# We write the files out locally to disk.
# Once they are sync'ed with the Ion
# data plane, their external URIs will be
    # stored as metadata for later retrieval.
out_file = "image" + str(i) + ".png"
handler.write_file(base_dir, out_file, "face!")
print("wrote file {}".format(out_file))
# For each file we wrote,
# we will raise a new event
# suggesting we found a face.
event = [{
"key": "eventType", # Required key
"value": "face_detected"
},
{
"key": "files", # Required key
"value": out_file
}]
event_name = "event{}.json".format(i)
handler.write_event(base_dir, event_name, event)
print("wrote event {}".format(event_name))
# During our processing, we may
# have derived some insight we
# wish to export and persist.
insight = [{
"key": "source",
"value": "facebook"
},
{
"key": "image_dimensions",
"value": "1080x1024"
},
{
"key": "image_size",
"value": "2.3MB"
},
{
"key": "image_md5",
"value": "1a79a4d60de6718e8e5b326e338ae533"
}]
handler.write_insight(base_dir, insight)
print("wrote new insight")
# Now we're finished
print("module finished")
|
[] |
[] |
[
"HANDLER_BASE_DIR"
] |
[]
|
["HANDLER_BASE_DIR"]
|
python
| 1 | 0 | |
synapse_worker_init/worker_init.py
|
#!/usr/local/bin/python
import os
import sys
import jinja2
# Utility functions
def log(txt):
print(txt, file=sys.stderr)
def error(txt):
log(txt)
sys.exit(2)
def convert(src, dst, environ):
"""Generate a file from a template
Args:
src (str): path to input file
dst (str): path to file to write
environ (dict): environment dictionary, for replacement mappings.
"""
with open(src) as infile:
template = infile.read()
rendered = jinja2.Template(template).render(**environ)
with open(dst, "w") as outfile:
outfile.write(rendered)
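# Example (hypothetical values, shown for illustration only):
#   convert("/templates/synapse_worker.yaml", "/worker-gen/worker_config.yaml",
#           {"app": "synapse.app.generic_worker", "name": "worker1",
#            "host": "synapse", "host_port": 9093, "port": 8008,
#            "server_name": "matrix.example.org",
#            "listener_resources": ["client", "federation"]})
# renders the Jinja2 template with that mapping and writes the result to dst.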
def generate_federation_sender_worker_config(environ,
config_path,
app,
name,
host,
host_port,
port,
server_name):
listener_resources = []
volitile_values = {
"app": app,
"name": name,
"host": host,
"host_port": host_port,
"port": port,
"server_name": server_name,
"listener_resources": listener_resources,
}
convert("/templates/synapse_worker.yaml", config_path, volitile_values)
with open(config_path, 'r') as f:
for i, line in enumerate(f, start=1):
print('{} = {}'.format(i, line))
def generate_generic_worker_config(environ,
config_path,
app,
name,
host,
host_port,
port,
server_name):
listener_resources = ["client", "federation"]
volitile_values = {
"app": app,
"name": name,
"host": host,
"host_port": host_port,
"port": port,
"server_name": server_name,
"listener_resources": listener_resources,
}
convert("/templates/synapse_worker.yaml", config_path, volitile_values)
with open(config_path, 'r') as f:
for i, line in enumerate(f, start=1):
print('{} = {}'.format(i, line))
def run_create_worker_config(environ, ownership):
config_dir = environ.get("WORKER_CONFIG_GEN", "/worker-gen")
name = environ.get("WORKER_NAME")
if name is None:
error("""\
Worker name not set
The worker must have a name passed to it via the environment in order
for it to function
""")
host = environ.get("SYNAPSE_HOST")
if host is None:
error("""\
Synapse host not set
The worker must have the local synapse instance passed to it via the
environment in order for it to function
""")
host_port = environ.get("SYNAPSE_REPLICATON_PORT", 9093)
server_name = environ.get("SYNAPSE_HOST_NAME")
if server_name is None:
error("""\
Synapse host name not set
The worker must have the local synapse's hostname passed to it via the
environment in order for it to function
""")
worker_config = config_dir + "/worker_config.yaml"
app = environ.get("SYNAPSE_WORKER", "synapse.app.generic_worker")
port = int(environ.get("WORKER_PORT", 8008))
if app == "synapse.app.generic_worker":
generate_generic_worker_config(environ, worker_config,
app, name, host,
host_port, port, server_name)
elif app == "synapse.app.federation_sender":
generate_federation_sender_worker_config(environ, worker_config,
app, name, host,
host_port, port, server_name)
else:
error("""\
Synapse worker type is currently not supported or is unknown
These are the workers that are supported:
synapse.app.generic_worker
synapse.app.federation_sender
""")
def main(args, environ):
desired_uid = int(environ.get("UID", "991"))
desired_gid = int(environ.get("GID", "991"))
if (desired_uid == os.getuid()) and (desired_gid == os.getgid()):
ownership = None
else:
ownership = "{}:{}".format(desired_uid, desired_gid)
if ownership is None:
log("Will not perform chmod/gosu as UserID already matches request")
log("Generating worker config....")
run_create_worker_config(environ, ownership)
if __name__ == "__main__":
main(sys.argv, os.environ)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cinderella/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cinderella.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
ImageRenaming.pyw
|
import os, sys, urllib.request
from tkinter import *
from tkinter.messagebox import *
__version__ = 3
__filename__ = "ImageRenaming"
__basename__ = os.path.basename(sys.argv[0])
__savepath__ = os.path.join(os.environ['APPDATA'], "QuentiumPrograms")
__iconpath__ = __savepath__ + "/{}.ico".format(__filename__)
try:urllib.request.urlopen("https://www.google.fr/", timeout=1); connection = True
except:connection = False
if not os.path.exists(__iconpath__):
try:os.mkdir(__savepath__)
except:pass
if connection == True:
try:urllib.request.urlretrieve("https://quentium.fr/+++PythonDL/{}.ico".format(__filename__), __iconpath__)
except:pass
if connection == True:
try:script_version = int(urllib.request.urlopen("https://quentium.fr/programs/index.php").read().decode().split(__filename__ + "<!-- Version: ")[1].split(" --></h2>")[0])
except:script_version = __version__
if script_version > __version__:
if os.path.exists(__iconpath__):popup = Tk(); popup.attributes("-topmost", 1); popup.iconbitmap(__iconpath__); popup.withdraw()
ask_update = askquestion(__filename__ + " V" + str(script_version), "Une mise à jour à été trouvée, souhaitez vous la télécharger puis l'éxécuter ?", icon="question")
if ask_update == "yes":
try:os.rename(__basename__, __filename__ + "-old.exe")
except:os.remove(__filename__ + "-old.exe"); os.rename(__basename__, __filename__ + "-old.exe")
if "-32" in str(__basename__):urllib.request.urlretrieve("https://quentium.fr/download.php?file={}-32.exe".format(__filename__), __filename__ + ".exe")
else:urllib.request.urlretrieve("https://quentium.fr/download.php?file={}.exe".format(__filename__), __filename__ + ".exe")
showwarning(__filename__, "Le programme va redémarrer pour fonctionner sous la nouvelle version.", icon="warning")
os.system("start " + __filename__ + ".exe"); os._exit(1)
__filename__ = __filename__ + " V" + str(__version__)
from datetime import datetime
from tkinter.filedialog import *
from tkinter import *
def start_rename():
directory = askdirectory()
if directory:
if askyesno(__filename__, "Êtes-vous sûr de renommer toutes les images dans ce dossier ? Cette action ne peux pas être annulée !"):
files1 = [f for f in os.listdir(directory) if f[-4:].lower() in (".jpg",".JPG",".png",".PNG",".jpeg",".JPEG",".bmp",".gif")]
for (index, filename) in enumerate(files1):
file = directory + "/" + filename
extension = os.path.splitext(filename)[1]
if check_var.get() == 0:
time1 = os.path.getctime(file)
elif check_var.get() == 1:
time1 = os.path.getmtime(file)
time2 = datetime.fromtimestamp(time1)
time = time2.strftime("%Y%m%d%H%M%S%f")
newname = time + "_" + str(os.path.getsize(file)) + extension
os.rename(file, directory + "/" + newname)
files2 = [f for f in os.listdir(directory) if f[-4:].lower() in (".jpg",".JPG",".png",".PNG",".jpeg",".JPEG",".bmp",".gif")]
for (index, filename) in enumerate(files2):
file = directory + "/" + filename
extension = os.path.splitext(filename)[1]
newname = "Image-%05d%s" % (index + 1, extension)
                if os.path.exists(directory + "/" + newname):
                    continue
                os.rename(file, directory + "/" + newname)
imagerenaming.destroy()
os._exit(0)
else:
showwarning(__filename__, "Erreur : Aucun dossier n'a été sélectionné !")
imagerenaming = Tk()
width = 800
height = 500
imagerenaming.update_idletasks()
x = (imagerenaming.winfo_screenwidth() - width) // 2
y = (imagerenaming.winfo_screenheight() - height) // 2
imagerenaming.geometry("{}x{}+{}+{}".format(width , height, int(x), int(y)))
imagerenaming.resizable(width=False, height=False)
imagerenaming.configure(bg = "lightgray")
if os.path.exists(__iconpath__):
imagerenaming.iconbitmap(__iconpath__)
imagerenaming.title(__filename__)
Label(imagerenaming, text="Bienvenue dans le programme de renommage !", font="impact 30", fg="red", bg="lightgray").pack(pady=60)
check_var = IntVar()
check_var.set(0)
Radiobutton(imagerenaming, text="Date de création", variable=check_var, value=0, font="impact 20", bg="lightgray").pack(pady=10)
Radiobutton(imagerenaming, text="Date de modification", variable=check_var, value=1, font="impact 20", bg="lightgray").pack()
Button(imagerenaming, text="Renommer des images", command=start_rename, relief=GROOVE, width=25, font="impact 20", fg="black").pack(pady=50)
imagerenaming.mainloop()
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
isecl-k8s-scheduler/config/config.go
|
/*
Copyright © 2019 Intel Corporation
SPDX-License-Identifier: BSD-3-Clause
*/
package config
import (
"fmt"
"io/ioutil"
"os"
"regexp"
"strconv"
"github.com/sirupsen/logrus"
"intel/isecl/k8s-extended-scheduler/v3/constants"
"github.com/pkg/errors"
)
var tagPrefixRegex = regexp.MustCompile("(^[a-zA-Z0-9_///.-]*$)")
const LogFile = "/var/log/isecl-k8s-extensions/isecl-k8s-scheduler.log"
const HttpLogFile = "/var/log/isecl-k8s-extensions/isecl-k8s-scheduler-http.log"
var Log *logrus.Logger
type Config struct {
Port int //Port for the Extended scheduler to listen on
//Server Certificate to be used for TLS handshake
ServerCert string
//Server Key to be used for TLS handshake
ServerKey string
//Integration Hub Key to be used for parsing signed trust report
IntegrationHubPublicKeys map[string][]byte
LogLevel string
LogMaxLength int
TagPrefix string
}
func GetExtendedSchedulerConfig() (*Config, error) {
//PORT for the extended scheduler to listen.
port, err := strconv.Atoi(os.Getenv("PORT"))
if err != nil{
fmt.Fprintln(os.Stdout, "Error while parsing Env variable PORT, setting to default value 8888")
port = 8888
}
iHubPublicKeys := make(map[string][]byte, 2)
// Get IHub public key from ihub with hvs attestation type
iHubPubKeyPath := os.Getenv("HVS_IHUB_PUBLIC_KEY_PATH")
if iHubPubKeyPath != ""{
iHubPublicKeys[constants.HVSAttestation], err = ioutil.ReadFile(iHubPubKeyPath)
if err != nil {
return nil, errors.Errorf("Error while reading file %s\n", iHubPubKeyPath)
}
}
// Get IHub public key from ihub with skc attestation type
iHubPubKeyPath = os.Getenv("SGX_IHUB_PUBLIC_KEY_PATH")
if iHubPubKeyPath != ""{
iHubPublicKeys[constants.SGXAttestation], err= ioutil.ReadFile(iHubPubKeyPath)
if err != nil {
return nil, errors.Errorf("Error while reading file %s\n", iHubPubKeyPath)
}
}
logLevel := os.Getenv("LOG_LEVEL")
if logLevel == ""{
fmt.Fprintln(os.Stdout,"Env variable LOG_LEVEL is empty, setting to default value Info")
logLevel = "INFO"
}
logMaxLen, err := strconv.Atoi(os.Getenv("LOG_MAX_LENGTH"))
if err != nil{
fmt.Fprintln(os.Stdout, "Env variable LOG_MAX_LENGTH is empty, setting to default value 1500")
logMaxLen = 1500
}
serverCert := os.Getenv("TLS_CERT_PATH")
if serverCert == ""{
return nil, errors.New("Env variable TLS_CERT_PATH is empty")
}
serverKey := os.Getenv("TLS_KEY_PATH")
if serverKey == ""{
return nil, errors.New("Env variable TLS_KEY_PATH is empty")
}
tagPrefix := os.Getenv("TAG_PREFIX")
if !tagPrefixRegex.MatchString(tagPrefix) {
return nil, errors.New("Invalid string formatted input")
}
return &Config{
Port: port,
IntegrationHubPublicKeys: iHubPublicKeys,
LogLevel: logLevel,
ServerCert: serverCert,
ServerKey: serverKey,
TagPrefix: tagPrefix,
LogMaxLength: logMaxLen,
}, nil
}
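// Example environment for a local run (all values are illustrative only):
//   PORT=8888 LOG_LEVEL=INFO LOG_MAX_LENGTH=1500 TAG_PREFIX=isecl.
//   TLS_CERT_PATH=/etc/isecl-k8s-extensions/server.crt
//   TLS_KEY_PATH=/etc/isecl-k8s-extensions/server.key
//   HVS_IHUB_PUBLIC_KEY_PATH=/etc/ihub/hvs_public_key.pem
// TLS_CERT_PATH and TLS_KEY_PATH are mandatory; the other variables are optional
// or fall back to the defaults handled above.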
|
[
"\"PORT\"",
"\"HVS_IHUB_PUBLIC_KEY_PATH\"",
"\"SGX_IHUB_PUBLIC_KEY_PATH\"",
"\"LOG_LEVEL\"",
"\"LOG_MAX_LENGTH\"",
"\"TLS_CERT_PATH\"",
"\"TLS_KEY_PATH\"",
"\"TAG_PREFIX\""
] |
[] |
[
"PORT",
"TLS_KEY_PATH",
"TAG_PREFIX",
"LOG_LEVEL",
"SGX_IHUB_PUBLIC_KEY_PATH",
"TLS_CERT_PATH",
"LOG_MAX_LENGTH",
"HVS_IHUB_PUBLIC_KEY_PATH"
] |
[]
|
["PORT", "TLS_KEY_PATH", "TAG_PREFIX", "LOG_LEVEL", "SGX_IHUB_PUBLIC_KEY_PATH", "TLS_CERT_PATH", "LOG_MAX_LENGTH", "HVS_IHUB_PUBLIC_KEY_PATH"]
|
go
| 8 | 0 |