filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
app.py
|
import os
import redis
import json
from flask import Flask, render_template, redirect, request, url_for, make_response
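# VCAP_SERVICES is set by Cloud Foundry and contains bound service credentials as JSON;
# when it is absent we fall back to a local Redis instance below.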
if 'VCAP_SERVICES' in os.environ:
VCAP_SERVICES = json.loads(os.environ['VCAP_SERVICES'])
if VCAP_SERVICES:
CREDENTIALS = VCAP_SERVICES["rediscloud"][0]["credentials"]
r = redis.Redis(host=CREDENTIALS["hostname"], port=CREDENTIALS["port"], password=CREDENTIALS["password"])
else:
r = redis.Redis(host='127.0.0.1', port='6379')
else:
r = redis.Redis(host='127.0.0.1', port='6379')
#rediscloud_service = json.loads(os.environ['VCAP_SERVICES'])['rediscloud'][0]
#credentials = rediscloud_service['credentials']
#r = redis.Redis(host=credentials['hostname'], port=credentials['port'], password=credentials['password'])
app = Flask(__name__)
@app.route('/')
def mainpage():
response = """
<HTML><BODY><h2>
<a href="/survey">Take Survey</a><br>
<a href="/dumpsurveys">Survey Results</a><br>
</h2>
</BODY>
"""
return response
@app.route('/survey')
def survey():
resp = make_response(render_template('survey.html'))
return resp
@app.route('/suthankyou.html', methods=['POST'])
def suthankyou():
global r
d = request.form['division']
s = request.form['state']
f = request.form['feedback']
print ("Division is " + d)
print ("State is " + s)
print ("Feedback: " + f)
Counter = r.incr('new_counter')
print ("the counter is now: ", Counter)
## Create a new key that includes the counter
newsurvey = 'new_survey' + str(Counter)
print ("Storing the survey now")
## Now the key name is the content of the variable newsurvey
r.hmset(newsurvey,{'division':d,'state':s,'feedback':f})
resp = """
<h3> - THANKS FOR TAKING THE SURVEY - </h3>
"""
return resp
@app.route('/dumpsurveys')
def dumpsurveys():
global r
response = "Dump of all reviews so far<br>"
response += "--------------------------<br>"
print ("Reading back from Redis")
for eachsurvey in r.keys('new_survey*'):
response += "Division : " + r.hget(eachsurvey,'division') + "<br>"
response += "State : " + r.hget(eachsurvey,'state') + "<br>"
response += "Feedback : " + r.hget(eachsurvey,'feedback') + "<br>"
response += " ----------------------<br>"
return response
if __name__ == "__main__":
app.run(debug=False, host='0.0.0.0', \
port=int(os.getenv('PORT', '5000')), threaded=True)
|
[] |
[] |
[
"PORT",
"VCAP_SERVICES"
] |
[]
|
["PORT", "VCAP_SERVICES"]
|
python
| 2 | 0 | |
test/manual/test_pymongo.py
|
# Standalone script for testing pymongo
import os
import oboe
import oboeware.inst_pymongo
from pymongo.connection import Connection
from pymongo.database import Database
from bson.dbref import DBRef
import time
import random
class MongoTest:
def test1(self):
db = Database(self._get_connection(), "pymongo_test")
test = db.create_collection("test_1_4")
test.save({"hello": u"world"})
test.rename("test_1_new")
db.drop_collection("test_1_new")
def test2(self):
db = Database(self._get_connection(), "pymongo_test")
test = db.create_collection("test_2")
try:
for i in range(100):
name = "test %d" % (i)
ret = test.save({"name": name, "group_id" : i % 3, "posts": i % 20})
print "Save Ret: %s" % (ret)
ret = test.update({"posts": 10}, {"$set": {"posts": 100}}, multi=True, safe=True)
#ret = test.update({"posts": 10}, {"$set": {"posts": 100}}, multi=True)
print "Update Ret: %s" % (ret)
test.update({"name": "test 2"}, {"$set": {"posts": 200}})
test.create_index("posts")
test.ensure_index("posts")
for r in test.find({"posts":100}):
print "Found: %s" % (r,)
ret = test.remove({"posts": 1}, safe=True)
print "Remove Ret: %s" % (ret)
groups = test.group(
key={"group_id":1},
condition=None,
initial={"post_sum":0},
reduce="function(obj,prev) {prev.post_sum++;}"
)
for g in groups:
print "Group: %s" % (g,)
for d in test.distinct('posts'):
print "Distinct: %s" % (d,)
if 'reindex' in dir(test):
test.reindex()
test.drop_indexes()
finally:
db.drop_collection("test_2")
def test3(self):
db = Database(self._get_connection(), "pymongo_test")
test = db.test2
for r in test.find({"age": 10}):
print r
def test4(self):
db = Database(self._get_connection(), "pymongo_test")
test = db.create_collection("test_4")
try:
for i in range(5):
name = "test %d" % (i)
test.save({ "user_id": i, "name": name, "group_id" : i % 10, "posts": i % 20})
test.create_index("user_id")
for i in xrange(6):
for r in test.find( { "group_id": random.randint(0,10) } ):
print "Found: %s " % (r)
finally:
db.drop_collection("test_4")
# From https://gist.github.com/769687
def dbref_test(self):
db = Database(self._get_connection(), "pymongo_test")
try:
db.create_collection('owners')
db.create_collection('tasks')
db.create_collection('tasks_ref')
# owners and tasks
db.owners.insert({"name":"Jim"})
db.tasks.insert([
{"name": "read"},
{"name": "sleep"}
])
# update jim with tasks: reading and sleeping
reading_task = db.tasks.find_one({"name": "read"})
sleeping_task = db.tasks.find_one({"name": "sleep"})
jim_update = db.owners.find_one({"name": "Jim"})
jim_update["tasks"] = [
DBRef(collection = "tasks", id = reading_task["_id"]),
DBRef(collection = "tasks", id = sleeping_task["_id"])
]
db.owners.save(jim_update)
# get jim fresh again and display his tasks
fresh_jim = db.owners.find_one({"name":"Jim"})
print "tasks are:"
for task in fresh_jim["tasks"]:
print db.dereference(task)["name"]
db.tasks_ref.insert( { "ref" : DBRef(collection = "tasks", id = reading_task["_id"]) })
db.tasks_ref.insert( { "ref" : DBRef(collection = "tasks", id = sleeping_task["_id"]) })
r1 = db.tasks_ref.find( { "ref" : DBRef(collection = "tasks", id = reading_task["_id"]) })
print r1.count()
finally:
db.drop_collection('owners')
db.drop_collection('tasks')
db.drop_collection('tasks_ref')
def binary_contents_test(self):
db = Database(self._get_connection(), "pymongo_test")
test = db.create_collection("test_binary")
import os
import bson
obj = os.urandom(1024)
test.save({"hello": bson.Binary(obj)})
db.drop_collection("test_binary")
def _get_connection(self, *args, **kwargs):
host = os.environ.get("MONGODB_HOST", "localhost")
port = int(os.environ.get("MONGODB_PORT", 27017))
return Connection(host, port, *args, **kwargs)
def main(with_oboe=True):
for i in xrange(1):
if with_oboe:
oboe.config['tracing_mode'] = 'always'
oboe.config['sample_rate'] = 1.0
oboe.start_trace("MongoTest")
mt = MongoTest()
mt.test1()
mt.test2()
mt.test4()
mt.dbref_test()
mt.binary_contents_test()
if with_oboe:
oboe.end_trace('MongoTest')
print 'Finished.'
if __name__ == '__main__':
import sys
if len(sys.argv) == 2 and sys.argv[1] == '--no-oboe':
print 'not using oboe instrumentation'
main(False)
elif len(sys.argv) != 1:
raise Exception('invalid args')
else:
main()
|
[] |
[] |
[
"MONGODB_HOST",
"MONGODB_PORT"
] |
[]
|
["MONGODB_HOST", "MONGODB_PORT"]
|
python
| 2 | 0 | |
codes/utils/util.py
|
import os
import sys
import time
import math
from datetime import datetime
import random
import logging
from collections import OrderedDict
import numpy as np
import cv2
import torch
from shutil import get_terminal_size
import yaml
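# Prefer the libyaml-backed CLoader/CDumper for speed; fall back to the pure-Python
# Loader/Dumper when the C extension is not available.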
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
def OrderedYaml():
'''yaml orderedDict support'''
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
Dumper.add_representer(OrderedDict, dict_representer)
Loader.add_constructor(_mapping_tag, dict_constructor)
return Loader, Dumper
####################
# miscellaneous
####################
def get_timestamp():
return datetime.now().strftime('%y%m%d-%H%M%S')
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def mkdirs(paths):
if isinstance(paths, str):
mkdir(paths)
else:
for path in paths:
mkdir(path)
def mkdir_and_rename(path):
if os.path.exists(path):
new_name = path + '_archived_' + get_timestamp()
print('Path already exists. Rename it to [{:s}]'.format(new_name))
logger = logging.getLogger('base')
logger.info('Path already exists. Rename it to [{:s}]'.format(new_name))
os.rename(path, new_name)
os.makedirs(path)
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def setup_logger(logger_name, root, phase, level=logging.INFO, screen=False, tofile=False):
'''set up logger'''
lg = logging.getLogger(logger_name)
formatter = logging.Formatter('%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s',
datefmt='%y-%m-%d %H:%M:%S')
lg.setLevel(level)
if tofile:
log_file = os.path.join(root, phase + '_{}.log'.format(get_timestamp()))
fh = logging.FileHandler(log_file, mode='w')
fh.setFormatter(formatter)
lg.addHandler(fh)
if screen:
sh = logging.StreamHandler()
sh.setFormatter(formatter)
lg.addHandler(sh)
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
'''
Converts a torch Tensor into an image Numpy array
Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
'''
tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # clamp
tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]
n_dim = tensor.dim()
if n_dim == 4:
n_img = len(tensor)
img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
elif n_dim == 3:
img_np = tensor.numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
elif n_dim == 2:
#img_np = tensor.numpy()
img_np = tensor.numpy()
img_np = np.expand_dims(img_np, axis=2)
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
img_np = np.squeeze(img_np)
else:
raise TypeError(
'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
if out_type == np.uint8:
img_np = (img_np * 255.0).round()
elif out_type == np.uint16:
img_np = (img_np * 65535.0).round()
    # Important. Unlike matlab, numpy.uint8() WILL NOT round by default.
return img_np.astype(out_type)
def tensor2numpy(tensor):
img_np = tensor.numpy()
img_np[img_np < 0] = 0
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
return img_np.astype(np.float32)
def save_img_with_ratio(image_path, image, alignratio_path):
align_ratio = (2 ** 16 - 1) / image.max()
np.save(alignratio_path, align_ratio)
uint16_image_gt = np.round(image * align_ratio).astype(np.uint16)
cv2.imwrite(image_path, uint16_image_gt)
return None
def generate_paths(folder, name):
id = name[:4]
image_path = os.path.join(folder, id+'.png')
alignratio_path = os.path.join(folder, id+'_alignratio.npy')
return image_path, alignratio_path
def save_img(img, img_path, mode='RGB'):
cv2.imwrite(img_path, img)
def save_npy(img, img_path):
img = np.squeeze(img)
np.save(img_path, img)
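# The PSNR helpers below assume images normalized to [0, 1], hence the peak value of 1.0.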
def calculate_psnr(img1, img2):
mse = np.mean((img1 - img2)**2)
if mse == 0:
return float('inf')
# return 20 * math.log10(255.0 / math.sqrt(mse))
return 20 * math.log10(1.0 / math.sqrt(mse))
def calculate_normalized_psnr(img1, img2, norm):
normalized_psnr = -10*np.log10(np.mean(np.power(img1/norm - img2/norm, 2)))
if normalized_psnr == 0:
return float('inf')
return normalized_psnr
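# mu-law tonemapping compresses HDR values into [0, 1] before the tonemapped PSNR is computed.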
def mu_tonemap(hdr_image, mu=5000):
return np.log(1 + mu * hdr_image) / np.log(1 + mu)
def tanh_norm_mu_tonemap(hdr_image, norm_value, mu=5000):
bounded_hdr = np.tanh(hdr_image / norm_value)
return mu_tonemap(bounded_hdr, mu)
def calculate_tonemapped_psnr(res, ref, percentile=99, gamma=2.24):
res = res ** gamma
ref = ref ** gamma
norm_perc = np.percentile(ref, percentile)
tonemapped_psnr = -10*np.log10(np.mean(np.power(tanh_norm_mu_tonemap(ref, norm_perc) - tanh_norm_mu_tonemap(res, norm_perc), 2)))
return tonemapped_psnr
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
datasafe-storage/datasafe-storage-impl-s3/src/test/java/de/adorsys/datasafe/storage/impl/s3/MultipartUploadS3SystemStorageServiceTest.java
|
package de.adorsys.datasafe.storage.impl.s3;
import de.adorsys.datasafe.types.api.resource.AbsoluteLocation;
import de.adorsys.datasafe.types.api.resource.BasePrivateResource;
import de.adorsys.datasafe.types.api.resource.PrivateResource;
import de.adorsys.datasafe.types.api.resource.WithCallback;
import de.adorsys.datasafe.types.api.shared.ContentGenerator;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
import org.junit.jupiter.params.provider.ValueSource;
import org.testcontainers.shaded.com.google.common.io.ByteStreams;
import org.testcontainers.shaded.org.bouncycastle.util.encoders.Hex;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.util.stream.Stream;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;
@Slf4j
public class MultipartUploadS3SystemStorageServiceTest extends S3SystemStorageServiceTest {
@TempDir
protected Path tempDir;
private static final int ONE_MB_IN_BYTES = 1024 * 1024;
private static final int ONE_MB = 1;
private static final int DEFAULT_TEST_FILE_SIZE_MB = 10;
@MethodSource("testFileSize")
@ParameterizedTest(name = "Run #{index} with data size: {0} Mb")
void testMultiPartUpload(int testFileSizeInMb) {
int testFileSizeInBytes = testFileSizeInMb * ONE_MB_IN_BYTES;
String testFileName = tempDir.toString() + "/test.txt";
generateTestFile(testFileName, testFileSizeInBytes);
log.info("Created test file {} with size {} bytes", testFileName, testFileSizeInBytes);
AbsoluteLocation<PrivateResource> privateLocation = new AbsoluteLocation<>(
BasePrivateResource.forPrivate(URI.create("s3://" + bucketName + "/file.txt")));
writeTestFileToS3(testFileName, privateLocation);
log.info("Data has been written to S3");
assertThat(checksumOfTestFile(testFileName)).isEqualTo(checksumOfFileFromS3(privateLocation));
}
@ValueSource
protected static Stream<Integer> testFileSize() {
return Stream.of(
ONE_MB, // 1Mb. The minimum contentSize for a multi part request is 5 MB, file with size < 5 mb uses simple output impl
getTestFileSizeInMb() //Size from env var LOAD_S3_TEST_FILE_SIZE_MB or default value DEFAULT_TEST_FILE_SIZE_MB
);
}
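    // System.getenv returns null when LOAD_S3_TEST_FILE_SIZE_MB is unset, and
    // Integer.parseInt(null) throws NumberFormatException, so the default size is used.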
private static int getTestFileSizeInMb() {
try {
return Integer.parseInt(System.getenv("LOAD_S3_TEST_FILE_SIZE_MB"));
} catch (NumberFormatException ex) {
return DEFAULT_TEST_FILE_SIZE_MB;
}
}
private void writeTestFileToS3(String testFilePath, AbsoluteLocation<PrivateResource> privateLocation) {
log.info("Copy stream of test file to s3");
try (OutputStream os = storageService.write(WithCallback.noCallback(privateLocation))) {
try(FileInputStream is = new FileInputStream(testFilePath)) {
ByteStreams.copy(is, os);
}
} catch (IOException e) {
fail(e.getMessage(), e);
}
}
private String checksumOfFileFromS3(AbsoluteLocation<PrivateResource> privateLocation) {
try(InputStream is = storageService.read(privateLocation)) {
return checksum(is);
} catch (IOException e) {
fail(e.getMessage(), e);
}
return "";
}
private void generateTestFile(String testFileName, int loadS3TestFileSizeMb) {
log.info("Starting write {} Mb file into {}", loadS3TestFileSizeMb, tempDir.toString());
try(FileOutputStream stream = new FileOutputStream(testFileName)) {
ByteStreams.copy(new ContentGenerator(loadS3TestFileSizeMb).generate(this.getClass().getSimpleName()), stream);
} catch (IOException e) {
fail(e.getMessage());
}
}
private String checksumOfTestFile(String testFileName) {
try(FileInputStream is = new FileInputStream(testFileName)) {
return checksum(is);
} catch (IOException e) {
fail(e.getMessage(), e);
}
return "";
}
@SneakyThrows
private String checksum(InputStream input) {
MessageDigest digest = MessageDigest.getInstance("MD5");
byte[] block = new byte[1024 * 8];
int length;
int bufferCounter = 0;
while ((length = input.read(block)) > 0) {
digest.update(block, 0, length);
log.trace("Counter checksum calculation: " + (bufferCounter++));
}
return Hex.toHexString(digest.digest());
}
}
|
[
"\"LOAD_S3_TEST_FILE_SIZE_MB\""
] |
[] |
[
"LOAD_S3_TEST_FILE_SIZE_MB"
] |
[]
|
["LOAD_S3_TEST_FILE_SIZE_MB"]
|
java
| 1 | 0 | |
nyc_data/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nyc_data.settings.dev")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
molecule/default/tests/test_role.py
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
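# Molecule exports MOLECULE_INVENTORY_FILE with the path to the Ansible inventory
# for the current test scenario.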
def test_unison(Sudo, Command, File):
with Sudo('vagrant'):
# Clean up any previous runs
Command('rm /vagrant/home/included/test1')
Command('rm /vagrant/home/included/excluded/test2')
Command('rm /vagrant/home/includeme')
Command('rm /vagrant/home/excludeme')
# Setup test directories and files
assert Command('mkdir -p /home/vagrant/included/excluded').rc == 0
assert Command('touch /home/vagrant/included/test1').rc == 0
assert Command('touch /home/vagrant/included/excluded/test2').rc == 0
assert Command('touch /home/vagrant/includeme').rc == 0
assert Command('touch /home/vagrant/excludeme').rc == 0
# Run Unison
# Note: have to run sudo with `--set-home` or Unison will
# attempt to write its log file to `/root`.
cmd = Command('sudo --user vagrant --set-home unison vagrant')
assert cmd.rc == 0
with Sudo('vagrant'):
# Verify
assert File('/vagrant/home/included/test1').is_file
assert not File('/vagrant/home/included/excluded/test2').exists
assert File('/vagrant/home/includeme').is_file
assert not File('/vagrant/home/excludeme').exists
|
[] |
[] |
[
"MOLECULE_INVENTORY_FILE"
] |
[]
|
["MOLECULE_INVENTORY_FILE"]
|
python
| 1 | 0 | |
vendor/github.com/dmitryk-dk/pb/v3/termutil/term_win.go
|
//go:build windows
package termutil
import (
"fmt"
"os"
"os/exec"
"strconv"
"syscall"
"unsafe"
)
var (
tty = os.Stdin
unlockSignals = []os.Signal{
os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL,
}
)
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
// GetConsoleScreenBufferInfo retrieves information about the
// specified console screen buffer.
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
// GetConsoleMode retrieves the current input mode of a console's
// input buffer or the current output mode of a console screen buffer.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
getConsoleMode = kernel32.NewProc("GetConsoleMode")
// SetConsoleMode sets the input mode of a console's input buffer
// or the output mode of a console screen buffer.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
setConsoleMode = kernel32.NewProc("SetConsoleMode")
// SetConsoleCursorPosition sets the cursor position in the
// specified console screen buffer.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx
setConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
mingw = isMingw()
)
type (
// Defines the coordinates of the upper left and lower right corners
// of a rectangle.
// See
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311(v=vs.85).aspx
smallRect struct {
Left, Top, Right, Bottom int16
}
// Defines the coordinates of a character cell in a console screen
// buffer. The origin of the coordinate system (0,0) is at the top, left cell
// of the buffer.
// See
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119(v=vs.85).aspx
coordinates struct {
X, Y int16
}
word int16
// Contains information about a console screen buffer.
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx
consoleScreenBufferInfo struct {
dwSize coordinates
dwCursorPosition coordinates
wAttributes word
srWindow smallRect
dwMaximumWindowSize coordinates
}
)
// TerminalWidth returns width of the terminal.
func TerminalWidth() (width int, err error) {
if mingw {
return termWidthTPut()
}
return termWidthCmd()
}
func termWidthCmd() (width int, err error) {
var info consoleScreenBufferInfo
_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)
if e != 0 {
return 0, error(e)
}
return int(info.dwSize.X) - 1, nil
}
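// isMingw reports whether we appear to be running under an MSYS/MinGW terminal (e.g. mintty),
// where the console screen-buffer query above is not reliable and "tput cols" is used instead.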
func isMingw() bool {
return os.Getenv("MINGW_PREFIX") != "" || os.Getenv("MSYSTEM") == "MINGW64"
}
func termWidthTPut() (width int, err error) {
// TODO: maybe anybody knows a better way to get it on mintty...
var res []byte
cmd := exec.Command("tput", "cols")
cmd.Stdin = os.Stdin
if res, err = cmd.CombinedOutput(); err != nil {
return 0, fmt.Errorf("%s: %v", string(res), err)
}
if len(res) > 1 {
res = res[:len(res)-1]
}
return strconv.Atoi(string(res))
}
func GetCursorPos() (pos coordinates, err error) {
var info consoleScreenBufferInfo
_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)
if e != 0 {
return info.dwCursorPosition, error(e)
}
return info.dwCursorPosition, nil
}
func SetCursorPos(pos coordinates) error {
_, _, e := syscall.Syscall(setConsoleCursorPosition.Addr(), 2, uintptr(syscall.Stdout), uintptr(uint32(uint16(pos.Y))<<16|uint32(uint16(pos.X))), 0)
if e != 0 {
return error(e)
}
return nil
}
var oldState word
func lockEcho() (err error) {
if _, _, e := syscall.Syscall(getConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&oldState)), 0); e != 0 {
err = fmt.Errorf("Can't get terminal settings: %v", e)
return
}
newState := oldState
const ENABLE_ECHO_INPUT = 0x0004
const ENABLE_LINE_INPUT = 0x0002
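	// Clearing ENABLE_LINE_INPUT and ENABLE_ECHO_INPUT disables line buffering and input echo.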
newState = newState & (^(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT))
if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(newState), 0); e != 0 {
err = fmt.Errorf("Can't set terminal settings: %v", e)
return
}
return
}
func unlockEcho() (err error) {
if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(oldState), 0); e != 0 {
err = fmt.Errorf("Can't set terminal settings")
}
return
}
|
[
"\"MINGW_PREFIX\"",
"\"MSYSTEM\""
] |
[] |
[
"MINGW_PREFIX",
"MSYSTEM"
] |
[]
|
["MINGW_PREFIX", "MSYSTEM"]
|
go
| 2 | 0 | |
percms/celery.py
|
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'percms.settings')
app = Celery('percms', backend='redis://localhost')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
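# Example task: bind=True makes the task instance available as `self`.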
@app.task(bind=True)
def debug_task(self, x, y):
print('%s + %s = %s ' % (x, y, x+y))
return x + y
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
extractor/extractor.go
|
package extractor
import (
"fmt"
"go/ast"
"go/constant"
"go/scanner"
"go/token"
"go/types"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/github/codeql-go/extractor/dbscheme"
"github.com/github/codeql-go/extractor/srcarchive"
"github.com/github/codeql-go/extractor/trap"
"github.com/github/codeql-go/extractor/util"
"golang.org/x/tools/go/packages"
)
// Extract extracts the packages specified by the given patterns
func Extract(patterns []string) error {
return ExtractWithFlags(nil, patterns)
}
// ExtractWithFlags extracts the packages specified by the given patterns and build flags
func ExtractWithFlags(buildFlags []string, patterns []string) error {
cfg := &packages.Config{
Mode: packages.NeedName | packages.NeedFiles |
packages.NeedCompiledGoFiles |
packages.NeedImports | packages.NeedDeps |
packages.NeedTypes | packages.NeedTypesSizes |
packages.NeedTypesInfo | packages.NeedSyntax,
BuildFlags: buildFlags,
}
pkgs, err := packages.Load(cfg, patterns...)
modFlags := make([]string, 0, 1)
for _, flag := range buildFlags {
if strings.HasPrefix(flag, "-mod=") {
modFlags = append(modFlags, flag)
}
}
if err != nil {
return err
}
if len(pkgs) == 0 {
log.Printf("No packages found.")
}
extractUniverseScope()
// a map of package path to package root directory (currently the module root or the source directory)
pkgRoots := make(map[string]string)
// a map of package path to source code directory
pkgDirs := make(map[string]string)
// root directories of packages that we want to extract
wantedRoots := make(map[string]bool)
// recursively visit all packages in depth-first order;
// on the way down, associate each package scope with its corresponding package,
// and on the way up extract the package's scope
packages.Visit(pkgs, func(pkg *packages.Package) bool {
return true
}, func(pkg *packages.Package) {
if _, ok := pkgRoots[pkg.PkgPath]; !ok {
mdir := util.GetModDir(pkg.PkgPath, modFlags...)
pdir := util.GetPkgDir(pkg.PkgPath, modFlags...)
// GetModDir returns the empty string if the module directory cannot be determined, e.g. if the package
// is not using modules. If this is the case, fall back to the package directory
if mdir == "" {
mdir = pdir
}
pkgRoots[pkg.PkgPath] = mdir
pkgDirs[pkg.PkgPath] = pdir
}
tw, err := trap.NewWriter(pkg.PkgPath, pkg)
if err != nil {
log.Fatal(err)
}
defer tw.Close()
scope := extractPackageScope(tw, pkg)
tw.ForEachObject(extractObjectType)
lbl := tw.Labeler.GlobalID(pkg.PkgPath + ";pkg")
dbscheme.PackagesTable.Emit(tw, lbl, pkg.Name, pkg.PkgPath, scope)
if len(pkg.Errors) != 0 {
log.Printf("Warning: encountered errors extracting package `%s`:", pkg.PkgPath)
for i, err := range pkg.Errors {
log.Printf(" %s", err.Error())
extractError(tw, err, lbl, i)
}
}
})
for _, pkg := range pkgs {
if pkgRoots[pkg.PkgPath] == "" {
log.Fatalf("Unable to get a source directory for input package %s.", pkg.PkgPath)
}
wantedRoots[pkgRoots[pkg.PkgPath]] = true
}
// this sets the number of threads that the Go runtime will spawn; this is separate
// from the number of goroutines that the program spawns, which are scheduled into
// the system threads by the Go runtime scheduler
threads := os.Getenv("LGTM_THREADS")
if maxprocs, err := strconv.Atoi(threads); err == nil && maxprocs > 0 {
log.Printf("Max threads set to %d", maxprocs)
runtime.GOMAXPROCS(maxprocs)
} else if threads != "" {
log.Printf("Warning: LGTM_THREADS value %s is not valid, defaulting to using all available threads.", threads)
}
// if the value is empty or not set, use the Go default, which is the number of cores
// available since Go 1.5, but is subject to change
var maxgoroutines int
if maxgoroutines, err = strconv.Atoi(util.Getenv(
"CODEQL_EXTRACTOR_GO_MAX_GOROUTINES",
"SEMMLE_MAX_GOROUTINES",
)); err != nil {
maxgoroutines = 32
} else {
log.Printf("Max goroutines set to %d", maxgoroutines)
}
var wg sync.WaitGroup
// this semaphore is used to limit the number of files that are open at once;
// this is to prevent the extractor from running into issues with caps on the
// number of open files that can be held by one process
fdSem := newSemaphore(100)
// this semaphore is used to limit the number of goroutines spawned, so we
// don't run into memory issues
goroutineSem := newSemaphore(maxgoroutines)
sep := regexp.QuoteMeta(string(filepath.Separator))
// if a path matches this regexp, we don't want to extract this package. Currently, it checks
// - that the path does not contain a `..` segment, and
// - the path does not contain a `vendor` directory.
noExtractRe := regexp.MustCompile(`.*(^|` + sep + `)(\.\.|vendor)($|` + sep + `).*`)
// extract AST information for all packages
packages.Visit(pkgs, func(pkg *packages.Package) bool {
return true
}, func(pkg *packages.Package) {
for root, _ := range wantedRoots {
relDir, err := filepath.Rel(root, pkgDirs[pkg.PkgPath])
if err != nil || noExtractRe.MatchString(relDir) {
// if the path can't be made relative or matches the noExtract regexp skip it
continue
}
extractPackage(pkg, &wg, goroutineSem, fdSem)
return
}
})
wg.Wait()
cwd, err := os.Getwd()
if err != nil {
log.Printf("Warning: unable to get working directory: %s", err.Error())
log.Println("Skipping go.mod extraction")
}
rcwd, err := filepath.EvalSymlinks(cwd)
if err == nil {
cwd = rcwd
}
goModPaths := make([]string, 0, 10)
filepath.Walk(cwd, func(path string, info os.FileInfo, err error) error {
if filepath.Base(path) == "go.mod" && info != nil && info.Mode().IsRegular() {
if err != nil {
log.Printf("Found go.mod with path %s, but encountered error %s", path, err.Error())
}
goModPaths = append(goModPaths, path)
}
return nil
})
for _, path := range goModPaths {
log.Printf("Extracting %s", path)
start := time.Now()
err := extractGoMod(path)
if err != nil {
log.Printf("Failed to extract go.mod: %s", err.Error())
}
end := time.Since(start)
log.Printf("Done extracting %s (%dms)", path, end.Nanoseconds()/1000000)
}
return nil
}
// extractUniverseScope extracts symbol table information for the universe scope
func extractUniverseScope() {
tw, err := trap.NewWriter("universe", nil)
if err != nil {
log.Fatal(err)
}
defer tw.Close()
lbl := tw.Labeler.ScopeID(types.Universe, nil)
dbscheme.ScopesTable.Emit(tw, lbl, dbscheme.UniverseScopeType.Index())
extractObjects(tw, types.Universe, lbl)
}
// extractObjects extracts all objects declared in the given scope
func extractObjects(tw *trap.Writer, scope *types.Scope, scopeLabel trap.Label) {
for _, name := range scope.Names() {
obj := scope.Lookup(name)
lbl, exists := tw.Labeler.ScopedObjectID(obj, extractType(tw, obj.Type()))
if !exists {
extractObject(tw, obj, lbl)
}
if obj.Parent() != scope {
// this can happen if a scope is embedded into another with a `.` import.
continue
}
dbscheme.ObjectScopesTable.Emit(tw, lbl, scopeLabel)
}
}
// extractMethod extracts a method `meth` and emits it to the objects table, then returns its label
func extractMethod(tw *trap.Writer, meth *types.Func) trap.Label {
// get the receiver type of the method
recvtyp := meth.Type().(*types.Signature).Recv().Type()
recvlbl := extractType(tw, recvtyp) // ensure receiver type has been extracted
// if the method label does not exist, extract it
methlbl, exists := tw.Labeler.MethodID(meth, recvlbl)
if !exists {
extractObject(tw, meth, methlbl)
}
return methlbl
}
// extractObject extracts a single object and emits it to the objects table.
func extractObject(tw *trap.Writer, obj types.Object, lbl trap.Label) {
name := obj.Name()
isBuiltin := obj.Parent() == types.Universe
var kind int
switch obj.(type) {
case *types.PkgName:
kind = dbscheme.PkgObjectType.Index()
case *types.TypeName:
if isBuiltin {
kind = dbscheme.BuiltinTypeObjectType.Index()
} else {
kind = dbscheme.DeclTypeObjectType.Index()
}
case *types.Const:
if isBuiltin {
kind = dbscheme.BuiltinConstObjectType.Index()
} else {
kind = dbscheme.DeclConstObjectType.Index()
}
case *types.Nil:
kind = dbscheme.BuiltinConstObjectType.Index()
case *types.Var:
kind = dbscheme.DeclVarObjectType.Index()
case *types.Builtin:
kind = dbscheme.BuiltinFuncObjectType.Index()
case *types.Func:
kind = dbscheme.DeclFuncObjectType.Index()
case *types.Label:
kind = dbscheme.LabelObjectType.Index()
default:
log.Fatalf("unknown object of type %T", obj)
}
dbscheme.ObjectsTable.Emit(tw, lbl, kind, name)
// for methods, additionally extract information about the receiver
if sig, ok := obj.Type().(*types.Signature); ok {
if recv := sig.Recv(); recv != nil {
recvlbl, exists := tw.Labeler.ReceiverObjectID(recv, lbl)
if !exists {
extractObject(tw, recv, recvlbl)
}
dbscheme.MethodReceiversTable.Emit(tw, lbl, recvlbl)
}
}
}
// extractObjectType extracts type and receiver information for a given object
func extractObjectType(tw *trap.Writer, obj types.Object, lbl trap.Label) {
if tp := obj.Type(); tp != nil {
dbscheme.ObjectTypesTable.Emit(tw, lbl, extractType(tw, tp))
}
}
var (
// file:line:col
threePartPos = regexp.MustCompile(`^(.+):(\d+):(\d+)$`)
// file:line
twoPartPos = regexp.MustCompile(`^(.+):(\d+)$`)
)
// extractError extracts the message and location of a frontend error
func extractError(tw *trap.Writer, err packages.Error, pkglbl trap.Label, idx int) {
var (
lbl = tw.Labeler.FreshID()
kind = dbscheme.ErrorTypes[err.Kind].Index()
pos = err.Pos
file = ""
line = 0
col = 0
e error
)
if parts := threePartPos.FindStringSubmatch(pos); parts != nil {
// "file:line:col"
col, e = strconv.Atoi(parts[3])
if e != nil {
log.Printf("Warning: malformed column number `%s`: %v", parts[3], e)
}
line, e = strconv.Atoi(parts[2])
if e != nil {
log.Printf("Warning: malformed line number `%s`: %v", parts[2], e)
}
file = parts[1]
} else if parts := twoPartPos.FindStringSubmatch(pos); parts != nil {
// "file:line"
line, e = strconv.Atoi(parts[2])
if e != nil {
log.Printf("Warning: malformed line number `%s`: %v", parts[2], e)
}
file = parts[1]
} else if pos != "" && pos != "-" {
log.Printf("Warning: malformed error position `%s`", pos)
}
file = filepath.ToSlash(srcarchive.TransformPath(file))
dbscheme.ErrorsTable.Emit(tw, lbl, kind, err.Msg, pos, file, line, col, pkglbl, idx)
}
// extractPackage extracts AST information for all files in the given package
func extractPackage(pkg *packages.Package, wg *sync.WaitGroup,
goroutineSem *semaphore, fdSem *semaphore) {
for _, astFile := range pkg.Syntax {
wg.Add(1)
goroutineSem.acquire(1)
go func(astFile *ast.File) {
err := extractFile(astFile, pkg, fdSem)
if err != nil {
log.Fatal(err)
}
goroutineSem.release(1)
wg.Done()
}(astFile)
}
}
// normalizedPath computes the normalized path (with symlinks resolved) for the given file
func normalizedPath(ast *ast.File, fset *token.FileSet) string {
file := fset.File(ast.Package).Name()
path, err := filepath.EvalSymlinks(file)
if err != nil {
return file
}
return path
}
// extractFile extracts AST information for the given file
func extractFile(ast *ast.File, pkg *packages.Package, fdSem *semaphore) error {
fset := pkg.Fset
path := normalizedPath(ast, fset)
fdSem.acquire(3)
log.Printf("Extracting %s", path)
start := time.Now()
defer fdSem.release(1)
tw, err := trap.NewWriter(path, pkg)
if err != nil {
fdSem.release(2)
return err
}
defer tw.Close()
err = srcarchive.Add(path)
fdSem.release(2)
if err != nil {
return err
}
extractFileInfo(tw, path)
extractScopes(tw, ast, pkg)
extractFileNode(tw, ast)
tw.ForEachObject(extractObjectType)
extractNumLines(tw, path, ast)
end := time.Since(start)
log.Printf("Done extracting %s (%dms)", path, end.Nanoseconds()/1000000)
return nil
}
// stemAndExt splits a given file name into its stem (the part before the last '.')
// and extension (the part after the last '.')
func stemAndExt(base string) (string, string) {
if i := strings.LastIndexByte(base, '.'); i >= 0 {
return base[:i], base[i+1:]
}
return base, ""
}
// extractFileInfo extracts file-system level information for the given file, populating
// the `files` and `containerparent` tables
func extractFileInfo(tw *trap.Writer, file string) {
path := filepath.ToSlash(srcarchive.TransformPath(file))
components := strings.Split(path, "/")
parentPath := ""
var parentLbl trap.Label
for i, component := range components {
if i == 0 {
if component == "" {
path = "/"
} else {
path = component
}
} else {
path = parentPath + "/" + component
}
if i == len(components)-1 {
stem, ext := stemAndExt(component)
lbl := tw.Labeler.FileLabel()
dbscheme.FilesTable.Emit(tw, lbl, path, stem, ext, 0)
dbscheme.ContainerParentTable.Emit(tw, parentLbl, lbl)
extractLocation(tw, lbl, 0, 0, 0, 0)
break
}
lbl := tw.Labeler.GlobalID(path + ";folder")
dbscheme.FoldersTable.Emit(tw, lbl, path, component)
if i > 0 {
dbscheme.ContainerParentTable.Emit(tw, parentLbl, lbl)
}
if path != "/" {
parentPath = path
}
parentLbl = lbl
}
}
// extractLocation emits a location entity for the given entity
func extractLocation(tw *trap.Writer, entity trap.Label, sl int, sc int, el int, ec int) {
lbl := tw.Labeler.FileLabel()
locLbl := tw.Labeler.GlobalID(fmt.Sprintf("loc,{%s},%d,%d,%d,%d", lbl.String(), sl, sc, el, ec))
dbscheme.LocationsDefaultTable.Emit(tw, locLbl, lbl, sl, sc, el, ec)
dbscheme.HasLocationTable.Emit(tw, entity, locLbl)
}
// extractNodeLocation extracts location information for the given node
func extractNodeLocation(tw *trap.Writer, nd ast.Node, lbl trap.Label) {
if nd == nil {
return
}
fset := tw.Package.Fset
start, end := fset.Position(nd.Pos()), fset.Position(nd.End())
extractLocation(tw, lbl, start.Line, start.Column, end.Line, end.Column-1)
}
// extractPackageScope extracts symbol table information for the given package
func extractPackageScope(tw *trap.Writer, pkg *packages.Package) trap.Label {
pkgScope := pkg.Types.Scope()
pkgScopeLabel := tw.Labeler.ScopeID(pkgScope, pkg.Types)
dbscheme.ScopesTable.Emit(tw, pkgScopeLabel, dbscheme.PackageScopeType.Index())
dbscheme.ScopeNestingTable.Emit(tw, pkgScopeLabel, tw.Labeler.ScopeID(types.Universe, nil))
extractObjects(tw, pkgScope, pkgScopeLabel)
return pkgScopeLabel
}
// extractScopeLocation extracts location information for the given scope
func extractScopeLocation(tw *trap.Writer, scope *types.Scope, lbl trap.Label) {
fset := tw.Package.Fset
start, end := fset.Position(scope.Pos()), fset.Position(scope.End())
extractLocation(tw, lbl, start.Line, start.Column, end.Line, end.Column-1)
}
// extractScopes extracts symbol table information for the package scope and all local scopes
// of the given package
func extractScopes(tw *trap.Writer, nd *ast.File, pkg *packages.Package) {
pkgScopeLabel := extractPackageScope(tw, pkg)
fileScope := pkg.TypesInfo.Scopes[nd]
if fileScope != nil {
extractLocalScope(tw, fileScope, pkgScopeLabel)
}
}
// extractLocalScope extracts symbol table information for the given scope and all its nested scopes
func extractLocalScope(tw *trap.Writer, scope *types.Scope, parentScopeLabel trap.Label) {
scopeLabel := tw.Labeler.ScopeID(scope, nil)
dbscheme.ScopesTable.Emit(tw, scopeLabel, dbscheme.LocalScopeType.Index())
extractScopeLocation(tw, scope, scopeLabel)
dbscheme.ScopeNestingTable.Emit(tw, scopeLabel, parentScopeLabel)
for i := 0; i < scope.NumChildren(); i++ {
childScope := scope.Child(i)
extractLocalScope(tw, childScope, scopeLabel)
}
extractObjects(tw, scope, scopeLabel)
}
// extractFileNode extracts AST information for the given file and all nodes contained in it
func extractFileNode(tw *trap.Writer, nd *ast.File) {
lbl := tw.Labeler.FileLabel()
extractExpr(tw, nd.Name, lbl, 0)
for i, decl := range nd.Decls {
extractDecl(tw, decl, lbl, i)
}
for i, cg := range nd.Comments {
extractCommentGroup(tw, cg, lbl, i)
}
extractDoc(tw, nd.Doc, lbl)
emitScopeNodeInfo(tw, nd, lbl)
}
// extractDoc extracts information about a doc comment group associated with a given element
func extractDoc(tw *trap.Writer, doc *ast.CommentGroup, elt trap.Label) {
if doc != nil {
dbscheme.DocCommentsTable.Emit(tw, elt, tw.Labeler.LocalID(doc))
}
}
// extractCommentGroup extracts information about a doc comment group
func extractCommentGroup(tw *trap.Writer, cg *ast.CommentGroup, parent trap.Label, idx int) {
lbl := tw.Labeler.LocalID(cg)
dbscheme.CommentGroupsTable.Emit(tw, lbl, parent, idx)
extractNodeLocation(tw, cg, lbl)
for i, c := range cg.List {
extractComment(tw, c, lbl, i)
}
}
// extractComment extracts information about a given comment
func extractComment(tw *trap.Writer, c *ast.Comment, parent trap.Label, idx int) {
lbl := tw.Labeler.LocalID(c)
rawText := c.Text
var kind int
var text string
if rawText[:2] == "//" {
kind = dbscheme.SlashSlashComment.Index()
text = rawText[2:]
} else {
kind = dbscheme.SlashStarComment.Index()
text = rawText[2 : len(rawText)-2]
}
dbscheme.CommentsTable.Emit(tw, lbl, kind, parent, idx, text)
extractNodeLocation(tw, c, lbl)
}
// emitScopeNodeInfo associates an AST node with its induced scope, if any
func emitScopeNodeInfo(tw *trap.Writer, nd ast.Node, lbl trap.Label) {
scope, exists := tw.Package.TypesInfo.Scopes[nd]
if exists {
dbscheme.ScopeNodesTable.Emit(tw, lbl, tw.Labeler.ScopeID(scope, tw.Package.Types))
}
}
// extractExpr extracts AST information for the given expression and all its subexpressions
func extractExpr(tw *trap.Writer, expr ast.Expr, parent trap.Label, idx int) {
if expr == nil {
return
}
lbl := tw.Labeler.LocalID(expr)
extractTypeOf(tw, expr, lbl)
var kind int
switch expr := expr.(type) {
case *ast.BadExpr:
kind = dbscheme.BadExpr.Index()
case *ast.Ident:
if expr == nil {
return
}
kind = dbscheme.IdentExpr.Index()
dbscheme.LiteralsTable.Emit(tw, lbl, expr.Name, expr.Name)
def := tw.Package.TypesInfo.Defs[expr]
if def != nil {
defTyp := extractType(tw, def.Type())
objlbl, exists := tw.Labeler.LookupObjectID(def, defTyp)
if objlbl == trap.InvalidLabel {
log.Printf("Omitting def binding to unknown object %v", def)
} else {
if !exists {
extractObject(tw, def, objlbl)
}
dbscheme.DefsTable.Emit(tw, lbl, objlbl)
}
}
use := tw.Package.TypesInfo.Uses[expr]
if use != nil {
useTyp := extractType(tw, use.Type())
objlbl, exists := tw.Labeler.LookupObjectID(use, useTyp)
if objlbl == trap.InvalidLabel {
log.Printf("Omitting use binding to unknown object %v", use)
} else {
if !exists {
extractObject(tw, use, objlbl)
}
dbscheme.UsesTable.Emit(tw, lbl, objlbl)
}
}
case *ast.Ellipsis:
if expr == nil {
return
}
kind = dbscheme.EllipsisExpr.Index()
extractExpr(tw, expr.Elt, lbl, 0)
case *ast.BasicLit:
if expr == nil {
return
}
value := ""
switch expr.Kind {
case token.INT:
ival, _ := strconv.ParseInt(expr.Value, 0, 64)
value = strconv.FormatInt(ival, 10)
kind = dbscheme.IntLitExpr.Index()
case token.FLOAT:
value = expr.Value
kind = dbscheme.FloatLitExpr.Index()
case token.IMAG:
value = expr.Value
kind = dbscheme.ImagLitExpr.Index()
case token.CHAR:
value, _ = strconv.Unquote(expr.Value)
kind = dbscheme.CharLitExpr.Index()
case token.STRING:
value, _ = strconv.Unquote(expr.Value)
kind = dbscheme.StringLitExpr.Index()
default:
log.Fatalf("unknown literal kind %v", expr.Kind)
}
dbscheme.LiteralsTable.Emit(tw, lbl, value, expr.Value)
case *ast.FuncLit:
if expr == nil {
return
}
kind = dbscheme.FuncLitExpr.Index()
extractExpr(tw, expr.Type, lbl, 0)
extractStmt(tw, expr.Body, lbl, 1)
case *ast.CompositeLit:
if expr == nil {
return
}
kind = dbscheme.CompositeLitExpr.Index()
extractExpr(tw, expr.Type, lbl, 0)
extractExprs(tw, expr.Elts, lbl, 1, 1)
case *ast.ParenExpr:
if expr == nil {
return
}
kind = dbscheme.ParenExpr.Index()
extractExpr(tw, expr.X, lbl, 0)
case *ast.SelectorExpr:
if expr == nil {
return
}
kind = dbscheme.SelectorExpr.Index()
extractExpr(tw, expr.X, lbl, 0)
extractExpr(tw, expr.Sel, lbl, 1)
case *ast.IndexExpr:
if expr == nil {
return
}
kind = dbscheme.IndexExpr.Index()
extractExpr(tw, expr.X, lbl, 0)
extractExpr(tw, expr.Index, lbl, 1)
case *ast.SliceExpr:
if expr == nil {
return
}
kind = dbscheme.SliceExpr.Index()
extractExpr(tw, expr.X, lbl, 0)
extractExpr(tw, expr.Low, lbl, 1)
extractExpr(tw, expr.High, lbl, 2)
extractExpr(tw, expr.Max, lbl, 3)
case *ast.TypeAssertExpr:
if expr == nil {
return
}
kind = dbscheme.TypeAssertExpr.Index()
extractExpr(tw, expr.X, lbl, 0)
extractExpr(tw, expr.Type, lbl, 1)
case *ast.CallExpr:
if expr == nil {
return
}
kind = dbscheme.CallOrConversionExpr.Index()
extractExpr(tw, expr.Fun, lbl, 0)
extractExprs(tw, expr.Args, lbl, 1, 1)
case *ast.StarExpr:
if expr == nil {
return
}
kind = dbscheme.StarExpr.Index()
extractExpr(tw, expr.X, lbl, 0)
case *ast.KeyValueExpr:
if expr == nil {
return
}
kind = dbscheme.KeyValueExpr.Index()
extractExpr(tw, expr.Key, lbl, 0)
extractExpr(tw, expr.Value, lbl, 1)
case *ast.UnaryExpr:
if expr == nil {
return
}
tp := dbscheme.UnaryExprs[expr.Op]
if tp == nil {
log.Fatalf("unsupported unary operator %s", expr.Op)
}
kind = tp.Index()
extractExpr(tw, expr.X, lbl, 0)
case *ast.BinaryExpr:
if expr == nil {
return
}
tp := dbscheme.BinaryExprs[expr.Op]
if tp == nil {
log.Fatalf("unsupported binary operator %s", expr.Op)
}
kind = tp.Index()
extractExpr(tw, expr.X, lbl, 0)
extractExpr(tw, expr.Y, lbl, 1)
case *ast.ArrayType:
if expr == nil {
return
}
kind = dbscheme.ArrayTypeExpr.Index()
extractExpr(tw, expr.Len, lbl, 0)
extractExpr(tw, expr.Elt, lbl, 1)
case *ast.StructType:
if expr == nil {
return
}
kind = dbscheme.StructTypeExpr.Index()
extractFields(tw, expr.Fields, lbl, 0, 1)
case *ast.FuncType:
if expr == nil {
return
}
kind = dbscheme.FuncTypeExpr.Index()
extractFields(tw, expr.Params, lbl, 0, 1)
extractFields(tw, expr.Results, lbl, -1, -1)
emitScopeNodeInfo(tw, expr, lbl)
case *ast.InterfaceType:
if expr == nil {
return
}
kind = dbscheme.InterfaceTypeExpr.Index()
extractFields(tw, expr.Methods, lbl, 0, 1)
case *ast.MapType:
if expr == nil {
return
}
kind = dbscheme.MapTypeExpr.Index()
extractExpr(tw, expr.Key, lbl, 0)
extractExpr(tw, expr.Value, lbl, 1)
case *ast.ChanType:
if expr == nil {
return
}
tp := dbscheme.ChanTypeExprs[expr.Dir]
if tp == nil {
log.Fatalf("unsupported channel direction %v", expr.Dir)
}
kind = tp.Index()
extractExpr(tw, expr.Value, lbl, 0)
default:
log.Fatalf("unknown expression of type %T", expr)
}
dbscheme.ExprsTable.Emit(tw, lbl, kind, parent, idx)
extractNodeLocation(tw, expr, lbl)
extractValueOf(tw, expr, lbl)
}
// extractExprs extracts AST information for a list of expressions, which are children of
// the given parent
// `idx` is the index of the first child in the list, and `dir` is the index increment of
// each child over its preceding child (usually either 1 for assigning increasing indices, or
// -1 for decreasing indices)
func extractExprs(tw *trap.Writer, exprs []ast.Expr, parent trap.Label, idx int, dir int) {
if exprs != nil {
for _, expr := range exprs {
extractExpr(tw, expr, parent, idx)
idx += dir
}
}
}
// extractTypeOf looks up the type of `expr`, extracts it if it hasn't previously been
// extracted, and associates it with `expr` in the `type_of` table
func extractTypeOf(tw *trap.Writer, expr ast.Expr, lbl trap.Label) {
tp := tw.Package.TypesInfo.TypeOf(expr)
if tp != nil {
tplbl := extractType(tw, tp)
dbscheme.TypeOfTable.Emit(tw, lbl, tplbl)
}
}
// extractValueOf looks up the value of `expr`, and associates it with `expr` in
// the `consts` table
func extractValueOf(tw *trap.Writer, expr ast.Expr, lbl trap.Label) {
tpVal := tw.Package.TypesInfo.Types[expr]
if tpVal.Value != nil {
// if Value is non-nil, the expression has a constant value
// note that string literals in import statements do not have an associated
// Value and so do not get added to the table
var value string
exact := tpVal.Value.ExactString()
switch tpVal.Value.Kind() {
case constant.String:
// we need to unquote strings
value = constant.StringVal(tpVal.Value)
exact = constant.StringVal(tpVal.Value)
case constant.Float:
flval, _ := constant.Float64Val(tpVal.Value)
value = fmt.Sprintf("%.20g", flval)
case constant.Complex:
real, _ := constant.Float64Val(constant.Real(tpVal.Value))
imag, _ := constant.Float64Val(constant.Imag(tpVal.Value))
value = fmt.Sprintf("(%.20g + %.20gi)", real, imag)
default:
value = tpVal.Value.ExactString()
}
dbscheme.ConstValuesTable.Emit(tw, lbl, value, exact)
} else if tpVal.IsNil() {
dbscheme.ConstValuesTable.Emit(tw, lbl, "nil", "nil")
}
}
// extractFields extracts AST information for a list of fields, which are children of
// the given parent
// `idx` is the index of the first child in the list, and `dir` is the index increment of
// each child over its preceding child (usually either 1 for assigning increasing indices, or
// -1 for decreasing indices)
func extractFields(tw *trap.Writer, fields *ast.FieldList, parent trap.Label, idx int, dir int) {
if fields == nil || fields.List == nil {
return
}
for _, field := range fields.List {
lbl := tw.Labeler.LocalID(field)
dbscheme.FieldsTable.Emit(tw, lbl, parent, idx)
extractNodeLocation(tw, field, lbl)
if field.Names != nil {
for i, name := range field.Names {
extractExpr(tw, name, lbl, i+1)
}
}
extractExpr(tw, field.Type, lbl, 0)
extractExpr(tw, field.Tag, lbl, -1)
extractDoc(tw, field.Doc, lbl)
idx += dir
}
}
// extractStmt extracts AST information for a given statement and all other statements or expressions
// nested inside it
func extractStmt(tw *trap.Writer, stmt ast.Stmt, parent trap.Label, idx int) {
if stmt == nil {
return
}
lbl := tw.Labeler.LocalID(stmt)
var kind int
switch stmt := stmt.(type) {
case *ast.BadStmt:
kind = dbscheme.BadStmtType.Index()
case *ast.DeclStmt:
if stmt == nil {
return
}
kind = dbscheme.DeclStmtType.Index()
extractDecl(tw, stmt.Decl, lbl, 0)
case *ast.EmptyStmt:
kind = dbscheme.EmptyStmtType.Index()
case *ast.LabeledStmt:
if stmt == nil {
return
}
kind = dbscheme.LabeledStmtType.Index()
extractExpr(tw, stmt.Label, lbl, 0)
extractStmt(tw, stmt.Stmt, lbl, 1)
case *ast.ExprStmt:
if stmt == nil {
return
}
kind = dbscheme.ExprStmtType.Index()
extractExpr(tw, stmt.X, lbl, 0)
case *ast.SendStmt:
if stmt == nil {
return
}
kind = dbscheme.SendStmtType.Index()
extractExpr(tw, stmt.Chan, lbl, 0)
extractExpr(tw, stmt.Value, lbl, 1)
case *ast.IncDecStmt:
if stmt == nil {
return
}
if stmt.Tok == token.INC {
kind = dbscheme.IncStmtType.Index()
} else if stmt.Tok == token.DEC {
kind = dbscheme.DecStmtType.Index()
} else {
log.Fatalf("unsupported increment/decrement operator %v", stmt.Tok)
}
extractExpr(tw, stmt.X, lbl, 0)
case *ast.AssignStmt:
if stmt == nil {
return
}
tp := dbscheme.AssignStmtTypes[stmt.Tok]
if tp == nil {
log.Fatalf("unsupported assignment statement with operator %v", stmt.Tok)
}
kind = tp.Index()
extractExprs(tw, stmt.Lhs, lbl, -1, -1)
extractExprs(tw, stmt.Rhs, lbl, 1, 1)
case *ast.GoStmt:
if stmt == nil {
return
}
kind = dbscheme.GoStmtType.Index()
extractExpr(tw, stmt.Call, lbl, 0)
case *ast.DeferStmt:
if stmt == nil {
return
}
kind = dbscheme.DeferStmtType.Index()
extractExpr(tw, stmt.Call, lbl, 0)
case *ast.ReturnStmt:
kind = dbscheme.ReturnStmtType.Index()
extractExprs(tw, stmt.Results, lbl, 0, 1)
case *ast.BranchStmt:
if stmt == nil {
return
}
switch stmt.Tok {
case token.BREAK:
kind = dbscheme.BreakStmtType.Index()
case token.CONTINUE:
kind = dbscheme.ContinueStmtType.Index()
case token.GOTO:
kind = dbscheme.GotoStmtType.Index()
case token.FALLTHROUGH:
kind = dbscheme.FallthroughStmtType.Index()
default:
log.Fatalf("unsupported branch statement type %v", stmt.Tok)
}
extractExpr(tw, stmt.Label, lbl, 0)
case *ast.BlockStmt:
if stmt == nil {
return
}
kind = dbscheme.BlockStmtType.Index()
extractStmts(tw, stmt.List, lbl, 0, 1)
emitScopeNodeInfo(tw, stmt, lbl)
case *ast.IfStmt:
if stmt == nil {
return
}
kind = dbscheme.IfStmtType.Index()
extractStmt(tw, stmt.Init, lbl, 0)
extractExpr(tw, stmt.Cond, lbl, 1)
extractStmt(tw, stmt.Body, lbl, 2)
extractStmt(tw, stmt.Else, lbl, 3)
emitScopeNodeInfo(tw, stmt, lbl)
case *ast.CaseClause:
if stmt == nil {
return
}
kind = dbscheme.CaseClauseType.Index()
extractExprs(tw, stmt.List, lbl, -1, -1)
extractStmts(tw, stmt.Body, lbl, 0, 1)
emitScopeNodeInfo(tw, stmt, lbl)
case *ast.SwitchStmt:
if stmt == nil {
return
}
kind = dbscheme.ExprSwitchStmtType.Index()
extractStmt(tw, stmt.Init, lbl, 0)
extractExpr(tw, stmt.Tag, lbl, 1)
extractStmt(tw, stmt.Body, lbl, 2)
emitScopeNodeInfo(tw, stmt, lbl)
case *ast.TypeSwitchStmt:
if stmt == nil {
return
}
kind = dbscheme.TypeSwitchStmtType.Index()
extractStmt(tw, stmt.Init, lbl, 0)
extractStmt(tw, stmt.Assign, lbl, 1)
extractStmt(tw, stmt.Body, lbl, 2)
emitScopeNodeInfo(tw, stmt, lbl)
case *ast.CommClause:
if stmt == nil {
return
}
kind = dbscheme.CommClauseType.Index()
extractStmt(tw, stmt.Comm, lbl, 0)
extractStmts(tw, stmt.Body, lbl, 1, 1)
emitScopeNodeInfo(tw, stmt, lbl)
case *ast.SelectStmt:
kind = dbscheme.SelectStmtType.Index()
extractStmt(tw, stmt.Body, lbl, 0)
case *ast.ForStmt:
if stmt == nil {
return
}
kind = dbscheme.ForStmtType.Index()
extractStmt(tw, stmt.Init, lbl, 0)
extractExpr(tw, stmt.Cond, lbl, 1)
extractStmt(tw, stmt.Post, lbl, 2)
extractStmt(tw, stmt.Body, lbl, 3)
emitScopeNodeInfo(tw, stmt, lbl)
case *ast.RangeStmt:
if stmt == nil {
return
}
kind = dbscheme.RangeStmtType.Index()
extractExpr(tw, stmt.Key, lbl, 0)
extractExpr(tw, stmt.Value, lbl, 1)
extractExpr(tw, stmt.X, lbl, 2)
extractStmt(tw, stmt.Body, lbl, 3)
emitScopeNodeInfo(tw, stmt, lbl)
default:
log.Fatalf("unknown statement of type %T", stmt)
}
dbscheme.StmtsTable.Emit(tw, lbl, kind, parent, idx)
extractNodeLocation(tw, stmt, lbl)
}
// extractStmts extracts AST information for a list of statements, which are children of
// the given parent
// `idx` is the index of the first child in the list, and `dir` is the index increment of
// each child over its preceding child (usually either 1 for assigning increasing indices, or
// -1 for decreasing indices)
func extractStmts(tw *trap.Writer, stmts []ast.Stmt, parent trap.Label, idx int, dir int) {
if stmts != nil {
for _, stmt := range stmts {
extractStmt(tw, stmt, parent, idx)
idx += dir
}
}
}
// extractDecl extracts AST information for the given declaration
func extractDecl(tw *trap.Writer, decl ast.Decl, parent trap.Label, idx int) {
lbl := tw.Labeler.LocalID(decl)
var kind int
switch decl := decl.(type) {
case *ast.BadDecl:
kind = dbscheme.BadDeclType.Index()
case *ast.GenDecl:
if decl == nil {
return
}
switch decl.Tok {
case token.IMPORT:
kind = dbscheme.ImportDeclType.Index()
case token.CONST:
kind = dbscheme.ConstDeclType.Index()
case token.TYPE:
kind = dbscheme.TypeDeclType.Index()
case token.VAR:
kind = dbscheme.VarDeclType.Index()
default:
log.Fatalf("unknown declaration of kind %v", decl.Tok)
}
for i, spec := range decl.Specs {
extractSpec(tw, spec, lbl, i)
}
extractDoc(tw, decl.Doc, lbl)
case *ast.FuncDecl:
if decl == nil {
return
}
kind = dbscheme.FuncDeclType.Index()
extractFields(tw, decl.Recv, lbl, -1, -1)
extractExpr(tw, decl.Name, lbl, 0)
extractExpr(tw, decl.Type, lbl, 1)
extractStmt(tw, decl.Body, lbl, 2)
extractDoc(tw, decl.Doc, lbl)
default:
log.Fatalf("unknown declaration of type %T", decl)
}
dbscheme.DeclsTable.Emit(tw, lbl, kind, parent, idx)
extractNodeLocation(tw, decl, lbl)
}
// extractSpec extracts AST information for the given declaration specifier
func extractSpec(tw *trap.Writer, spec ast.Spec, parent trap.Label, idx int) {
lbl := tw.Labeler.LocalID(spec)
var kind int
switch spec := spec.(type) {
case *ast.ImportSpec:
if spec == nil {
return
}
kind = dbscheme.ImportSpecType.Index()
extractExpr(tw, spec.Name, lbl, 0)
extractExpr(tw, spec.Path, lbl, 1)
extractDoc(tw, spec.Doc, lbl)
case *ast.ValueSpec:
if spec == nil {
return
}
kind = dbscheme.ValueSpecType.Index()
for i, name := range spec.Names {
extractExpr(tw, name, lbl, -(1 + i))
}
extractExpr(tw, spec.Type, lbl, 0)
extractExprs(tw, spec.Values, lbl, 1, 1)
extractDoc(tw, spec.Doc, lbl)
case *ast.TypeSpec:
if spec == nil {
return
}
if spec.Assign.IsValid() {
kind = dbscheme.AliasSpecType.Index()
} else {
kind = dbscheme.TypeDefSpecType.Index()
}
extractExpr(tw, spec.Name, lbl, 0)
extractExpr(tw, spec.Type, lbl, 1)
extractDoc(tw, spec.Doc, lbl)
}
dbscheme.SpecsTable.Emit(tw, lbl, kind, parent, idx)
extractNodeLocation(tw, spec, lbl)
}
// extractType extracts type information for `tp` and returns its associated label;
// types are only extracted once, so the second time `extractType` is invoked it simply returns the label
func extractType(tw *trap.Writer, tp types.Type) trap.Label {
lbl, exists := getTypeLabel(tw, tp)
if !exists {
var kind int
switch tp := tp.(type) {
case *types.Basic:
branch := dbscheme.BasicTypes[tp.Kind()]
if branch == nil {
log.Fatalf("unknown basic type %v", tp.Kind())
}
kind = branch.Index()
case *types.Array:
kind = dbscheme.ArrayType.Index()
dbscheme.ArrayLengthTable.Emit(tw, lbl, fmt.Sprintf("%d", tp.Len()))
extractElementType(tw, lbl, tp.Elem())
case *types.Slice:
kind = dbscheme.SliceType.Index()
extractElementType(tw, lbl, tp.Elem())
case *types.Struct:
kind = dbscheme.StructType.Index()
for i := 0; i < tp.NumFields(); i++ {
field := tp.Field(i)
// ensure the field is associated with a label
fieldlbl, exists := tw.Labeler.FieldID(field, i, lbl)
if !exists {
extractObject(tw, field, fieldlbl)
}
dbscheme.FieldStructsTable.Emit(tw, fieldlbl, lbl)
name := field.Name()
if field.Embedded() {
name = ""
}
extractComponentType(tw, lbl, i, name, field.Type())
}
case *types.Pointer:
kind = dbscheme.PointerType.Index()
extractBaseType(tw, lbl, tp.Elem())
case *types.Interface:
kind = dbscheme.InterfaceType.Index()
for i := 0; i < tp.NumMethods(); i++ {
meth := tp.Method(i)
extractMethod(tw, meth)
extractComponentType(tw, lbl, i, meth.Name(), meth.Type())
}
case *types.Tuple:
kind = dbscheme.TupleType.Index()
for i := 0; i < tp.Len(); i++ {
extractComponentType(tw, lbl, i, "", tp.At(i).Type())
}
case *types.Signature:
kind = dbscheme.SignatureType.Index()
parms, results := tp.Params(), tp.Results()
if parms != nil {
for i := 0; i < parms.Len(); i++ {
parm := parms.At(i)
extractComponentType(tw, lbl, i+1, "", parm.Type())
}
}
if results != nil {
for i := 0; i < results.Len(); i++ {
result := results.At(i)
extractComponentType(tw, lbl, -(i + 1), "", result.Type())
}
}
case *types.Map:
kind = dbscheme.MapType.Index()
extractKeyType(tw, lbl, tp.Key())
extractElementType(tw, lbl, tp.Elem())
case *types.Chan:
kind = dbscheme.ChanTypes[tp.Dir()].Index()
extractElementType(tw, lbl, tp.Elem())
case *types.Named:
kind = dbscheme.NamedType.Index()
dbscheme.TypeNameTable.Emit(tw, lbl, tp.Obj().Name())
underlying := tp.Underlying()
extractUnderlyingType(tw, lbl, underlying)
entitylbl, exists := tw.Labeler.LookupObjectID(tp.Obj(), lbl)
if entitylbl == trap.InvalidLabel {
log.Printf("Omitting type-object binding for unknown object %v.\n", tp.Obj())
} else {
if !exists {
extractObject(tw, tp.Obj(), entitylbl)
}
dbscheme.TypeObjectTable.Emit(tw, lbl, entitylbl)
}
// ensure all methods have labels
for i := 0; i < tp.NumMethods(); i++ {
meth := tp.Method(i)
extractMethod(tw, meth)
}
// associate all methods of underlying interface with this type
if underlyingInterface, ok := underlying.(*types.Interface); ok {
for i := 0; i < underlyingInterface.NumMethods(); i++ {
methlbl := extractMethod(tw, underlyingInterface.Method(i))
dbscheme.MethodHostsTable.Emit(tw, methlbl, lbl)
}
}
default:
log.Fatalf("unexpected type %T", tp)
}
dbscheme.TypesTable.Emit(tw, lbl, kind)
}
return lbl
}
// getTypeLabel looks up the label associated with `tp`, creating a new label if
// it does not have one yet; the second result indicates whether the label
// already existed
//
// Type labels refer to global keys to ensure that if the same type is
// encountered during the extraction of different files it is still ultimately
// mapped to the same entity. In particular, this means that keys for compound
// types refer to the labels of their component types. For named types, the key
// is constructed from their globally unique ID. This prevents cyclic type keys
// since type recursion in Go always goes through named types.
func getTypeLabel(tw *trap.Writer, tp types.Type) (trap.Label, bool) {
lbl, exists := tw.Labeler.TypeLabels[tp]
if !exists {
switch tp := tp.(type) {
case *types.Basic:
lbl = tw.Labeler.GlobalID(fmt.Sprintf("%d;basictype", tp.Kind()))
case *types.Array:
len := tp.Len()
elem := extractType(tw, tp.Elem())
lbl = tw.Labeler.GlobalID(fmt.Sprintf("%d,{%s};arraytype", len, elem))
case *types.Slice:
elem := extractType(tw, tp.Elem())
lbl = tw.Labeler.GlobalID(fmt.Sprintf("{%s};slicetype", elem))
case *types.Struct:
var b strings.Builder
for i := 0; i < tp.NumFields(); i++ {
field := tp.Field(i)
fieldTypeLbl := extractType(tw, field.Type())
if i > 0 {
b.WriteString(",")
}
name := field.Name()
if field.Embedded() {
name = ""
}
fmt.Fprintf(&b, "%s,{%s},%s", name, fieldTypeLbl, tp.Tag(i))
}
lbl = tw.Labeler.GlobalID(fmt.Sprintf("%s;structtype", b.String()))
case *types.Pointer:
base := extractType(tw, tp.Elem())
lbl = tw.Labeler.GlobalID(fmt.Sprintf("{%s};pointertype", base))
case *types.Interface:
var b strings.Builder
for i := 0; i < tp.NumMethods(); i++ {
meth := tp.Method(i)
methLbl := extractType(tw, meth.Type())
if i > 0 {
b.WriteString(",")
}
fmt.Fprintf(&b, "%s,{%s}", meth.Id(), methLbl)
}
lbl = tw.Labeler.GlobalID(fmt.Sprintf("%s;interfacetype", b.String()))
case *types.Tuple:
var b strings.Builder
for i := 0; i < tp.Len(); i++ {
compLbl := extractType(tw, tp.At(i).Type())
if i > 0 {
b.WriteString(",")
}
fmt.Fprintf(&b, "{%s}", compLbl)
}
lbl = tw.Labeler.GlobalID(fmt.Sprintf("%s;tupletype", b.String()))
case *types.Signature:
var b strings.Builder
parms, results := tp.Params(), tp.Results()
if parms != nil {
for i := 0; i < parms.Len(); i++ {
parmLbl := extractType(tw, parms.At(i).Type())
if i > 0 {
b.WriteString(",")
}
fmt.Fprintf(&b, "{%s}", parmLbl)
}
}
b.WriteString(";")
if results != nil {
for i := 0; i < results.Len(); i++ {
resultLbl := extractType(tw, results.At(i).Type())
if i > 0 {
b.WriteString(",")
}
fmt.Fprintf(&b, "{%s}", resultLbl)
}
}
lbl = tw.Labeler.GlobalID(fmt.Sprintf("%s;signaturetype", b.String()))
case *types.Map:
key := extractType(tw, tp.Key())
value := extractType(tw, tp.Elem())
lbl = tw.Labeler.GlobalID(fmt.Sprintf("{%s},{%s};maptype", key, value))
case *types.Chan:
dir := tp.Dir()
elem := extractType(tw, tp.Elem())
lbl = tw.Labeler.GlobalID(fmt.Sprintf("%v,{%s};chantype", dir, elem))
case *types.Named:
entitylbl, exists := tw.Labeler.LookupObjectID(tp.Obj(), lbl)
if entitylbl == trap.InvalidLabel {
panic(fmt.Sprintf("Cannot construct label for named type %v (underlying object is %v).\n", tp, tp.Obj()))
}
if !exists {
extractObject(tw, tp.Obj(), entitylbl)
}
lbl = tw.Labeler.GlobalID(fmt.Sprintf("{%s};namedtype", entitylbl))
}
tw.Labeler.TypeLabels[tp] = lbl
}
return lbl, exists
}
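// exampleTypeKeySketch is an editor-added, hedged sketch (not part of the
// original extractor) of how the global type keys built above compose: the key
// of a compound type embeds the labels of its component types, so a slice of
// int is keyed roughly as "{<label of int>};slicetype", while the int label
// itself comes from the "<kind>;basictype" form.
func exampleTypeKeySketch(tw *trap.Writer) {
	intType := types.Typ[types.Int]
	sliceType := types.NewSlice(intType)
	_ = extractType(tw, intType)   // ensures the element type has a global label
	_ = extractType(tw, sliceType) // the slice key then references that label
}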
// extractKeyType extracts `key` as the key type of the map type `mp`
func extractKeyType(tw *trap.Writer, mp trap.Label, key types.Type) {
dbscheme.KeyTypeTable.Emit(tw, mp, extractType(tw, key))
}
// extractElementType extracts `element` as the element type of the container type `container`
func extractElementType(tw *trap.Writer, container trap.Label, element types.Type) {
dbscheme.ElementTypeTable.Emit(tw, container, extractType(tw, element))
}
// extractBaseType extracts `base` as the base type of the pointer type `ptr`
func extractBaseType(tw *trap.Writer, ptr trap.Label, base types.Type) {
dbscheme.BaseTypeTable.Emit(tw, ptr, extractType(tw, base))
}
// extractUnderlyingType extracts `underlying` as the underlying type of the
// named type `named`
func extractUnderlyingType(tw *trap.Writer, named trap.Label, underlying types.Type) {
dbscheme.UnderlyingTypeTable.Emit(tw, named, extractType(tw, underlying))
}
// extractComponentType extracts `component` as the `idx`th component type of `parent` with name `name`
func extractComponentType(tw *trap.Writer, parent trap.Label, idx int, name string, component types.Type) {
dbscheme.ComponentTypesTable.Emit(tw, parent, idx, name, extractType(tw, component))
}
// extractNumLines extracts lines-of-code and lines-of-comments information for the
// given file
func extractNumLines(tw *trap.Writer, fileName string, ast *ast.File) {
f := tw.Package.Fset.File(ast.Pos())
lineCount := f.LineCount()
// count lines of code by tokenizing
linesOfCode := 0
src, err := ioutil.ReadFile(fileName)
if err != nil {
log.Fatalf("Unable to read file %s.", fileName)
}
var s scanner.Scanner
lastCodeLine := -1
s.Init(f, src, nil, 0)
for {
pos, tok, lit := s.Scan()
if tok == token.EOF {
break
} else if tok != token.ILLEGAL {
tkStartLine := f.Position(pos).Line
tkEndLine := tkStartLine + strings.Count(lit, "\n")
if tkEndLine > lastCodeLine {
linesOfCode += tkEndLine - tkStartLine + 1
lastCodeLine = tkEndLine
}
}
}
// count lines of comments by iterating over ast.Comments
linesOfComments := 0
for _, cg := range ast.Comments {
for _, g := range cg.List {
fset := tw.Package.Fset
startPos, endPos := fset.Position(g.Pos()), fset.Position(g.End())
linesOfComments += endPos.Line - startPos.Line + 1
}
}
dbscheme.NumlinesTable.Emit(tw, tw.Labeler.FileLabel(), lineCount, linesOfCode, linesOfComments)
}
|
[
"\"LGTM_THREADS\""
] |
[] |
[
"LGTM_THREADS"
] |
[]
|
["LGTM_THREADS"]
|
go
| 1 | 0 | |
adbc/config.py
|
from adbc.template import resolve_template
from yaml import safe_load as load
from adbc.vault import vault
import copy
import os
def get_initial_context(vault=True, env=True, context=None):
"""Return context of available services, such as Vault"""
initial = {}
if vault:
# vault should receive the context
initial['vault'] = VaultConfig(context=context)
if env:
initial['env'] = dict(os.environ)
return initial
def get_config(filename=None, data=None, context=None):
if not data:
if not filename:
filename = os.environ.get('ADBC_CONFIG_PATH') or 'adbc.yml'
data = read_config_file(filename)
initial = get_initial_context(context=context)
if not context:
context = initial
else:
context.update(initial)
return hydrate_config(
data,
context=context
)
def read_config_file(filename):
"""
Arguments:
filename: string
Return:
config: dict representing raw config
"""
with open(filename, "r") as file:
data = load(file.read())
if "adbc" not in data:
raise Exception(f'Invalid config file "{filename}", missing "adbc" block')
return data
def hydrate_config(config, context=None):
"""Hydrates configuration
Looks for {{ template.tags }} and executes using context
Arguments:
config: string or dict representing configuration data to hydrate
context: dict of context to pass in
Return:
dict of hydrated config
"""
if config is None or isinstance(config, (bool, float, int)):
return config
if isinstance(config, str):
return resolve_template(config, context)
if isinstance(config, list):
return [hydrate_config(c, context) for c in config]
assert isinstance(config, dict)
result = {}
for key, value in config.items():
keys = []
alias = None
# build key(s)
original = key
key = resolve_template(key, context)
if isinstance(key, list):
# multi-value key
alias = getattr(key, "__alias__", original)
for record in key:
ctx = copy.copy(context)
ctx[alias] = record
keys.append((ctx, record))
else:
keys = [(context, key)]
# build value(s)
for ctx, k in keys:
result[k] = hydrate_config(value, ctx)
return result
class VaultConfig(object):
__FIELDS__ = (
"args",
"alias",
"context",
"context_key",
"context_mode",
"alias_mode",
)
def __init__(
self,
args=None,
alias=None,
context=None,
context_key=None,
context_mode=False,
alias_mode=False,
):
# e.g. ['kv', 'get', 'secret', 'environments']
self.__args__ = args or []
self.__context__ = context
self.__context_key__ = context_key or []
self.__context_mode__ = context_mode
self.__alias_mode__ = alias_mode
self.__alias__ = alias
def __getattr__(self, key):
result = self.__extend__(key)
return result
def __produce__(self):
if self.__context_mode__:
# still in context mode
return self.__end_context_mode__().__produce__()
# TODO: vault integration here
args = self.__args__
return vault(args)
def __clone__(self, **kwargs):
for field in self.__FIELDS__:
if field not in kwargs:
uf = f"__{field}__"
kwargs[field] = getattr(self, uf)
return VaultConfig(**kwargs)
def __end_context_mode__(self):
# extract captured context key from context
context_key = "".join(self.__context_key__)
args = self.__args__
if context_key:
# use it to get a new key from the context
key = self.__context__[context_key]
args = copy.copy(args)
# add the key to the running argument list
args.append(key)
else:
raise Exception("end context mode called without any context key")
return self.__clone__(args=args, context_mode=False, context_key=None)
def __extend__(self, key):
if key.startswith("_") and key.endswith("_"):
# special key
if key == "_":
if self.__context_mode__:
return self.__end_context_mode__()
else:
return self.__clone__(
context_mode=True, context_key=None
)
elif key == "_as_":
return self.__clone__(alias_mode=True)
elif key == "_data_":
# produce data
return self.__produce__()
else:
raise Exception(f'unexpected path: "{key}"')
else:
# normal key
if self.__alias_mode__:
# alias and produce data
self.__alias__ = key
return self.__produce__()
args = None
if self.__context_mode__:
# build context key
args = self.__context_key__
args.append(key)
return self.__clone__(context_key=args)
else:
args = copy.copy(self.__args__)
args.append(key)
return self.__clone__(args=args)
class WithAlias(object):
def __init__(self, *args, **kwargs):
self.__alias__ = kwargs.get("alias", None)
class AliasDict(WithAlias, dict):
pass
class AliasList(WithAlias, list):
pass
|
[] |
[] |
[
"ADBC_CONFIG_PATH"
] |
[]
|
["ADBC_CONFIG_PATH"]
|
python
| 1 | 0 | |
Train_model/train_model.py
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import pandas as pd
data = pd.read_csv('train_label.csv') # CSV file path
image = data.iloc[:, 0].values
label = data.iloc[:, 1:].values
train = tf.data.Dataset.from_tensor_slices((image, label))
def collector(images_file, label):
image = tf.io.read_file('train\\'+images_file) # train_images file path
image = tf.image.decode_image(image, channels=1, dtype=tf.float32)
labels = {'label': label[0], 'coordinates': label[1:]}
return image, labels
train = (
train.shuffle(buffer_size=label.shape[0])
.map(collector)
.batch(batch_size=100)
)
# Use a ModelCheckpoint callback to save the best weights during training
callback = tf.keras.callbacks.ModelCheckpoint(
'checkpoint/',
save_weights_only=True,
monitor='accuracy',
save_best_only=True
)
# Using the functional API
rg = tf.keras.regularizers.l1(0.001)
input = tf.keras.Input(shape=(75, 75, 1))
x = tf.keras.layers.Conv2D(32, kernel_size=3, activation='relu', kernel_regularizer=rg)(input)
x = tf.keras.layers.MaxPooling2D((3,3), strides=(1,1), padding='valid')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu', kernel_regularizer=rg)(x)
x = tf.keras.layers.MaxPooling2D((3,3), strides=(1,1), padding='valid')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv2D(128, kernel_size=3, activation='relu', kernel_regularizer=rg)(x)
x = tf.keras.layers.MaxPooling2D((3,3), strides=(1,1), padding='valid')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Flatten()(x)
# Two output layers: one for label classification and one for bounding-box prediction
output1 = tf.keras.layers.Dense(10, activation='softmax', name="label")(x)
output2 = tf.keras.layers.Dense(4, name="coordinates")(x)
model = tf.keras.Model(inputs=input, outputs=[output1, output2])
# Two loss function
model.compile(loss={"label": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),"coordinates": 'mean_squared_error'},
optimizer='adam', metrics=['accuracy'])
model.fit(train, epochs=10, verbose=1, callbacks=[callback])
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
cmd/root.go
|
package cmd
import (
"os"
"os/signal"
"syscall"
"fmt"
"github.com/spf13/cobra"
"github.com/growlog/things-server/internal"
)
var rootCmd = &cobra.Command{
Use: "things-server",
Short: "GrowLog Things Web Service",
Long: `GrowLog Thing is a web service that helps you access time-series data of your IoT devices.`,
Run: func(cmd *cobra.Command, args []string) {
// Load up our `environment variables` from our operating system.
dbHost := os.Getenv("GROWLOG_THING_DB_HOST")
dbPort := os.Getenv("GROWLOG_THING_DB_PORT")
dbUser := os.Getenv("GROWLOG_THING_DB_USER")
dbPassword := os.Getenv("GROWLOG_THING_DB_PASSWORD")
dbName := os.Getenv("GROWLOG_THING_DB_NAME")
thingAddress := os.Getenv("GROWLOG_THING_APP_ADDRESS")
remoteAccountAddress := os.Getenv("GROWLOG_THING_APP_REMOTE_ACCOUNT_ADDRESS")
// Initialize our application.
app := internal.InitThingApplication(dbHost, dbPort, dbUser, dbPassword, dbName, thingAddress, remoteAccountAddress)
// DEVELOPERS CODE:
// The following code will create an anonymous goroutine which will have a
// blocking chan `sigs`. This blocking chan will only unblock when the
		// golang app receives a termination command; therefore the anonymous
// goroutine will run and terminate our running application.
//
// Special Thanks:
// (1) https://gobyexample.com/signals
// (2) https://guzalexander.com/2017/05/31/gracefully-exit-server-in-go.html
//
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-sigs // Block execution until signal from terminal gets triggered here.
fmt.Println("Starting graceful shut down now.")
app.StopMainRuntimeLoop()
}()
app.RunMainRuntimeLoop()
},
}
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
|
[
"\"GROWLOG_THING_DB_HOST\"",
"\"GROWLOG_THING_DB_PORT\"",
"\"GROWLOG_THING_DB_USER\"",
"\"GROWLOG_THING_DB_PASSWORD\"",
"\"GROWLOG_THING_DB_NAME\"",
"\"GROWLOG_THING_APP_ADDRESS\"",
"\"GROWLOG_THING_APP_REMOTE_ACCOUNT_ADDRESS\""
] |
[] |
[
"GROWLOG_THING_DB_HOST",
"GROWLOG_THING_DB_USER",
"GROWLOG_THING_DB_PASSWORD",
"GROWLOG_THING_APP_ADDRESS",
"GROWLOG_THING_DB_PORT",
"GROWLOG_THING_APP_REMOTE_ACCOUNT_ADDRESS",
"GROWLOG_THING_DB_NAME"
] |
[]
|
["GROWLOG_THING_DB_HOST", "GROWLOG_THING_DB_USER", "GROWLOG_THING_DB_PASSWORD", "GROWLOG_THING_APP_ADDRESS", "GROWLOG_THING_DB_PORT", "GROWLOG_THING_APP_REMOTE_ACCOUNT_ADDRESS", "GROWLOG_THING_DB_NAME"]
|
go
| 7 | 0 | |
api.go
|
package main
import (
"fmt"
"os"
"strconv"
"time"
"github.com/FederationOfFathers/xboxapi"
)
var cfgXboxAPI = os.Getenv("XBOXAPI")
var xbl *xboxapi.Client
func init() {
xbl = xboxapi.New(&xboxapi.Config{
APIKey: cfgXboxAPI,
Language: "en-US",
})
}
func getXboxTitleByInt(id int) (*xboxapi.Title, error) {
var rval *xboxapi.Title
cacheKey := fmt.Sprintf("xbox-title-by-int-%d", id)
ok, err := cacheGet(cacheKey, &rval)
if ok && err == nil {
return rval, err
}
hex := fmt.Sprintf("%x", id)
rval, err = xbl.GameDetailsHex(hex)
cacheSet(cacheKey, time.Hour*24*30, rval)
return rval, err
}
func getXboxTitleByString(id string) (*xboxapi.Title, error) {
intID, err := strconv.Atoi(id)
if err != nil {
return nil, err
}
return getXboxTitleByInt(intID)
}
|
[
"\"XBOXAPI\""
] |
[] |
[
"XBOXAPI"
] |
[]
|
["XBOXAPI"]
|
go
| 1 | 0 | |
ssh/ssh_internal_test.go
|
package ssh
import (
"bytes"
"fmt"
"os"
"testing"
. "github.com/drypycode/portscanner/utils"
env "github.com/joho/godotenv"
)
var err error = env.Load("../.env")
var TEST_CONF SSHConfig = SSHConfig{
Key: os.Getenv("SSH_KEY_PATH"), User: os.Getenv("REMOTE_USER"), RemoteHost: os.Getenv("REMOTE_HOST"), Port: "22",
}
func TestInspectOS(t *testing.T) {
check(err)
session := setupSession(TEST_CONF)
result := inspectRemoteOS("hostnamectl", session)
result = bytes.Trim(result, "\x00")
result = append(result, []byte("\n")...)
arch := getValue(result, "Architecture")
os := getValue(result, "Operating System")
AssertEquals(t, "", "arm64", arch)
AssertEquals(t, "", "Amazon Linux 2", os)
}
func TestRunCommand(t *testing.T) {
check(err)
session := setupSession(TEST_CONF)
cmd := "./main --ports=80,4423,100-105,40-45,1000-1200 --hosts='127.0.0.1,localhost,google.com' --output=/tmp/dat2.json --protocol=TCP"
runCommand(cmd, session)
}
// func TestBuildPSOnRemote(t *testing.T) {
// buildPSOnRemote(TEST_CONF)
// }
// CURRENTLY FAILING BECAUSE os.Getcwd is very fragile
// func TestJump(t *testing.T) {
// os.Args = []string{"go", "run", "main.go", "--ports=80,4423,100-105,40-45,1000-2000", "--hosts='127.0.0.1,localhost,google.com'", "--output=/tmp/dat2.json", "--protocol=TCP"}
// Jump(TEST_CONF)
// }
func TestRemoveRemoteFlags(t *testing.T) {
args := []string{
"go", "run", "main.go", "--ports=80,4423,100-105,40-45,1000-2000",
"--hosts='127.0.0.1,localhost,google.com'", "--output=/tmp/dat2.json",
"--protocol=TCP", fmt.Sprintf("--remote-host=%s", os.Getenv("REMOTE_HOST")),
"--remote-user=ec2-user", fmt.Sprintf("--ssh-key=%s", os.Getenv("SSH_KEY_PATH")),
"--jump",
}
removeRemoteFlags(args)
fmt.Println(args)
AssertEquals(t, "Removed elements", []string{
"go", "run", "main.go", "--ports=80,4423,100-105,40-45,1000-2000",
"--hosts='127.0.0.1,localhost,google.com'", "--output=/tmp/dat2.json", "--protocol=TCP", "", "", "", "",
}, args)
}
|
[
"\"SSH_KEY_PATH\"",
"\"REMOTE_USER\"",
"\"REMOTE_HOST\"",
"\"REMOTE_HOST\"",
"\"SSH_KEY_PATH\""
] |
[] |
[
"REMOTE_USER",
"SSH_KEY_PATH",
"REMOTE_HOST"
] |
[]
|
["REMOTE_USER", "SSH_KEY_PATH", "REMOTE_HOST"]
|
go
| 3 | 0 | |
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go
|
// Copyright 2015 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package docker provides a client for the Docker remote API.
//
// See https://goo.gl/G3plxW for more details on the remote API.
package docker
import (
"bufio"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"time"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
"github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp"
)
const userAgent = "go-dockerclient"
var (
// ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
ErrInvalidEndpoint = errors.New("invalid endpoint")
// ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
ErrConnectionRefused = errors.New("cannot connect to Docker endpoint")
apiVersion112, _ = NewAPIVersion("1.12")
apiVersion119, _ = NewAPIVersion("1.19")
)
// APIVersion is an internal representation of a version of the Remote API.
type APIVersion []int
// NewAPIVersion returns an instance of APIVersion for the given string.
//
// The given string must be in the form <major>.<minor>.<patch>, where <major>,
// <minor> and <patch> are integer numbers.
func NewAPIVersion(input string) (APIVersion, error) {
if !strings.Contains(input, ".") {
return nil, fmt.Errorf("Unable to parse version %q", input)
}
arr := strings.Split(input, ".")
ret := make(APIVersion, len(arr))
var err error
for i, val := range arr {
ret[i], err = strconv.Atoi(val)
if err != nil {
return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val)
}
}
return ret, nil
}
func (version APIVersion) String() string {
var str string
for i, val := range version {
str += strconv.Itoa(val)
if i < len(version)-1 {
str += "."
}
}
return str
}
// LessThan is a function for comparing APIVersion structs
func (version APIVersion) LessThan(other APIVersion) bool {
return version.compare(other) < 0
}
// LessThanOrEqualTo is a function for comparing APIVersion structs
func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool {
return version.compare(other) <= 0
}
// GreaterThan is a function for comparing APIVersion structs
func (version APIVersion) GreaterThan(other APIVersion) bool {
return version.compare(other) > 0
}
// GreaterThanOrEqualTo is a function for comparing APIVersion structs
func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool {
return version.compare(other) >= 0
}
func (version APIVersion) compare(other APIVersion) int {
for i, v := range version {
if i <= len(other)-1 {
otherVersion := other[i]
if v < otherVersion {
return -1
} else if v > otherVersion {
return 1
}
}
}
if len(version) > len(other) {
return 1
}
if len(version) < len(other) {
return -1
}
return 0
}
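// exampleAPIVersionSketch is an editor-added, hedged illustration (not part of
// the original package) of the component-wise comparison semantics implemented
// by compare above.
func exampleAPIVersionSketch() (bool, error) {
	older, err := NewAPIVersion("1.12")
	if err != nil {
		return false, err
	}
	newer, err := NewAPIVersion("1.19")
	if err != nil {
		return false, err
	}
	// true: the second components differ and 19 > 12
	return newer.GreaterThan(older), nil
}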
// Client is the basic type of this package. It provides methods for
// interaction with the API.
type Client struct {
SkipServerVersionCheck bool
HTTPClient *http.Client
TLSConfig *tls.Config
Dialer *net.Dialer
endpoint string
endpointURL *url.URL
eventMonitor *eventMonitoringState
requestedAPIVersion APIVersion
serverAPIVersion APIVersion
expectedAPIVersion APIVersion
unixHTTPClient *http.Client
}
// NewClient returns a Client instance ready for communication with the given
// server endpoint. It will use the latest remote API version available in the
// server.
func NewClient(endpoint string) (*Client, error) {
client, err := NewVersionedClient(endpoint, "")
if err != nil {
return nil, err
}
client.SkipServerVersionCheck = true
return client, nil
}
// NewTLSClient returns a Client instance ready for TLS communications with the given
// server endpoint, key and certificates. It will use the latest remote API version
// available in the server.
func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "")
if err != nil {
return nil, err
}
client.SkipServerVersionCheck = true
return client, nil
}
// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the given
// server endpoint, key and certificates (passed inline to the function as opposed to being
// read from a local file). It will use the latest remote API version available in the server.
func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) {
client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "")
if err != nil {
return nil, err
}
client.SkipServerVersionCheck = true
return client, nil
}
// NewVersionedClient returns a Client instance ready for communication with
// the given server endpoint, using a specific remote API version.
func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
u, err := parseEndpoint(endpoint, false)
if err != nil {
return nil, err
}
var requestedAPIVersion APIVersion
if strings.Contains(apiVersionString, ".") {
requestedAPIVersion, err = NewAPIVersion(apiVersionString)
if err != nil {
return nil, err
}
}
return &Client{
HTTPClient: cleanhttp.DefaultClient(),
Dialer: &net.Dialer{},
endpoint: endpoint,
endpointURL: u,
eventMonitor: new(eventMonitoringState),
requestedAPIVersion: requestedAPIVersion,
}, nil
}
// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient.
func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString)
}
// NewVersionedTLSClient returns a Client instance ready for TLS communications with the given
// server endpoint, key and certificates, using a specific remote API version.
func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
certPEMBlock, err := ioutil.ReadFile(cert)
if err != nil {
return nil, err
}
keyPEMBlock, err := ioutil.ReadFile(key)
if err != nil {
return nil, err
}
caPEMCert, err := ioutil.ReadFile(ca)
if err != nil {
return nil, err
}
return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString)
}
// NewClientFromEnv returns a Client instance ready for communication created from
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH.
//
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
func NewClientFromEnv() (*Client, error) {
client, err := NewVersionedClientFromEnv("")
if err != nil {
return nil, err
}
client.SkipServerVersionCheck = true
return client, nil
}
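// exampleNewClientFromEnvSketch is an editor-added, hedged usage sketch (not
// part of the original go-dockerclient source): it shows how the environment-
// driven constructor above is typically paired with Ping to check that the
// endpoint taken from DOCKER_HOST is reachable.
func exampleNewClientFromEnvSketch() error {
	client, err := NewClientFromEnv()
	if err != nil {
		return fmt.Errorf("creating client from DOCKER_* variables: %v", err)
	}
	return client.Ping()
}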
// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH,
// and using a specific remote API version.
//
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
dockerEnv, err := getDockerEnv()
if err != nil {
return nil, err
}
dockerHost := dockerEnv.dockerHost
if dockerEnv.dockerTLSVerify {
parts := strings.SplitN(dockerEnv.dockerHost, "://", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost)
}
cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem")
key := filepath.Join(dockerEnv.dockerCertPath, "key.pem")
ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem")
return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString)
}
return NewVersionedClient(dockerEnv.dockerHost, apiVersionString)
}
// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the given
// server endpoint, key and certificates (passed inline to the function as opposed to being
// read from a local file), using a specific remote API version.
func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
u, err := parseEndpoint(endpoint, true)
if err != nil {
return nil, err
}
var requestedAPIVersion APIVersion
if strings.Contains(apiVersionString, ".") {
requestedAPIVersion, err = NewAPIVersion(apiVersionString)
if err != nil {
return nil, err
}
}
if certPEMBlock == nil || keyPEMBlock == nil {
return nil, errors.New("Both cert and key are required")
}
tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
if err != nil {
return nil, err
}
tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}}
if caPEMCert == nil {
tlsConfig.InsecureSkipVerify = true
} else {
caPool := x509.NewCertPool()
if !caPool.AppendCertsFromPEM(caPEMCert) {
return nil, errors.New("Could not add RootCA pem")
}
tlsConfig.RootCAs = caPool
}
tr := cleanhttp.DefaultTransport()
tr.TLSClientConfig = tlsConfig
if err != nil {
return nil, err
}
return &Client{
HTTPClient: &http.Client{Transport: tr},
TLSConfig: tlsConfig,
Dialer: &net.Dialer{},
endpoint: endpoint,
endpointURL: u,
eventMonitor: new(eventMonitoringState),
requestedAPIVersion: requestedAPIVersion,
}, nil
}
func (c *Client) checkAPIVersion() error {
serverAPIVersionString, err := c.getServerAPIVersionString()
if err != nil {
return err
}
c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
if err != nil {
return err
}
if c.requestedAPIVersion == nil {
c.expectedAPIVersion = c.serverAPIVersion
} else {
c.expectedAPIVersion = c.requestedAPIVersion
}
return nil
}
// Endpoint returns the current endpoint. It's useful for getting the endpoint
// when using functions that get this data from the environment (like
// NewClientFromEnv).
func (c *Client) Endpoint() string {
return c.endpoint
}
// Ping pings the docker server
//
// See https://goo.gl/kQCfJj for more details.
func (c *Client) Ping() error {
path := "/_ping"
resp, err := c.do("GET", path, doOptions{})
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return newError(resp)
}
resp.Body.Close()
return nil
}
func (c *Client) getServerAPIVersionString() (version string, err error) {
resp, err := c.do("GET", "/version", doOptions{})
if err != nil {
return "", err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
}
var versionResponse map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil {
return "", err
}
if version, ok := (versionResponse["ApiVersion"]).(string); ok {
return version, nil
}
return "", nil
}
type doOptions struct {
data interface{}
forceJSON bool
headers map[string]string
}
func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
var params io.Reader
if doOptions.data != nil || doOptions.forceJSON {
buf, err := json.Marshal(doOptions.data)
if err != nil {
return nil, err
}
params = bytes.NewBuffer(buf)
}
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
err := c.checkAPIVersion()
if err != nil {
return nil, err
}
}
httpClient := c.HTTPClient
protocol := c.endpointURL.Scheme
var u string
if protocol == "unix" {
httpClient = c.unixClient()
u = c.getFakeUnixURL(path)
} else {
u = c.getURL(path)
}
req, err := http.NewRequest(method, u, params)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", userAgent)
if doOptions.data != nil {
req.Header.Set("Content-Type", "application/json")
} else if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
for k, v := range doOptions.headers {
req.Header.Set(k, v)
}
resp, err := httpClient.Do(req)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return nil, ErrConnectionRefused
}
return nil, err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return nil, newError(resp)
}
return resp, nil
}
type streamOptions struct {
setRawTerminal bool
rawJSONStream bool
useJSONDecoder bool
headers map[string]string
in io.Reader
stdout io.Writer
stderr io.Writer
	// timeout is the initial connection timeout
timeout time.Duration
}
func (c *Client) stream(method, path string, streamOptions streamOptions) error {
if (method == "POST" || method == "PUT") && streamOptions.in == nil {
streamOptions.in = bytes.NewReader(nil)
}
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
err := c.checkAPIVersion()
if err != nil {
return err
}
}
req, err := http.NewRequest(method, c.getURL(path), streamOptions.in)
if err != nil {
return err
}
req.Header.Set("User-Agent", userAgent)
if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
for key, val := range streamOptions.headers {
req.Header.Set(key, val)
}
var resp *http.Response
protocol := c.endpointURL.Scheme
address := c.endpointURL.Path
if streamOptions.stdout == nil {
streamOptions.stdout = ioutil.Discard
}
if streamOptions.stderr == nil {
streamOptions.stderr = ioutil.Discard
}
if protocol == "unix" {
dial, err := c.Dialer.Dial(protocol, address)
if err != nil {
return err
}
defer dial.Close()
breader := bufio.NewReader(dial)
err = req.Write(dial)
if err != nil {
return err
}
		// ReadResponse may hang if the server does not reply
if streamOptions.timeout > 0 {
dial.SetDeadline(time.Now().Add(streamOptions.timeout))
}
if resp, err = http.ReadResponse(breader, req); err != nil {
// Cancel timeout for future I/O operations
if streamOptions.timeout > 0 {
dial.SetDeadline(time.Time{})
}
if strings.Contains(err.Error(), "connection refused") {
return ErrConnectionRefused
}
return err
}
} else {
if resp, err = c.HTTPClient.Do(req); err != nil {
if strings.Contains(err.Error(), "connection refused") {
return ErrConnectionRefused
}
return err
}
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return newError(resp)
}
if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" {
// if we want to get raw json stream, just copy it back to output
// without decoding it
if streamOptions.rawJSONStream {
_, err = io.Copy(streamOptions.stdout, resp.Body)
return err
}
dec := json.NewDecoder(resp.Body)
for {
var m jsonMessage
if err := dec.Decode(&m); err == io.EOF {
break
} else if err != nil {
return err
}
if m.Stream != "" {
fmt.Fprint(streamOptions.stdout, m.Stream)
} else if m.Progress != "" {
fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress)
} else if m.Error != "" {
return errors.New(m.Error)
}
if m.Status != "" {
fmt.Fprintln(streamOptions.stdout, m.Status)
}
}
} else {
if streamOptions.setRawTerminal {
_, err = io.Copy(streamOptions.stdout, resp.Body)
} else {
_, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body)
}
return err
}
return nil
}
type hijackOptions struct {
success chan struct{}
setRawTerminal bool
in io.Reader
stdout io.Writer
stderr io.Writer
data interface{}
}
func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error {
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
err := c.checkAPIVersion()
if err != nil {
return err
}
}
var params io.Reader
if hijackOptions.data != nil {
buf, err := json.Marshal(hijackOptions.data)
if err != nil {
return err
}
params = bytes.NewBuffer(buf)
}
req, err := http.NewRequest(method, c.getURL(path), params)
if err != nil {
return err
}
req.Header.Set("Content-Type", "plain/text")
req.Header.Set("Connection", "Upgrade")
req.Header.Set("Upgrade", "tcp")
protocol := c.endpointURL.Scheme
address := c.endpointURL.Path
if protocol != "unix" {
protocol = "tcp"
address = c.endpointURL.Host
}
var dial net.Conn
if c.TLSConfig != nil && protocol != "unix" {
dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
if err != nil {
return err
}
} else {
dial, err = c.Dialer.Dial(protocol, address)
if err != nil {
return err
}
}
clientconn := httputil.NewClientConn(dial, nil)
defer clientconn.Close()
clientconn.Do(req)
if hijackOptions.success != nil {
hijackOptions.success <- struct{}{}
<-hijackOptions.success
}
rwc, br := clientconn.Hijack()
defer rwc.Close()
errChanOut := make(chan error, 1)
errChanIn := make(chan error, 1)
if hijackOptions.stdout == nil && hijackOptions.stderr == nil {
close(errChanOut)
} else {
// Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set.
// Otherwise, if the only stream you care about is stdin, your attach session
// will "hang" until the container terminates, even though you're not reading
// stdout/stderr
if hijackOptions.stdout == nil {
hijackOptions.stdout = ioutil.Discard
}
if hijackOptions.stderr == nil {
hijackOptions.stderr = ioutil.Discard
}
go func() {
defer func() {
if hijackOptions.in != nil {
if closer, ok := hijackOptions.in.(io.Closer); ok {
closer.Close()
}
errChanIn <- nil
}
}()
var err error
if hijackOptions.setRawTerminal {
_, err = io.Copy(hijackOptions.stdout, br)
} else {
_, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
}
errChanOut <- err
}()
}
go func() {
var err error
if hijackOptions.in != nil {
_, err = io.Copy(rwc, hijackOptions.in)
}
errChanIn <- err
rwc.(interface {
CloseWrite() error
}).CloseWrite()
}()
errIn := <-errChanIn
errOut := <-errChanOut
if errIn != nil {
return errIn
}
return errOut
}
func (c *Client) getURL(path string) string {
urlStr := strings.TrimRight(c.endpointURL.String(), "/")
if c.endpointURL.Scheme == "unix" {
urlStr = ""
}
if c.requestedAPIVersion != nil {
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
}
return fmt.Sprintf("%s%s", urlStr, path)
}
// getFakeUnixURL returns the URL needed to make an HTTP request over a UNIX
// domain socket to the given path.
func (c *Client) getFakeUnixURL(path string) string {
u := *c.endpointURL // Copy.
// Override URL so that net/http will not complain.
u.Scheme = "http"
u.Host = "unix.sock" // Doesn't matter what this is - it's not used.
u.Path = ""
urlStr := strings.TrimRight(u.String(), "/")
if c.requestedAPIVersion != nil {
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
}
return fmt.Sprintf("%s%s", urlStr, path)
}
func (c *Client) unixClient() *http.Client {
if c.unixHTTPClient != nil {
return c.unixHTTPClient
}
socketPath := c.endpointURL.Path
c.unixHTTPClient = &http.Client{
Transport: &http.Transport{
Dial: func(network, addr string) (net.Conn, error) {
return c.Dialer.Dial("unix", socketPath)
},
},
}
return c.unixHTTPClient
}
type jsonMessage struct {
Status string `json:"status,omitempty"`
Progress string `json:"progress,omitempty"`
Error string `json:"error,omitempty"`
Stream string `json:"stream,omitempty"`
}
func queryString(opts interface{}) string {
if opts == nil {
return ""
}
value := reflect.ValueOf(opts)
if value.Kind() == reflect.Ptr {
value = value.Elem()
}
if value.Kind() != reflect.Struct {
return ""
}
items := url.Values(map[string][]string{})
for i := 0; i < value.NumField(); i++ {
field := value.Type().Field(i)
if field.PkgPath != "" {
continue
}
key := field.Tag.Get("qs")
if key == "" {
key = strings.ToLower(field.Name)
} else if key == "-" {
continue
}
addQueryStringValue(items, key, value.Field(i))
}
return items.Encode()
}
func addQueryStringValue(items url.Values, key string, v reflect.Value) {
switch v.Kind() {
case reflect.Bool:
if v.Bool() {
items.Add(key, "1")
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if v.Int() > 0 {
items.Add(key, strconv.FormatInt(v.Int(), 10))
}
case reflect.Float32, reflect.Float64:
if v.Float() > 0 {
items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
}
case reflect.String:
if v.String() != "" {
items.Add(key, v.String())
}
case reflect.Ptr:
if !v.IsNil() {
if b, err := json.Marshal(v.Interface()); err == nil {
items.Add(key, string(b))
}
}
case reflect.Map:
if len(v.MapKeys()) > 0 {
if b, err := json.Marshal(v.Interface()); err == nil {
items.Add(key, string(b))
}
}
case reflect.Array, reflect.Slice:
vLen := v.Len()
if vLen > 0 {
for i := 0; i < vLen; i++ {
addQueryStringValue(items, key, v.Index(i))
}
}
}
}
// Error represents a failure response returned by the API.
type Error struct {
Status int
Message string
}
func newError(resp *http.Response) *Error {
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)}
}
return &Error{Status: resp.StatusCode, Message: string(data)}
}
func (e *Error) Error() string {
return fmt.Sprintf("API error (%d): %s", e.Status, e.Message)
}
func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
u, err := url.Parse(endpoint)
if err != nil {
return nil, ErrInvalidEndpoint
}
if tls {
u.Scheme = "https"
}
switch u.Scheme {
case "unix":
return u, nil
case "http", "https", "tcp":
_, port, err := net.SplitHostPort(u.Host)
if err != nil {
if e, ok := err.(*net.AddrError); ok {
if e.Err == "missing port in address" {
return u, nil
}
}
return nil, ErrInvalidEndpoint
}
number, err := strconv.ParseInt(port, 10, 64)
if err == nil && number > 0 && number < 65536 {
if u.Scheme == "tcp" {
if tls {
u.Scheme = "https"
} else {
u.Scheme = "http"
}
}
return u, nil
}
return nil, ErrInvalidEndpoint
default:
return nil, ErrInvalidEndpoint
}
}
type dockerEnv struct {
dockerHost string
dockerTLSVerify bool
dockerCertPath string
}
func getDockerEnv() (*dockerEnv, error) {
dockerHost := os.Getenv("DOCKER_HOST")
var err error
if dockerHost == "" {
dockerHost, err = DefaultDockerHost()
if err != nil {
return nil, err
}
}
dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != ""
var dockerCertPath string
if dockerTLSVerify {
dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
if dockerCertPath == "" {
home := homedir.Get()
if home == "" {
return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set")
}
dockerCertPath = filepath.Join(home, ".docker")
dockerCertPath, err = filepath.Abs(dockerCertPath)
if err != nil {
return nil, err
}
}
}
return &dockerEnv{
dockerHost: dockerHost,
dockerTLSVerify: dockerTLSVerify,
dockerCertPath: dockerCertPath,
}, nil
}
// DefaultDockerHost returns the default docker socket for the current OS
func DefaultDockerHost() (string, error) {
var defaultHost string
if runtime.GOOS == "windows" {
// If we do not have a host, default to TCP socket on Windows
defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
} else {
// If we do not have a host, default to unix socket
defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
}
return opts.ValidateHost(defaultHost)
}
|
[
"\"DOCKER_HOST\"",
"\"DOCKER_TLS_VERIFY\"",
"\"DOCKER_CERT_PATH\""
] |
[] |
[
"DOCKER_HOST",
"DOCKER_CERT_PATH",
"DOCKER_TLS_VERIFY"
] |
[]
|
["DOCKER_HOST", "DOCKER_CERT_PATH", "DOCKER_TLS_VERIFY"]
|
go
| 3 | 0 | |
src/main/java/com/example/ds/Stack01.java
|
package com.example.ds;
import java.util.Stack;
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
public class Stack01 {
/*public static void main(String[] args) {
Stack<Character> stackChar;
}*/
// Complete the isBalanced function below.
static String isBalanced(String s) {
if (s == null || s.isEmpty()) {
return "YES";
}
// s=s.trim();
Stack<Character> stackChar = new Stack<Character>();
for (int i = 0; i < s.length(); i++) {
char charAt = s.charAt(i);
if (charAt == '(' || charAt == '[' || charAt == '{') {
stackChar.push(charAt);
continue;
}
if (charAt == ')') {
if (stackChar.isEmpty() || stackChar.pop() != '(') {
return "NO";
}
}
if (charAt == '}') {
if (stackChar.isEmpty() || stackChar.pop() != '{') {
return "NO";
}
}
if (charAt == ']') {
if (stackChar.isEmpty() || stackChar.pop() != '[') {
return "NO";
}
}
}
return stackChar.isEmpty() ? "YES" : "NO";
}
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int t = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int tItr = 0; tItr < t; tItr++) {
String s = scanner.nextLine();
String result = isBalanced(s);
bufferedWriter.write(result);
bufferedWriter.newLine();
}
bufferedWriter.close();
scanner.close();
}
}
|
[
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
java
| 1 | 0 | |
ui/handlers.go
|
package ui
import (
"io"
"os"
"os/exec"
"sort"
"strings"
"github.com/atotto/clipboard"
"github.com/ayntgl/discordgo"
"github.com/ayntgl/discordo/util"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
func onAppInputCapture(app *App, e *tcell.EventKey) *tcell.EventKey {
if app.MessageInputField.HasFocus() {
return e
}
switch e.Name() {
case app.Config.Keybindings.ToggleGuildsList:
app.SetFocus(app.GuildsList)
return nil
case app.Config.Keybindings.ToggleChannelsTreeView:
app.SetFocus(app.ChannelsTreeView)
return nil
case app.Config.Keybindings.ToggleMessagesTextView:
app.SetFocus(app.MessagesTextView)
return nil
case app.Config.Keybindings.ToggleMessageInputField:
app.SetFocus(app.MessageInputField)
return nil
}
return e
}
func onGuildsListSelected(app *App, guildIdx int) {
rootTreeNode := app.ChannelsTreeView.GetRoot()
rootTreeNode.ClearChildren()
app.SelectedMessage = -1
app.MessagesTextView.
Highlight().
Clear().
SetTitle("")
app.MessageInputField.SetText("")
// If the user is a bot account, the direct messages item does not exist in the guilds list.
if app.Session.State.User.Bot && guildIdx == 0 {
guildIdx = 1
}
if guildIdx == 0 { // Direct Messages
cs := app.Session.State.PrivateChannels
sort.Slice(cs, func(i, j int) bool {
return cs[i].LastMessageID > cs[j].LastMessageID
})
for _, c := range cs {
channelTreeNode := tview.NewTreeNode(util.ChannelToString(c)).
SetReference(c.ID)
rootTreeNode.AddChild(channelTreeNode)
}
} else { // Guild
cs := app.Session.State.Guilds[guildIdx-1].Channels
sort.Slice(cs, func(i, j int) bool {
return cs[i].Position < cs[j].Position
})
for _, c := range cs {
if (c.Type == discordgo.ChannelTypeGuildText || c.Type == discordgo.ChannelTypeGuildNews) && (c.ParentID == "") {
channelTreeNode := tview.NewTreeNode(util.ChannelToString(c)).
SetReference(c.ID)
rootTreeNode.AddChild(channelTreeNode)
}
}
CATEGORY:
for _, c := range cs {
if c.Type == discordgo.ChannelTypeGuildCategory {
for _, nestedChannel := range cs {
if nestedChannel.ParentID == c.ID {
channelTreeNode := tview.NewTreeNode(c.Name).
SetReference(c.ID)
rootTreeNode.AddChild(channelTreeNode)
continue CATEGORY
}
}
channelTreeNode := tview.NewTreeNode(c.Name).
SetReference(c.ID)
rootTreeNode.AddChild(channelTreeNode)
}
}
for _, c := range cs {
if (c.Type == discordgo.ChannelTypeGuildText || c.Type == discordgo.ChannelTypeGuildNews) && (c.ParentID != "") {
var parentTreeNode *tview.TreeNode
rootTreeNode.Walk(func(node, _ *tview.TreeNode) bool {
if node.GetReference() == c.ParentID {
parentTreeNode = node
return false
}
return true
})
if parentTreeNode != nil {
channelTreeNode := tview.NewTreeNode(util.ChannelToString(c)).
SetReference(c.ID)
parentTreeNode.AddChild(channelTreeNode)
}
}
}
}
app.ChannelsTreeView.SetCurrentNode(rootTreeNode)
app.SetFocus(app.ChannelsTreeView)
}
func onChannelsTreeViewSelected(app *App, n *tview.TreeNode) {
app.SelectedMessage = -1
app.MessagesTextView.
Highlight().
Clear()
app.MessageInputField.SetText("")
c, err := app.Session.State.Channel(n.GetReference().(string))
if err != nil {
return
}
if c.Type == discordgo.ChannelTypeGuildCategory {
n.SetExpanded(!n.IsExpanded())
return
}
app.SelectedChannel = c
app.MessagesTextView.SetTitle(util.ChannelToString(c))
app.SetFocus(app.MessageInputField)
go func() {
ms, err := app.Session.ChannelMessages(c.ID, app.Config.General.FetchMessagesLimit, "", "", "")
if err != nil {
return
}
for i := len(ms) - 1; i >= 0; i-- {
app.SelectedChannel.Messages = append(app.SelectedChannel.Messages, ms[i])
_, err = app.MessagesTextView.Write(buildMessage(app, ms[i]))
if err != nil {
return
}
}
app.MessagesTextView.ScrollToEnd()
}()
}
func onMessagesTextViewInputCapture(app *App, e *tcell.EventKey) *tcell.EventKey {
if app.SelectedChannel == nil {
return nil
}
ms := app.SelectedChannel.Messages
if len(ms) == 0 {
return nil
}
switch e.Name() {
case app.Config.Keybindings.SelectPreviousMessage:
if len(app.MessagesTextView.GetHighlights()) == 0 {
app.SelectedMessage = len(ms) - 1
} else {
app.SelectedMessage--
if app.SelectedMessage < 0 {
app.SelectedMessage = 0
}
}
app.MessagesTextView.
Highlight(ms[app.SelectedMessage].ID).
ScrollToHighlight()
return nil
case app.Config.Keybindings.SelectNextMessage:
if len(app.MessagesTextView.GetHighlights()) == 0 {
app.SelectedMessage = len(ms) - 1
} else {
app.SelectedMessage++
if app.SelectedMessage >= len(ms) {
app.SelectedMessage = len(ms) - 1
}
}
app.MessagesTextView.
Highlight(ms[app.SelectedMessage].ID).
ScrollToHighlight()
return nil
case app.Config.Keybindings.SelectFirstMessage:
app.SelectedMessage = 0
app.MessagesTextView.
Highlight(ms[app.SelectedMessage].ID).
ScrollToHighlight()
return nil
case app.Config.Keybindings.SelectLastMessage:
app.SelectedMessage = len(ms) - 1
app.MessagesTextView.
Highlight(ms[app.SelectedMessage].ID).
ScrollToHighlight()
return nil
case app.Config.Keybindings.ToggleMessageActionsList:
messageActionsList := tview.NewList()
hs := app.MessagesTextView.GetHighlights()
if len(hs) == 0 {
return nil
}
_, m := util.FindMessageByID(app.SelectedChannel.Messages, hs[0])
if m == nil {
return nil
}
if util.HasPermission(app.Session.State, app.SelectedChannel.ID, discordgo.PermissionSendMessages) {
messageActionsList.
AddItem("Reply", "", 'r', nil).
AddItem("Mention Reply", "", 'R', nil)
}
if m.ReferencedMessage != nil {
messageActionsList.AddItem("Select Reply", "", 'm', nil)
}
messageActionsList.
ShowSecondaryText(false).
AddItem("Copy Content", "", 'c', nil).
AddItem("Copy ID", "", 'i', nil).
SetDoneFunc(func() {
app.
SetRoot(app.MainFlex, true).
SetFocus(app.MessagesTextView)
}).
SetSelectedFunc(func(_ int, mainText string, _ string, _ rune) {
onMessageActionsListSelected(app, mainText, m)
}).
SetTitle("Press the Escape key to close").
SetBorder(true)
app.SetRoot(messageActionsList, true)
return nil
}
return e
}
func onMessageActionsListSelected(app *App, mainText string, m *discordgo.Message) {
switch mainText {
case "Copy Content":
if err := clipboard.WriteAll(m.Content); err != nil {
return
}
app.SetRoot(app.MainFlex, false)
case "Copy ID":
if err := clipboard.WriteAll(m.ID); err != nil {
return
}
app.SetRoot(app.MainFlex, false)
case "Reply":
app.MessageInputField.SetTitle("Replying to " + m.Author.String())
app.
SetRoot(app.MainFlex, false).
SetFocus(app.MessageInputField)
case "Mention Reply":
app.MessageInputField.SetTitle("[@] Replying to " + m.Author.String())
app.
SetRoot(app.MainFlex, false).
SetFocus(app.MessageInputField)
case "Select Reply":
app.SelectedMessage, _ = util.FindMessageByID(app.SelectedChannel.Messages, m.ReferencedMessage.ID)
app.MessagesTextView.
Highlight(m.ReferencedMessage.ID).
ScrollToHighlight()
app.
SetRoot(app.MainFlex, false).
SetFocus(app.MessagesTextView)
}
}
func onMessageInputFieldInputCapture(app *App, e *tcell.EventKey) *tcell.EventKey {
switch e.Name() {
case "Enter":
if app.SelectedChannel == nil {
return nil
}
t := strings.TrimSpace(app.MessageInputField.GetText())
if t == "" {
return nil
}
if len(app.MessagesTextView.GetHighlights()) != 0 {
_, m := util.FindMessageByID(app.SelectedChannel.Messages, app.MessagesTextView.GetHighlights()[0])
d := &discordgo.MessageSend{
Content: t,
Reference: m.Reference(),
AllowedMentions: &discordgo.MessageAllowedMentions{RepliedUser: false},
}
if strings.HasPrefix(app.MessageInputField.GetTitle(), "[@]") {
d.AllowedMentions.RepliedUser = true
} else {
d.AllowedMentions.RepliedUser = false
}
go app.Session.ChannelMessageSendComplex(m.ChannelID, d)
app.SelectedMessage = -1
app.MessagesTextView.Highlight()
app.MessageInputField.SetTitle("")
} else {
go app.Session.ChannelMessageSend(app.SelectedChannel.ID, t)
}
app.MessageInputField.SetText("")
return nil
case "Ctrl+V":
text, _ := clipboard.ReadAll()
text = app.MessageInputField.GetText() + text
app.MessageInputField.SetText(text)
return nil
case "Esc":
app.MessageInputField.
SetText("").
SetTitle("")
app.SetFocus(app.MainFlex)
app.SelectedMessage = -1
app.MessagesTextView.Highlight()
return nil
case app.Config.Keybindings.ToggleExternalEditor:
e := os.Getenv("EDITOR")
if e == "" {
return nil
}
f, err := os.CreateTemp(os.TempDir(), "discordo-*.md")
if err != nil {
return nil
}
defer os.Remove(f.Name())
cmd := exec.Command(e, f.Name())
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
app.Suspend(func() {
err = cmd.Run()
if err != nil {
return
}
})
b, err := io.ReadAll(f)
if err != nil {
return nil
}
app.MessageInputField.SetText(string(b))
return nil
}
return e
}
|
[
"\"EDITOR\""
] |
[] |
[
"EDITOR"
] |
[]
|
["EDITOR"]
|
go
| 1 | 0 | |
examples/asr/asr_chunked_inference/rnnt/speech_to_text_buffered_infer_rnnt.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to perform buffered inference using RNNT models.
Buffered inference is the primary form of audio transcription when the audio segment is longer than 20-30 seconds.
This is especially useful for models such as Conformers, which have quadratic time and memory scaling with
audio duration.
The difference between streaming and buffered inference is the chunk size (or the latency of inference).
Buffered inference will use large chunk sizes (5-10 seconds) + some additional buffer for context.
Streaming inference will use small chunk sizes (0.1 to 0.25 seconds) + some additional buffer for context.
# Middle Token merge algorithm
python speech_to_text_buffered_infer_rnnt.py \
--asr_model="<Path to a nemo model>" \
--test_manifest="<Path to a JSON manifest>" \
--model_stride=4 \
--output_path="." \
--total_buffer_in_secs=10.0 \
--chunk_len_in_secs=8.0 \
--device="cuda:0" \
--batch_size=128
# Longer Common Subsequence (LCS) Merge algorithm
python speech_to_text_buffered_infer_rnnt.py \
--asr_model="<Path to a nemo model>" \
--test_manifest="<Path to a JSON manifest>" \
--model_stride=4 \
--output_path="." \
--merge_algo="lcs" \
--lcs_alignment_dir=<OPTIONAL: Some path to store the LCS alignments> \
--total_buffer_in_secs=10.0 \
--chunk_len_in_secs=8.0 \
--device="cuda:0" \
--batch_size=128
# NOTE:
You can use `DEBUG=1 python speech_to_text_buffered_infer_rnnt.py ...` to print out the
ground truth text and predictions of the model.
"""
import copy
import json
import math
import os
from argparse import ArgumentParser
import torch
import tqdm
from omegaconf import OmegaConf, open_dict
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.metrics.wer import word_error_rate
from nemo.collections.asr.parts.utils.streaming_utils import (
BatchedFrameASRRNNT,
LongestCommonSubsequenceBatchedFrameASRRNNT,
)
from nemo.utils import logging
can_gpu = torch.cuda.is_available()
# Common Arguments
parser = ArgumentParser()
parser.add_argument(
"--asr_model", type=str, required=True, help="Path to asr model .nemo file",
)
parser.add_argument("--test_manifest", type=str, required=True, help="path to evaluation data")
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument(
"--total_buffer_in_secs",
type=float,
default=4.0,
help="Length of buffer (chunk + left and right padding) in seconds ",
)
parser.add_argument("--chunk_len_in_secs", type=float, default=1.6, help="Chunk length in seconds")
parser.add_argument("--output_path", type=str, help="path to output file", default=None)
parser.add_argument(
"--model_stride",
type=int,
default=8,
help="Model downsampling factor, 8 for Citrinet models and 4 for Conformer models",
)
parser.add_argument(
'--max_steps_per_timestep', type=int, default=5, help='Maximum number of tokens decoded per acoustic timestep'
)
parser.add_argument('--stateful_decoding', action='store_true', help='Whether to perform stateful decoding')
parser.add_argument('--device', default=None, type=str, required=False)
# Merge algorithm for transducers
parser.add_argument(
'--merge_algo',
default='middle',
type=str,
required=False,
choices=['middle', 'lcs'],
help='Choice of algorithm to apply during inference.',
)
# LCS Merge Algorithm
parser.add_argument(
'--lcs_alignment_dir', type=str, default=None, help='Path to a directory to store LCS algo alignments'
)
def get_wer_feat(mfst, asr, tokens_per_chunk, delay, model_stride_in_secs, batch_size):
hyps = []
refs = []
audio_filepaths = []
with open(mfst, "r") as mfst_f:
print("Parsing manifest files...")
for l in mfst_f:
row = json.loads(l.strip())
audio_filepaths.append(row['audio_filepath'])
refs.append(row['text'])
with torch.inference_mode():
with torch.cuda.amp.autocast():
batch = []
asr.sample_offset = 0
for idx in tqdm.tqdm(range(len(audio_filepaths)), desc='Sample:', total=len(audio_filepaths)):
batch.append((audio_filepaths[idx], refs[idx]))
if len(batch) == batch_size:
audio_files = [sample[0] for sample in batch]
asr.reset()
asr.read_audio_file(audio_files, delay, model_stride_in_secs)
hyp_list = asr.transcribe(tokens_per_chunk, delay)
hyps.extend(hyp_list)
batch.clear()
asr.sample_offset += batch_size
if len(batch) > 0:
asr.batch_size = len(batch)
asr.frame_bufferer.batch_size = len(batch)
asr.reset()
audio_files = [sample[0] for sample in batch]
asr.read_audio_file(audio_files, delay, model_stride_in_secs)
hyp_list = asr.transcribe(tokens_per_chunk, delay)
hyps.extend(hyp_list)
asr.sample_offset += len(batch)
batch.clear()
if os.environ.get('DEBUG', '0') in ('1', 'y', 't'):
for hyp, ref in zip(hyps, refs):
print("hyp:", hyp)
print("ref:", ref)
wer = word_error_rate(hypotheses=hyps, references=refs)
return hyps, refs, wer
def main(args):
torch.set_grad_enabled(False)
if args.asr_model.endswith('.nemo'):
logging.info(f"Using local ASR model from {args.asr_model}")
asr_model = nemo_asr.models.EncDecRNNTBPEModel.restore_from(restore_path=args.asr_model)
else:
logging.info(f"Using NGC cloud ASR model {args.asr_model}")
asr_model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained(model_name=args.asr_model)
cfg = copy.deepcopy(asr_model._cfg)
OmegaConf.set_struct(cfg.preprocessor, False)
# some changes for streaming scenario
cfg.preprocessor.dither = 0.0
cfg.preprocessor.pad_to = 0
if cfg.preprocessor.normalize != "per_feature":
logging.error("Only EncDecRNNTBPEModel models trained with per_feature normalization are supported currently")
device = args.device
if device is None:
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
logging.info(f"Inference will be done on device : {device}")
# Disable config overwriting
OmegaConf.set_struct(cfg.preprocessor, True)
asr_model.freeze()
asr_model = asr_model.to(device)
# Change Decoding Config
decoding_cfg = asr_model.cfg.decoding
with open_dict(decoding_cfg):
if args.stateful_decoding:
decoding_cfg.strategy = "greedy"
else:
decoding_cfg.strategy = "greedy_batch"
decoding_cfg.preserve_alignments = True # required to compute the middle token for transducers.
decoding_cfg.fused_batch_size = -1 # temporarily stop fused batch during inference.
asr_model.change_decoding_strategy(decoding_cfg)
feature_stride = cfg.preprocessor['window_stride']
model_stride_in_secs = feature_stride * args.model_stride
total_buffer = args.total_buffer_in_secs
chunk_len = float(args.chunk_len_in_secs)
tokens_per_chunk = math.ceil(chunk_len / model_stride_in_secs)
mid_delay = math.ceil((chunk_len + (total_buffer - chunk_len) / 2) / model_stride_in_secs)
print("Tokens per chunk :", tokens_per_chunk, "Min Delay :", mid_delay)
if args.merge_algo == 'middle':
frame_asr = BatchedFrameASRRNNT(
asr_model=asr_model,
frame_len=chunk_len,
total_buffer=args.total_buffer_in_secs,
batch_size=args.batch_size,
max_steps_per_timestep=args.max_steps_per_timestep,
stateful_decoding=args.stateful_decoding,
)
elif args.merge_algo == 'lcs':
frame_asr = LongestCommonSubsequenceBatchedFrameASRRNNT(
asr_model=asr_model,
frame_len=chunk_len,
total_buffer=args.total_buffer_in_secs,
batch_size=args.batch_size,
max_steps_per_timestep=args.max_steps_per_timestep,
stateful_decoding=args.stateful_decoding,
alignment_basepath=args.lcs_alignment_dir,
)
# Set the LCS algorithm delay.
frame_asr.lcs_delay = math.floor(((total_buffer - chunk_len)) / model_stride_in_secs)
else:
raise ValueError("Invalid choice of merge algorithm for transducer buffered inference.")
hyps, refs, wer = get_wer_feat(
mfst=args.test_manifest,
asr=frame_asr,
tokens_per_chunk=tokens_per_chunk,
delay=mid_delay,
model_stride_in_secs=model_stride_in_secs,
batch_size=args.batch_size,
)
logging.info(f"WER is {round(wer, 4)} when decoded with a delay of {round(mid_delay*model_stride_in_secs, 2)}s")
if args.output_path is not None:
fname = (
os.path.splitext(os.path.basename(args.asr_model))[0]
+ "_"
+ os.path.splitext(os.path.basename(args.test_manifest))[0]
+ "_"
+ str(args.chunk_len_in_secs)
+ "_"
+ str(int(total_buffer * 1000))
+ "_"
+ args.merge_algo
+ ".json"
)
hyp_json = os.path.join(args.output_path, fname)
os.makedirs(args.output_path, exist_ok=True)
with open(hyp_json, "w") as out_f:
for i, hyp in enumerate(hyps):
record = {
"pred_text": hyp,
"text": refs[i],
"wer": round(word_error_rate(hypotheses=[hyp], references=[refs[i]]) * 100, 2),
}
out_f.write(json.dumps(record) + '\n')
if __name__ == '__main__':
args = parser.parse_args()
main(args) # noqa pylint: disable=no-value-for-parameter
|
[] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
python
| 1 | 0 | |
cmd/postgres_exporter/postgres_exporter_integration_test.go
|
// These are specialized integration tests. They are only built on demand, since it
// takes a lot of additional work to keep the external docker environment they
// require working.
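// A hedged example invocation (the DSN below is hypothetical, adjust it to your environment):
//   DATA_SOURCE_NAME="postgresql://postgres:postgres@localhost:5432/postgres?sslmode=disable" \
//     go test -tags integration ./...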
// +build integration
package main
import (
"fmt"
"os"
"strings"
"testing"
_ "github.com/lib/pq"
"github.com/prometheus/client_golang/prometheus"
. "gopkg.in/check.v1"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }
type IntegrationSuite struct {
e *Exporter
}
var _ = Suite(&IntegrationSuite{})
func (s *IntegrationSuite) SetUpSuite(c *C) {
dsn := os.Getenv("DATA_SOURCE_NAME")
c.Assert(dsn, Not(Equals), "")
exporter := NewExporter(strings.Split(dsn, ","))
c.Assert(exporter, NotNil)
// Assign the exporter to the suite
s.e = exporter
prometheus.MustRegister(exporter)
}
// TODO: it would be nice if we didn't mostly just recreate the scrape function
func (s *IntegrationSuite) TestAllNamespacesReturnResults(c *C) {
// Setup a dummy channel to consume metrics
ch := make(chan prometheus.Metric, 100)
go func() {
for range ch {
}
}()
for _, dsn := range s.e.dsn {
// Open a database connection
server, err := NewServer(dsn)
c.Assert(server, NotNil)
c.Assert(err, IsNil)
// Do a version update
err = s.e.checkMapVersions(ch, server)
c.Assert(err, IsNil)
err = querySettings(ch, server)
if !c.Check(err, Equals, nil) {
fmt.Println("## ERRORS FOUND")
fmt.Println(err)
}
// This should never happen in our test cases.
errMap := queryNamespaceMappings(ch, server)
if !c.Check(len(errMap), Equals, 0) {
fmt.Println("## NAMESPACE ERRORS FOUND")
for namespace, err := range errMap {
fmt.Println(namespace, ":", err)
}
}
server.Close()
}
}
// TestInvalidDsnDoesntCrash tests that specifying an invalid DSN doesn't crash
// the exporter. Related to https://github.com/wrouesnel/postgres_exporter/issues/93
// although not a replication of the scenario.
func (s *IntegrationSuite) TestInvalidDsnDoesntCrash(c *C) {
// Setup a dummy channel to consume metrics
ch := make(chan prometheus.Metric, 100)
go func() {
for range ch {
}
}()
// Send a bad DSN
exporter := NewExporter([]string{"invalid dsn"})
c.Assert(exporter, NotNil)
exporter.scrape(ch)
// Send a DSN to a non-listening port.
exporter = NewExporter([]string{"postgresql://nothing:[email protected]:1/nothing"})
c.Assert(exporter, NotNil)
exporter.scrape(ch)
}
// TestUnknownMetricParsingDoesntCrash deliberately deletes all the column maps out
// of an exporter to test that the default metric handling code can cope with unknown columns.
func (s *IntegrationSuite) TestUnknownMetricParsingDoesntCrash(c *C) {
// Setup a dummy channel to consume metrics
ch := make(chan prometheus.Metric, 100)
go func() {
for range ch {
}
}()
dsn := os.Getenv("DATA_SOURCE_NAME")
c.Assert(dsn, Not(Equals), "")
exporter := NewExporter(strings.Split(dsn, ","))
c.Assert(exporter, NotNil)
// Convert the default maps into a map of empty maps.
emptyMaps := make(map[string]intermediateMetricMap, 0)
for k := range exporter.builtinMetricMaps {
emptyMaps[k] = intermediateMetricMap{
map[string]ColumnMapping{},
true,
0,
}
}
exporter.builtinMetricMaps = emptyMaps
// scrape the exporter and make sure it works
exporter.scrape(ch)
}
// TestExtendQueriesDoesntCrash tests that specifying extend.query-path doesn't
// crash.
func (s *IntegrationSuite) TestExtendQueriesDoesntCrash(c *C) {
// Setup a dummy channel to consume metrics
ch := make(chan prometheus.Metric, 100)
go func() {
for range ch {
}
}()
dsn := os.Getenv("DATA_SOURCE_NAME")
c.Assert(dsn, Not(Equals), "")
exporter := NewExporter(
strings.Split(dsn, ","),
WithUserQueriesPath("../user_queries_test.yaml"),
)
c.Assert(exporter, NotNil)
// scrape the exporter and make sure it works
exporter.scrape(ch)
}
|
[
"\"DATA_SOURCE_NAME\"",
"\"DATA_SOURCE_NAME\"",
"\"DATA_SOURCE_NAME\""
] |
[] |
[
"DATA_SOURCE_NAME"
] |
[]
|
["DATA_SOURCE_NAME"]
|
go
| 1 | 0 | |
ingress/ambassador/ambassador/ambassador/config.py
|
# Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import sys
import collections
import datetime
import json
import logging
import os
import re
from urllib.parse import urlparse
import jsonschema
import semantic_version
import yaml
from pkg_resources import Requirement, resource_filename
from jinja2 import Environment, FileSystemLoader
from .utils import RichStatus, SourcedDict, read_cert_secret, save_cert, TLSPaths, kube_v1, check_cert_file
from .mapping import Mapping
from scout import Scout
from .VERSION import Version
#############################################################################
## config.py -- the main configuration parser for Ambassador
##
## Ambassador configures itself by creating a new Config object, which calls
## Config.__init__().
##
## __init__() sets up all the defaults for everything, then walks over all the
## YAML it can find and calls self.load_yaml() to load each YAML file. After
## everything is loaded, it calls self.process_all_objects() to build the
## config objects.
##
## load_yaml() does the heavy lifting around YAML parsing and such, including
## managing K8s annotations if so requested. Every object in every YAML file is
## parsed and saved before any object is processed.
##
## process_all_objects() walks all the saved objects and creates an internal
## representation of the Ambassador config in the data structures initialized
## by __init__(). Each object is processed with self.process_object(). This
## internal representation is called the intermediate config.
##
## process_object() handles a single parsed object from YAML. It uses
## self.validate_object() to make sure of a schema match; assuming that's
## good, most of the heavy lifting is done by a handler method. The handler
## method for a given type is named handle_kind(), with kind in lowercase,
## so e.g. the Mapping object is processed using the handle_mapping() method.
##
## After all of that, the actual Envoy config is generated from the intermediate
## config using generate_envoy_config().
##
## The diag service also uses generate_intermediate_for() to extract the
## intermediate config for a given mapping or service.
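##
## A minimal usage sketch, assuming only what this module itself defines (the directory
## path below is hypothetical):
##
##     config = Config("/etc/ambassador-config")   # parse YAML, build the intermediate config
##     rc = config.generate_envoy_config()         # render Envoy JSON via the Jinja2 template
##     if rc:
##         print(rc.envoy_config)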
def get_semver(what, version_string):
semver = None
try:
semver = semantic_version.Version(version_string)
except ValueError:
pass
return semver
class Config (object):
# Weird stuff. The build version looks like
#
# 0.12.0 for a prod build, or
# 0.12.1-b2.da5d895.DIRTY for a dev build (in this case made from a dirty tree)
#
# Now:
# - Scout needs a build number (semver "+something") to flag a non-prod release;
# but
# - DockerHub cannot use a build number at all; but
# - 0.12.1-b2 comes _before_ 0.12.1+b2 in SemVer land.
#
# FFS.
#
# We cope with this by transforming e.g.
#
# 0.12.1-b2.da5d895.DIRTY into 0.12.1-b2+da5d895.DIRTY
#
# for Scout.
scout_version = Version
if '-' in scout_version:
# TODO(plombardi): This version code needs to be rewritten. We should only report RC and GA versions.
#
# As of the time when we moved to the streamlined branch, merge, and release model, the way versions in development
# land are rendered has changed. A development version no longer has any <MAJOR>.<MINOR>.<PATCH> information and
# is instead rendered as <BRANCH_NAME>-<GIT_SHORT_HASH>[-dirty] where [-dirty] is only appended for modified
# source trees.
#
# Long term we are planning to remove the version report for development branches anyways so all of this
# formatting for versions
scout_version = "0.0.0-" + Version.split("-")[1] # middle part is commit hash
# Dev build!
# v, p = scout_version.split('-')
# p, b = p.split('.', 1) if ('.' in p) else (0, p)
#
# scout_version = "%s-%s+%s" % (v, p, b)
# Use scout_version here, not __version__, because the version
# coming back from Scout will use build numbers for dev builds, but
# __version__ won't, and we need to be consistent for comparison.
current_semver = get_semver("current", scout_version)
# When using multiple Ambassadors in one cluster, use AMBASSADOR_ID to distinguish them.
ambassador_id = os.environ.get('AMBASSADOR_ID', 'default')
runtime = "kubernetes" if os.environ.get('KUBERNETES_SERVICE_HOST', None) else "docker"
namespace = os.environ.get('AMBASSADOR_NAMESPACE', 'default')
# Default to using the Nil UUID unless the environment variable is set explicitly
scout_install_id = os.environ.get('AMBASSADOR_SCOUT_ID', "00000000-0000-0000-0000-000000000000")
try:
scout = Scout(app="ambassador", version=scout_version, install_id=scout_install_id)
scout_error = None
except OSError as e:
scout_error = e
scout_latest_version = None
scout_latest_semver = None
scout_notices = []
scout_last_response = None
scout_last_update = datetime.datetime.now() - datetime.timedelta(hours=24)
scout_update_frequency = datetime.timedelta(hours=4)
@classmethod
def scout_report(klass, force_result=None, **kwargs):
_notices = []
env_result = os.environ.get("AMBASSADOR_SCOUT_RESULT", None)
if env_result:
force_result = json.loads(env_result)
result = force_result
result_timestamp = None
result_was_cached = False
if not result:
if Config.scout:
if 'runtime' not in kwargs:
kwargs['runtime'] = Config.runtime
# How long since the last Scout update? If it's been more than an hour,
# check Scout again.
now = datetime.datetime.now()
if (now - Config.scout_last_update) > Config.scout_update_frequency:
result = Config.scout.report(**kwargs)
Config.scout_last_update = now
Config.scout_last_result = dict(**result)
else:
# _notices.append({ "level": "debug", "message": "Returning cached result" })
result = dict(**Config.scout_last_result)
result_was_cached = True
result_timestamp = Config.scout_last_update
else:
result = { "scout": "unavailable" }
result_timestamp = datetime.datetime.now()
else:
_notices.append({ "level": "debug", "message": "Returning forced result" })
result_timestamp = datetime.datetime.now()
if not Config.current_semver:
_notices.append({
"level": "warning",
"message": "Ambassador has bad version '%s'??!" % Config.scout_version
})
result['cached'] = result_was_cached
result['timestamp'] = result_timestamp.timestamp()
# Do version & notices stuff.
if 'latest_version' in result:
latest_version = result['latest_version']
latest_semver = get_semver("latest", latest_version)
if latest_semver:
Config.scout_latest_version = latest_version
Config.scout_latest_semver = latest_semver
else:
_notices.append({
"level": "warning",
"message": "Scout returned bad version '%s'??!" % latest_version
})
if (Config.scout_latest_semver and
((not Config.current_semver) or
(Config.scout_latest_semver > Config.current_semver))):
_notices.append({
"level": "info",
"message": "Upgrade available! to Ambassador version %s" % Config.scout_latest_semver
})
if 'notices' in result:
_notices.extend(result['notices'])
Config.scout_notices = _notices
return result
def __init__(self, config_dir_path, k8s=False, schema_dir_path=None, template_dir_path=None):
self.config_dir_path = config_dir_path
if not template_dir_path:
template_dir_path = resource_filename(Requirement.parse("ambassador"),"templates")
if not schema_dir_path:
schema_dir_path = resource_filename(Requirement.parse("ambassador"),"schemas")
self.schema_dir_path = schema_dir_path
self.template_dir_path = template_dir_path
self.namespace = os.environ.get('AMBASSADOR_NAMESPACE', 'default')
self.logger = logging.getLogger("ambassador.config")
self.logger.debug("Scout version %s" % Config.scout_version)
self.logger.debug("Runtime %s" % Config.runtime)
self.logger.debug("CONFIG DIR %s" % os.path.abspath(self.config_dir_path))
self.logger.debug("TEMPLATE DIR %s" % os.path.abspath(self.template_dir_path))
self.logger.debug("SCHEMA DIR %s" % os.path.abspath(self.schema_dir_path))
if Config.scout_error:
self.logger.warning("Couldn't do version check: %s" % str(Config.scout_error))
self.schemas = {}
self.config = {}
self.tls_contexts = {}
self.envoy_config = {}
self.envoy_clusters = {}
self.envoy_routes = {}
self.sources = {
"--internal--": {
"_source": "--internal--",
"kind": "Internal",
"version": "v0",
"name": "Ambassador Internals",
"filename": "--internal--",
"index": 0,
"description": "The '--internal--' source marks objects created by Ambassador's internal logic."
},
"--diagnostics--": {
"_source": "--diagnostics--",
"kind": "diagnostics",
"version": "v0",
"name": "Ambassador Diagnostics",
"filename": "--diagnostics--",
"index": 0,
"description": "The '--diagnostics--' source marks objects created by Ambassador to assist with diagnostic output."
}
}
self.source_map = {
'--internal--': { '--internal--': True }
}
self.source_overrides = {}
self.default_liveness_probe = {
"enabled": True,
"prefix": "/ambassador/v0/check_alive",
"rewrite": "/ambassador/v0/check_alive",
# "service" gets added later
}
self.default_readiness_probe = {
"enabled": True,
"prefix": "/ambassador/v0/check_ready",
"rewrite": "/ambassador/v0/check_ready",
# "service" gets added later
}
self.default_diagnostics = {
"enabled": True,
"prefix": "/ambassador/v0/",
"rewrite": "/ambassador/v0/",
# "service" gets added later
}
# 'server' and 'client' are special contexts. Others
# use cert_chain_file defaulting to context.crt,
# private_key_file (context.key), and cacert_chain_file
# (context.pem).
self.default_tls_config = {
"server": {},
"client": {},
}
if os.path.isfile(TLSPaths.mount_tls_crt.value):
self.default_tls_config["server"]["cert_chain_file"] = TLSPaths.mount_tls_crt.value
if os.path.isfile(TLSPaths.mount_tls_key.value):
self.default_tls_config["server"]["private_key_file"] = TLSPaths.mount_tls_key.value
if os.path.isfile(TLSPaths.client_mount_crt.value):
self.default_tls_config["client"]["cacert_chain_file"] = TLSPaths.client_mount_crt.value
self.tls_config = None
self.errors = {}
self.fatal_errors = 0
self.object_errors = 0
self.objects_to_process = []
if not os.path.isdir(self.config_dir_path):
raise Exception("ERROR ERROR ERROR configuration directory %s does not exist; exiting" % self.config_dir_path)
for dirpath, dirnames, filenames in os.walk(self.config_dir_path, topdown=True):
# Modify dirnames in-place (dirnames[:]) to remove any weird directories
# whose names start with '.' -- why? because my GKE cluster mounts my
# ConfigMap with a self-referential directory named
# /etc/ambassador-config/..9989_25_09_15_43_06.922818753, and if we don't
# ignore that, we end up trying to read the same config files twice, which
# triggers the collision checks. Sigh.
dirnames[:] = sorted([ d for d in dirnames if not d.startswith('.') ])
# self.logger.debug("WALK %s: dirs %s, files %s" % (dirpath, dirnames, filenames))
for filename in sorted([ x for x in filenames if x.endswith(".yaml") ]):
filepath = os.path.join(dirpath, filename)
self.load_yaml(filepath, filename, open(filepath, "r").read(), ocount=1, k8s=k8s)
self.process_all_objects()
if self.fatal_errors:
# Kaboom.
raise Exception("ERROR ERROR ERROR Unparseable configuration; exiting")
if self.errors:
self.logger.error("ERROR ERROR ERROR Starting with configuration errors")
self.generate_intermediate_config()
def load_yaml(self, filepath, filename, serialization, resource_identifier=None, ocount=1, k8s=False):
try:
# XXX This is a bit of a hack -- yaml.safe_load_all returns a
# generator, and if we don't use list() here, any exception
# dealing with the actual object gets deferred
for obj in yaml.safe_load_all(serialization):
if k8s:
ocount = self.prep_k8s(filepath, filename, ocount, obj)
else:
# k8s objects will have an identifier, for other objects use filepath
object_unique_id = resource_identifier or filepath
self.objects_to_process.append((object_unique_id, filename, ocount, obj))
ocount += 1
except Exception as e:
# No sense letting one attribute with bad YAML take down the whole
# gateway, so post the error but keep any objects we were able to
# parse before hitting the error.
self.resource_identifier = resource_identifier or filepath
self.filename = filename
self.ocount = ocount
self.post_error(RichStatus.fromError("%s: could not parse YAML" % filepath))
return ocount
def prep_k8s(self, filepath, filename, ocount, obj):
kind = obj.get('kind', None)
if kind != "Service":
self.logger.debug("%s/%s: ignoring K8s %s object" %
(filepath, ocount, kind))
return ocount
metadata = obj.get('metadata', None)
if not metadata:
self.logger.debug("%s/%s: ignoring unannotated K8s %s" %
(filepath, ocount, kind))
return ocount
# Use metadata to build a unique resource identifier
resource_name = metadata.get('name')
# This should never happen as the name field is required in metadata for Service
if not resource_name:
self.logger.debug("%s/%s: ignoring unnamed K8s %s" %
(filepath, ocount, kind))
return ocount
resource_namespace = metadata.get('namespace', 'default')
# This resource identifier is useful for log output since filenames can be duplicated (multiple subdirectories)
resource_identifier = '{name}.{namespace}'.format(namespace=resource_namespace, name=resource_name)
annotations = metadata.get('annotations', None)
if annotations:
annotations = annotations.get('getambassador.io/config', None)
# self.logger.debug("annotations %s" % annotations)
if not annotations:
self.logger.debug("%s/%s: ignoring K8s %s without Ambassador annotation" %
(filepath, ocount, kind))
return ocount
return self.load_yaml(filepath, filename + ":annotation", annotations, ocount=ocount, resource_identifier=resource_identifier)
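# Illustrative (hypothetical) annotated Service that prep_k8s would pick up -- the names
# and prefix below are examples only, not taken from this repository:
#
#   apiVersion: v1
#   kind: Service
#   metadata:
#     name: usersvc
#     annotations:
#       getambassador.io/config: |
#         ---
#         apiVersion: ambassador/v0
#         kind: Mapping
#         name: usersvc_mapping
#         prefix: /user/
#         service: usersvc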
def process_all_objects(self):
for resource_identifier, filename, ocount, obj in sorted(self.objects_to_process):
# resource_identifier is either a filepath or <name>.<namespace>
self.resource_identifier = resource_identifier
# This fallback prevents issues for internal/diagnostics objects
self.filename = filename
self.ocount = ocount
if self.filename in self.source_overrides:
# Let Pragma objects override source information for this filename.
override = self.source_overrides[self.filename]
self.source = override.get('source', self.filename)
self.ocount += override.get('ocount_delta', 0)
else:
# No pragma involved here; just default to the filename.
self.source = self.filename
# Is the object empty?
if obj is None:
self.logger.debug("Annotation has empty config")
return
# Is an ambassador_id present in this object?
allowed_ids = obj.get('ambassador_id', 'default')
if allowed_ids:
# Make sure it's a list. Yes, this is Draconian,
# but the jsonschema will allow only a string or a list,
# and guess what? Strings are iterables.
if type(allowed_ids) != list:
allowed_ids = [ allowed_ids ]
if Config.ambassador_id not in allowed_ids:
self.logger.debug("PROCESS: skip %s.%d; id %s not in %s" %
(self.resource_identifier, self.ocount, Config.ambassador_id, allowed_ids))
continue
self.logger.debug("PROCESS: %s.%d => %s" % (self.resource_identifier, self.ocount, self.source))
rc = self.process_object(obj)
if not rc:
# Object error. Not good but we'll allow the system to start.
self.post_error(rc)
def clean_and_copy(self, d):
out = []
for key in sorted(d.keys()):
original = d[key]
copy = dict(**original)
if '_source' in original:
del(original['_source'])
if '_referenced_by' in original:
del(original['_referenced_by'])
out.append(copy)
return out
def current_source_key(self):
return("%s.%d" % (self.filename, self.ocount))
def post_error(self, rc, key=None):
if not key:
key = self.current_source_key()
# Yuck.
filename = re.sub(r'\.\d+$', '', key)
# Fetch the relevant source info. If it doesn't exist, stuff
# in a fake record.
source_info = self.sources.setdefault(key, {
'kind': 'error',
'version': 'error',
'name': 'error',
'filename': filename,
'index': self.ocount,
'yaml': 'error'
})
source_info.setdefault('errors', [])
source_info['errors'].append(rc.toDict())
source_map = self.source_map.setdefault(filename, {})
source_map[key] = True
errors = self.errors.setdefault(key, [])
errors.append(rc.toDict())
self.logger.error("%s (%s): %s" % (key, filename, rc))
def process_object(self, obj):
# Cache the source key first thing...
source_key = self.current_source_key()
# This should be impossible.
if not obj:
return RichStatus.fromError("undefined object???")
try:
obj_version = obj['apiVersion']
obj_kind = obj['kind']
except KeyError:
return RichStatus.fromError("need apiVersion, kind")
# Is this a pragma object?
if obj_kind == 'Pragma':
# Yes. Handle this inline and be done.
return self.handle_pragma(source_key, obj)
# Not a pragma. It needs a name...
if 'name' not in obj:
return RichStatus.fromError("need name")
obj_name = obj['name']
# ...and off we go. Save the source info...
self.sources[source_key] = {
'kind': obj_kind,
'version': obj_version,
'name': obj_name,
'filename': self.filename,
'index': self.ocount,
'yaml': yaml.safe_dump(obj, default_flow_style=False)
}
# ...and figure out if this thing is OK.
rc = self.validate_object(obj)
if not rc:
# Well that's no good.
return rc
# Make sure it has a source: use what's in the object if present,
# otherwise use self.source.
self.sources[source_key]['_source'] = obj.get('source', self.source)
# self.logger.debug("source for %s is %s" % (source_key, self.sources[source_key]['_source']))
source_map = self.source_map.setdefault(self.filename, {})
source_map[source_key] = True
# OK, so far so good. Grab the handler for this object type.
handler_name = "handle_%s" % obj_kind.lower()
handler = getattr(self, handler_name, None)
if not handler:
handler = self.save_object
self.logger.warning("%s[%d]: no handler for %s, just saving" %
(self.resource_identifier, self.ocount, obj_kind))
# else:
# self.logger.debug("%s[%d]: handling %s..." %
# (self.filename, self.ocount, obj_kind))
try:
handler(source_key, obj, obj_name, obj_kind, obj_version)
except Exception as e:
# Bzzzt.
return RichStatus.fromError("could not process %s object: %s" % (obj_kind, e))
# OK, all's well.
return RichStatus.OK(msg="%s object processed successfully" % obj_kind)
def validate_object(self, obj):
# Each object must be a dict, and must include "apiVersion"
# and "type" at toplevel.
if not isinstance(obj, collections.Mapping):
return RichStatus.fromError("not a dictionary")
if not (("apiVersion" in obj) and ("kind" in obj) and ("name" in obj)):
return RichStatus.fromError("must have apiVersion, kind, and name")
obj_version = obj['apiVersion']
obj_kind = obj['kind']
obj_name = obj['name']
if obj_version.startswith("ambassador/"):
obj_version = obj_version.split('/')[1]
else:
return RichStatus.fromError("apiVersion %s unsupported" % obj_version)
schema_key = "%s-%s" % (obj_version, obj_kind)
schema = self.schemas.get(schema_key, None)
if not schema:
schema_path = os.path.join(self.schema_dir_path, obj_version,
"%s.schema" % obj_kind)
try:
schema = json.load(open(schema_path, "r"))
except OSError:
self.logger.debug("no schema at %s, skipping" % schema_path)
except json.decoder.JSONDecodeError as e:
self.logger.warning("corrupt schema at %s, skipping (%s)" %
(schema_path, e))
if schema:
self.schemas[schema_key] = schema
try:
jsonschema.validate(obj, schema)
except jsonschema.exceptions.ValidationError as e:
return RichStatus.fromError("not a valid %s: %s" % (obj_kind, e))
return RichStatus.OK(msg="valid %s" % obj_kind,
details=(obj_kind, obj_version, obj_name))
def safe_store(self, source_key, storage_name, obj_name, obj_kind, value, allow_log=True):
storage = self.config.setdefault(storage_name, {})
if obj_name in storage:
# Oooops.
raise Exception("%s[%d] defines %s %s, which is already present" %
(self.resource_identifier, self.ocount, obj_kind, obj_name))
if allow_log:
self.logger.debug("%s[%d]: saving %s %s" %
(self.resource_identifier, self.ocount, obj_kind, obj_name))
storage[obj_name] = value
return storage[obj_name]
def save_object(self, source_key, obj, obj_name, obj_kind, obj_version):
return self.safe_store(source_key, obj_kind, obj_name, obj_kind,
SourcedDict(_source=source_key, **obj))
def handle_pragma(self, source_key, obj):
keylist = sorted([x for x in sorted(obj.keys()) if ((x != 'apiVersion') and (x != 'kind'))])
# self.logger.debug("PRAGMA: %s" % keylist)
for key in keylist:
if key == 'source':
override = self.source_overrides.setdefault(self.filename, {})
override['source'] = obj['source']
self.logger.debug("PRAGMA: override %s to %s" %
(self.resource_identifier, self.source_overrides[self.filename]['source']))
elif key == 'autogenerated':
override = self.source_overrides.setdefault(self.filename, {})
override['ocount_delta'] = -1
# self.logger.debug("PRAGMA: autogenerated, setting ocount_delta to -1")
# else:
# self.logger.debug("PRAGMA: skip %s" % key)
return RichStatus.OK(msg="handled pragma object")
def handle_module(self, source_key, obj, obj_name, obj_kind, obj_version):
return self.safe_store(source_key, "modules", obj_name, obj_kind,
SourcedDict(_source=source_key, **obj['config']))
def handle_ratelimitservice(self, source_key, obj, obj_name, obj_kind, obj_version):
return self.safe_store(source_key, "ratelimit_configs", obj_name, obj_kind,
SourcedDict(_source=source_key, **obj))
def handle_tracingservice(self, source_key, obj, obj_name, obj_kind, obj_version):
return self.safe_store(source_key, "tracing_configs", obj_name, obj_kind,
SourcedDict(_source=source_key, **obj))
def handle_authservice(self, source_key, obj, obj_name, obj_kind, obj_version):
return self.safe_store(source_key, "auth_configs", obj_name, obj_kind,
SourcedDict(_source=source_key, **obj))
def handle_mapping(self, source_key, obj, obj_name, obj_kind, obj_version):
mapping = Mapping(source_key, **obj)
return self.safe_store(source_key, "mappings", obj_name, obj_kind, mapping)
def diag_port(self):
modules = self.config.get("modules", {})
amod = modules.get("ambassador", {})
return amod.get("diag_port", 8877)
def diag_service(self):
return "127.0.0.1:%d" % self.diag_port()
def add_intermediate_cluster(self, _source, name, _service, urls,
type="strict_dns", lb_type="round_robin",
cb_name=None, od_name=None, originate_tls=None,
grpc=False, host_rewrite=None, ssl_context=None):
if name not in self.envoy_clusters:
self.logger.debug("CLUSTER %s: new from %s" % (name, _source))
cluster = SourcedDict(
_source=_source,
_referenced_by=[ _source ],
_service=_service,
name=name,
type=type,
lb_type=lb_type,
urls=urls
)
if cb_name and (cb_name in self.breakers):
cluster['circuit_breakers'] = self.breakers[cb_name]
self.breakers[cb_name]._mark_referenced_by(_source)
if od_name and (od_name in self.outliers):
cluster['outlier_detection'] = self.outliers[od_name]
self.outliers[od_name]._mark_referenced_by(_source)
if originate_tls == True:
cluster['tls_context'] = { '_ambassador_enabled': True }
cluster['tls_array'] = []
elif (originate_tls and (originate_tls in self.tls_contexts)):
cluster['tls_context'] = self.tls_contexts[originate_tls]
self.tls_contexts[originate_tls]._mark_referenced_by(_source)
tls_array = []
for key, value in cluster['tls_context'].items():
if key.startswith('_'):
continue
tls_array.append({ 'key': key, 'value': value })
cluster['tls_array'] = sorted(tls_array, key=lambda x: x['key'])
elif ssl_context:
cluster['tls_context'] = ssl_context
tls_array = []
for key, value in ssl_context.items():
tls_array.append({ 'key': key, 'value': value })
cluster['tls_array'] = sorted(tls_array, key=lambda x: x['key'])
if host_rewrite and originate_tls:
cluster['tls_array'].append({'key': 'sni', 'value': host_rewrite })
if grpc:
cluster['features'] = 'http2'
self.envoy_clusters[name] = cluster
else:
self.logger.debug("CLUSTER %s: referenced by %s" % (name, _source))
self.envoy_clusters[name]._mark_referenced_by(_source)
# XXX This is a silly API. We should have a Cluster object that can carry what kind
# of cluster it is (this is a target cluster of weight 50%, this is a shadow cluster,
# whatever) and the API should be "add this cluster to this Mapping".
def add_intermediate_route(self, _source, mapping, svc, cluster_name, shadow=False):
route = self.envoy_routes.get(mapping.group_id, None)
host_redirect = mapping.get('host_redirect', False)
shadow = mapping.get('shadow', False)
if route:
# Is this a host_redirect? If so, that's an error.
if host_redirect:
self.logger.error("ignoring non-unique host_redirect mapping %s (see also %s)" %
(mapping['name'], route['_source']))
# Is this a shadow? If so, is there already a shadow marked?
elif shadow:
extant_shadow = route.get('shadow', None)
if extant_shadow:
shadow_name = extant_shadow.get('name', None)
if shadow_name != cluster_name:
self.logger.error("mapping %s defines multiple shadows! Ignoring %s" %
(mapping['name'], cluster_name))
else:
# XXX CODE DUPLICATION with mapping.py!!
# We're going to need to support shadow weighting later, so use a dict here.
route['shadow'] = {
'name': cluster_name
}
route.setdefault('clusters', [])
else:
# Take the easy way out -- just add a new entry to this
# route's set of weighted clusters.
route["clusters"].append( { "name": cluster_name,
"weight": mapping.attrs.get("weight", None) } )
route._mark_referenced_by(_source)
return
# OK, if we get here, we don't have an extant route group for this Mapping. Make a
# new one.
route = mapping.new_route(svc, cluster_name)
self.envoy_routes[mapping.group_id] = route
def service_tls_check(self, svc, context, host_rewrite):
originate_tls = False
name_fields = None
if svc.lower().startswith("http://"):
originate_tls = False
svc = svc[len("http://"):]
elif svc.lower().startswith("https://"):
originate_tls = True
name_fields = [ 'otls' ]
svc = svc[len("https://"):]
elif context == True:
originate_tls = True
name_fields = [ 'otls' ]
# Separate if here because you need to be able to specify a context
# even after you say "https://" for the service.
if context and (context != True):
if context in self.tls_contexts:
name_fields = [ 'otls', context ]
originate_tls = context
else:
self.logger.error("Originate-TLS context %s is not defined" % context)
if originate_tls and host_rewrite:
name_fields.append("hr-%s" % host_rewrite)
port = 443 if originate_tls else 80
context_name = "_".join(name_fields) if name_fields else None
svc_url = 'tcp://%s' % svc
if ':' not in svc:
svc_url = '%s:%d' % (svc_url, port)
return (svc, svc_url, originate_tls, context_name)
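# Illustrative results of service_tls_check (the service strings are hypothetical):
#   ("https://usersvc", None, None)  -> ("usersvc", "tcp://usersvc:443", True, "otls")
#   ("usersvc:5000", None, None)     -> ("usersvc:5000", "tcp://usersvc:5000", False, None)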
def add_clusters_for_mapping(self, mapping):
svc = mapping['service']
tls_context = mapping.get('tls', None)
grpc = mapping.get('grpc', False)
host_rewrite = mapping.get('host_rewrite', None)
# Given the service and the TLS context, first initialize the cluster name for the
# main service with the incoming service string...
cluster_name_fields = [ svc ]
host_redirect = mapping.get('host_redirect', False)
shadow = mapping.get('shadow', False)
if host_redirect:
if shadow:
# Not allowed.
errstr = "At most one of host_redirect and shadow may be set; ignoring host_redirect"
self.post_error(RichStatus.fromError(errstr), key=mapping['_source'])
host_redirect = False
else:
# Short-circuit. You needn't actually create a cluster for a
# host_redirect mapping.
return svc, None
if shadow:
cluster_name_fields.insert(0, "shadow")
# ...then do whatever normalization we need for the name and the URL. This can
# change the service name (e.g. "http://foo" will turn into "foo"), so we set
# up cluster_name_fields above in order to preserve compatibility with older
# versions of Ambassador. (This isn't a functional issue, just a matter of
# trying not to confuse people on upgrades.)
(svc, url, originate_tls, otls_name) = self.service_tls_check(svc, tls_context, host_rewrite)
# Build up the common name stuff that we'll need for the service and
# the shadow service.
aux_name_fields = []
cb_name = mapping.get('circuit_breaker', None)
if cb_name:
if cb_name in self.breakers:
aux_name_fields.append("cb_%s" % cb_name)
else:
self.logger.error("CircuitBreaker %s is not defined (mapping %s)" %
(cb_name, mapping.name))
od_name = mapping.get('outlier_detection', None)
if od_name:
if od_name in self.outliers:
aux_name_fields.append("od_%s" % od_name)
else:
self.logger.error("OutlierDetection %s is not defined (mapping %s)" %
(od_name, mapping.name))
# OK. Use the main service stuff to build up the main cluster.
if otls_name:
cluster_name_fields.append(otls_name)
cluster_name_fields.extend(aux_name_fields)
cluster_name = 'cluster_%s' % "_".join(cluster_name_fields)
cluster_name = re.sub(r'[^0-9A-Za-z_]', '_', cluster_name)
self.logger.debug("%s: svc %s -> cluster %s" % (mapping.name, svc, cluster_name))
self.add_intermediate_cluster(mapping['_source'], cluster_name,
svc, [ url ],
cb_name=cb_name, od_name=od_name, grpc=grpc,
originate_tls=originate_tls, host_rewrite=host_rewrite)
return svc, cluster_name
def merge_tmods(self, tls_module, generated_module, key):
"""
Merge TLS module configuration for a particular key. In the event of conflicts, the
tls_module element wins, and an error is posted so that the diagnostics service can
show it.
Returns a TLS module with a correctly-merged config element. This will be the
tls_module (possibly modified) unless no tls_module is present, in which case
the generated_module will be promoted. If any changes were made to the module, it
will be marked as referenced by the generated_module.
:param tls_module: the `tls` module; may be None
:param generated_module: the `tls-from-ambassador-certs` module; may be None
:param key: the key in the module config to merge
:return: TLS module object; see above.
"""
# First up, the easy cases. If either module is missing, return the other.
# (The other might be None too, of course.)
if generated_module is None:
return tls_module
elif tls_module is None:
return generated_module
else:
self.logger.debug("tls_module %s" % json.dumps(tls_module, indent=4))
self.logger.debug("generated_module %s" % json.dumps(generated_module, indent=4))
# OK, no easy cases. We know that both modules exist: grab the config dicts.
tls_source = tls_module['_source']
tls_config = tls_module.get(key, {})
gen_source = generated_module['_source']
gen_config = generated_module.get(key, {})
# Now walk over the tls_config and copy anything needed.
any_changes = False
for ckey in gen_config:
if ckey in tls_config:
# ckey exists in both modules. Do they have the same value?
if tls_config[ckey] != gen_config[ckey]:
# No -- post an error, but let the version from the TLS module win.
errfmt = "CONFLICT in TLS config for {}.{}: using {} from TLS module in {}"
errstr = errfmt.format(key, ckey, tls_config[ckey], tls_source)
self.post_error(RichStatus.fromError(errstr))
else:
# They have the same value. Worth mentioning in debug.
self.logger.debug("merge_tmods: {}.{} duplicated with same value".format(key, ckey))
else:
# ckey only exists in gen_config. Copy it over.
self.logger.debug("merge_tmods: copy {}.{} from gen_config".format(key, ckey))
tls_config[ckey] = gen_config[ckey]
any_changes = True
# If we had changes...
if any_changes:
# ...then mark the tls_module as referenced by the generated_module's
# source..
tls_module._mark_referenced_by(gen_source)
# ...and copy the tls_config back in (in case the key wasn't in the tls_module
# config at all originally).
tls_module[key] = tls_config
# Finally, return the tls_module.
return tls_module
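# Illustrative merge (hypothetical values): if the tls module sets
# server.cert_chain_file=/etc/certs/a.crt and the generated module sets
# server.cert_chain_file=/etc/certs/b.crt plus server.private_key_file=/etc/certs/b.key,
# the merged result keeps a.crt (the tls module wins and a CONFLICT error is posted)
# while private_key_file is copied over from the generated module.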
def generate_intermediate_config(self):
# First things first. The "Ambassador" module always exists; create it with
# default values now.
self.ambassador_module = SourcedDict(
service_port = 80,
admin_port = 8001,
diag_port = 8877,
auth_enabled = None,
liveness_probe = { "enabled": True },
readiness_probe = { "enabled": True },
diagnostics = { "enabled": True },
tls_config = None,
use_proxy_proto = False,
x_forwarded_proto_redirect = False,
)
# Next up: let's define initial clusters, routes, and filters.
#
# Our set of clusters starts out empty; we use add_intermediate_cluster()
# to build it up while making sure that all the source-tracking stuff
# works out.
#
# Note that we use a map for clusters, not a list -- the reason is that
# multiple mappings can use the same service, and we don't want multiple
# clusters.
self.envoy_clusters = {}
# Our initial set of routes is empty...
self.envoy_routes = {}
# Our initial list of grpc_services is empty...
self.envoy_config['grpc_services'] = []
# Now we look at user-defined modules from our config...
modules = self.config.get('modules', {})
# ...most notably the 'ambassador' and 'tls' modules, which are handled first.
amod = modules.get('ambassador', None)
tls_module = modules.get('tls', None)
# Part of handling the 'tls' module is folding in the 'tls-from-ambassador-certs'
# module, so grab that too...
generated_module = modules.get('tls-from-ambassador-certs', None)
# ...and merge the 'server' and 'client' config elements.
tls_module = self.merge_tmods(tls_module, generated_module, 'server')
tls_module = self.merge_tmods(tls_module, generated_module, 'client')
# OK, done. Make sure we have _something_ for the TLS module going forward.
tmod = tls_module or {}
self.logger.debug("TLS module after merge: %s" % json.dumps(tmod, indent=4))
if amod or tmod:
self.module_config_ambassador("ambassador", amod, tmod)
router_config = {}
tracing_configs = self.config.get('tracing_configs', None)
self.module_config_tracing(tracing_configs)
if 'tracing' in self.envoy_config:
router_config['start_child_span'] = True
# !!!! WARNING WARNING WARNING !!!! Filters are actually ORDER-DEPENDENT.
self.envoy_config['filters'] = []
# Start with authentication filter
auth_mod = modules.get('authentication', None)
auth_configs = self.config.get('auth_configs', None)
auth_filter = self.module_config_authentication("authentication", amod, auth_mod, auth_configs)
if auth_filter:
self.envoy_config['filters'].append(auth_filter)
# Then append the rate-limit filter, because we might rate-limit based on auth headers
ratelimit_configs = self.config.get('ratelimit_configs', None)
(ratelimit_filter, ratelimit_grpc_service) = self.module_config_ratelimit(ratelimit_configs)
if ratelimit_filter and ratelimit_grpc_service:
self.envoy_config['filters'].append(ratelimit_filter)
self.envoy_config['grpc_services'].append(ratelimit_grpc_service)
# Then append non-configurable cors and decoder filters
self.envoy_config['filters'].append(SourcedDict(name="cors", config={}))
self.envoy_config['filters'].append(SourcedDict(type="decoder", name="router", config=router_config))
# For mappings, start with empty sets for everything.
mappings = self.config.get("mappings", {})
self.breakers = self.config.get("CircuitBreaker", {})
for key, breaker in self.breakers.items():
breaker['_referenced_by'] = []
self.outliers = self.config.get("OutlierDetection", {})
for key, outlier in self.outliers.items():
outlier['_referenced_by'] = []
# OK. Given those initial sets, let's look over our global modules.
for module_name in modules.keys():
if ((module_name == 'ambassador') or
(module_name == 'tls') or
(module_name == 'authentication') or
(module_name == 'tls-from-ambassador-certs')):
continue
handler_name = "module_config_%s" % module_name
handler = getattr(self, handler_name, None)
if not handler:
self.logger.error("module %s: no configuration generator, skipping" % module_name)
continue
handler(module_name, modules[module_name])
# Once modules are handled, we can set up our admin config...
self.envoy_config['admin'] = SourcedDict(
_from=self.ambassador_module,
admin_port=self.ambassador_module["admin_port"]
)
# ...and our listeners.
primary_listener = SourcedDict(
_from=self.ambassador_module,
service_port=self.ambassador_module["service_port"],
require_tls=False,
use_proxy_proto=self.ambassador_module['use_proxy_proto']
)
if 'use_remote_address' in self.ambassador_module:
primary_listener['use_remote_address'] = self.ambassador_module['use_remote_address']
# If x_forwarded_proto_redirect is set, then we enable require_tls in primary listener, which in turn sets
# require_ssl to true in envoy config. Once set, all requests that carry X-FORWARDED-PROTO: https
# are processed normally by envoy. In all the other cases, including X-FORWARDED-PROTO set to http,
# a 301 redirect response to https://host is sent
if self.ambassador_module.get('x_forwarded_proto_redirect', False):
primary_listener['require_tls'] = True
self.logger.debug("x_forwarded_proto_redirect is set to true, enabling 'require_tls' in listener")
redirect_cleartext_from = None
tmod = self.ambassador_module.get('tls_config', None)
# ...TLS config, if necessary...
if tmod:
# self.logger.debug("USING TLS")
primary_listener['tls'] = tmod
if self.tmod_certs_exist(primary_listener['tls']) > 0:
primary_listener['tls']['ssl_context'] = True
redirect_cleartext_from = tmod.get('redirect_cleartext_from')
self.envoy_config['listeners'] = [ primary_listener ]
if redirect_cleartext_from:
# We only want to set `require_tls` on the primary listener when certs are present on the pod
if self.tmod_certs_exist(primary_listener['tls']) > 0:
primary_listener['require_tls'] = True
new_listener = SourcedDict(
_from=self.ambassador_module,
service_port=redirect_cleartext_from,
require_tls=True,
# Note: no TLS context here, this is a cleartext listener.
# We can set require_tls True because we can let the upstream
# tell us about that.
use_proxy_proto=self.ambassador_module['use_proxy_proto']
)
if 'use_remote_address' in self.ambassador_module:
new_listener['use_remote_address'] = self.ambassador_module['use_remote_address']
self.envoy_config['listeners'].append(new_listener)
self.default_liveness_probe['service'] = self.diag_service()
self.default_readiness_probe['service'] = self.diag_service()
self.default_diagnostics['service'] = self.diag_service()
for name, cur, dflt in [
("liveness", self.ambassador_module['liveness_probe'],
self.default_liveness_probe),
("readiness", self.ambassador_module['readiness_probe'],
self.default_readiness_probe),
("diagnostics", self.ambassador_module['diagnostics'],
self.default_diagnostics)
]:
if cur and cur.get("enabled", False):
prefix = cur.get("prefix", dflt['prefix'])
rewrite = cur.get("rewrite", dflt['rewrite'])
service = cur.get("service", dflt['service'])
# Push a fake mapping to handle this.
name = "internal_%s_probe_mapping" % name
mappings[name] = Mapping(
_from=self.ambassador_module,
kind='Mapping',
name=name,
prefix=prefix,
rewrite=rewrite,
service=service
)
# self.logger.debug("PROBE %s: %s -> %s%s" % (name, prefix, service, rewrite))
# OK! We have all the mappings we need. Process them (don't worry about sorting
# yet, we'll do that on routes).
for mapping_name in sorted(mappings.keys()):
mapping = mappings[mapping_name]
# OK. Set up clusters for this service...
svc, cluster_name = self.add_clusters_for_mapping(mapping)
# ...and route.
self.add_intermediate_route(mapping['_source'], mapping, svc, cluster_name)
# OK. Walk the set of clusters and normalize names...
collisions = {}
mangled = {}
for name in sorted(self.envoy_clusters.keys()):
if len(name) > 60:
# Too long.
short_name = name[0:40]
collision_list = collisions.setdefault(short_name, [])
collision_list.append(name)
for short_name in sorted(collisions.keys()):
name_list = collisions[short_name]
i = 0
for name in sorted(name_list):
mangled_name = "%s-%d" % (short_name, i)
i += 1
self.logger.info("%s => %s" % (name, mangled_name))
mangled[name] = mangled_name
self.envoy_clusters[name]['name'] = mangled_name
# We need to default any unspecified weights and renormalize to 100
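# Illustrative arithmetic (assumed weights): clusters with weights [30, None, None]
# become [30, 35, 35]; weights [10, 10] (total 20, none unspecified) are rescaled
# to [50, 50]; weights that already sum to 100 are left untouched.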
for group_id, route in self.envoy_routes.items():
clusters = route["clusters"]
total = 0.0
unspecified = 0
# If this is a websocket route, it will support only one cluster right now.
if route.get('use_websocket', False):
if len(clusters) > 1:
errmsg = "Only one cluster is supported for websockets; using %s" % clusters[0]['name']
self.post_error(RichStatus.fromError(errmsg))
for c in clusters:
# Mangle the name, if need be.
c_name = c["name"]
if c_name in mangled:
c["name"] = mangled[c_name]
# self.logger.info("%s: mangling cluster %s to %s" % (group_id, c_name, c["name"]))
if c["weight"] is None:
unspecified += 1
else:
total += c["weight"]
if unspecified:
for c in clusters:
if c["weight"] is None:
c["weight"] = (100.0 - total)/unspecified
elif total != 100.0:
for c in clusters:
c["weight"] *= 100.0/total
# OK. When all is said and done, sort the list of routes by route weight...
self.envoy_config['routes'] = sorted([
route for group_id, route in self.envoy_routes.items()
], reverse=True, key=Mapping.route_weight)
# ...then map clusters back into a list...
self.envoy_config['clusters'] = [
self.envoy_clusters[cluster_key] for cluster_key in sorted(self.envoy_clusters.keys())
]
# ...and finally repeat for breakers and outliers, but copy them in the process so we
# can mess with the originals.
#
# What's going on here is that circuit-breaker and outlier-detection configs aren't
# included as independent objects in envoy.json, but we want to be able to discuss
# them in diag. We also don't need to keep the _source and _referenced_by elements
# in their real Envoy appearances.
self.envoy_config['breakers'] = self.clean_and_copy(self.breakers)
self.envoy_config['outliers'] = self.clean_and_copy(self.outliers)
@staticmethod
def tmod_certs_exist(tmod):
"""
Returns the number of certs that are defined in the supplied tmod
:param tmod: The TLS module configuration
:return: number of certs in tmod
:rtype: int
"""
cert_count = 0
if tmod.get('cert_chain_file') is not None:
cert_count += 1
if tmod.get('private_key_file') is not None:
cert_count += 1
if tmod.get('cacert_chain_file') is not None:
cert_count += 1
return cert_count
def _get_intermediate_for(self, element_list, source_keys, value):
if not isinstance(value, dict):
return
good = True
if '_source' in value:
good = False
value_source = value.get("_source", None)
value_referenced_by = value.get("_referenced_by", [])
if ((value_source in source_keys) or
(source_keys & set(value_referenced_by))):
good = True
if good:
element_list.append(value)
def get_intermediate_for(self, source_key):
source_keys = []
if source_key.startswith("grp-"):
group_id = source_key[4:]
for route in self.envoy_config['routes']:
if route['_group_id'] == group_id:
source_keys.append(route['_source'])
for reference_key in route['_referenced_by']:
source_keys.append(reference_key)
if not source_keys:
return {
"error": "No group matches %s" % group_id
}
else:
if source_key in self.source_map:
# Exact match for a file in the source map: include all the objects
# in the file.
source_keys = self.source_map[source_key]
elif source_key in self.sources:
# Exact match for an object in a file: include only that object.
source_keys.append(source_key)
else:
# No match at all. Weird.
return {
"error": "No source matches %s" % source_key
}
source_keys = set(source_keys)
# self.logger.debug("get_intermediate_for: source_keys %s" % source_keys)
# self.logger.debug("get_intermediate_for: errors %s" % self.errors)
sources = []
for key in source_keys:
source_dict = dict(self.sources[key])
source_dict['errors'] = [
{
'summary': error['error'].split('\n', 1)[0],
'text': error['error']
}
for error in self.errors.get(key, [])
]
source_dict['source_key'] = key
sources.append(source_dict)
result = {
"sources": sources
}
# self.logger.debug("get_intermediate_for: initial result %s" % result)
for key in self.envoy_config.keys():
result[key] = []
value = self.envoy_config[key]
if isinstance(value, list):
for v2 in value:
self._get_intermediate_for(result[key], source_keys, v2)
else:
self._get_intermediate_for(result[key], source_keys, value)
return result
def generate_envoy_config(self, template=None, template_dir=None, **kwargs):
# Finally! Render the template to JSON...
envoy_json = self.to_json(template=template, template_dir=template_dir)
# We used to use the JSON parser as a final sanity check here. That caused
# Forge some issues, so it's turned off for now.
# rc = RichStatus.fromError("impossible")
# # ...and use the JSON parser as a final sanity check.
# try:
# obj = json.loads(envoy_json)
# rc = RichStatus.OK(msg="Envoy configuration OK", envoy_config=obj)
# except json.decoder.JSONDecodeError as e:
# rc = RichStatus.fromError("Invalid Envoy configuration: %s" % str(e),
# raw=envoy_json, exception=e)
# Go ahead and report that we generated an Envoy config, if we can.
scout_result = Config.scout_report(action="config", result=True, generated=True, **kwargs)
rc = RichStatus.OK(envoy_config=envoy_json, scout_result=scout_result)
# self.logger.debug("Scout reports %s" % json.dumps(rc.scout_result))
return rc
def set_config_ambassador(self, module, key, value, merge=False):
if not merge:
self.ambassador_module[key] = value
else:
self.ambassador_module[key].update(value)
# XXX This is actually wrong sometimes. If, for example, you have an
# ambassador module that defines the admin_port, sure, bringing in its
# source makes sense. On the other hand, if you have a TLS module
# created by a secret, that source shouldn't really take over the
# admin document. This will take enough unraveling that I'm going to
# leave it for now, though.
self.ambassador_module['_source'] = module['_source']
def update_config_ambassador(self, module, key, value):
self.set_config_ambassador(module, key, value, merge=True)
def tls_config_helper(self, name, amod, tmod):
tmp_config = SourcedDict(_from=amod)
some_enabled = False
for context_name in tmod.keys():
if context_name.startswith('_'):
continue
context = tmod[context_name]
# self.logger.debug("context %s -- %s" % (context_name, json.dumps(context)))
if context.get('enabled', True):
if context_name == 'server':
# Server-side TLS is enabled.
self.logger.debug("TLS termination enabled!")
some_enabled = True
# Switch to port 443 by default...
self.set_config_ambassador(amod, 'service_port', 443)
# ...and merge in the server-side defaults.
tmp_config.update(self.default_tls_config['server'])
tmp_config.update(tmod['server'])
# Check if secrets are supplied for TLS termination and/or TLS auth
secret = context.get('secret')
if secret is not None:
self.logger.debug("config.server.secret is {}".format(secret))
# If /{etc,ambassador}/certs/tls.crt does not exist, then load the secrets
if check_cert_file(TLSPaths.mount_tls_crt.value):
self.logger.debug("Secret already exists, taking no action for secret {}".format(secret))
elif check_cert_file(TLSPaths.tls_crt.value):
tmp_config['cert_chain_file'] = TLSPaths.tls_crt.value
tmp_config['private_key_file'] = TLSPaths.tls_key.value
else:
(server_cert, server_key, server_data) = read_cert_secret(kube_v1(), secret, self.namespace)
if server_cert and server_key:
self.logger.debug("saving contents of secret {} to {}".format(
secret, TLSPaths.cert_dir.value))
save_cert(server_cert, server_key, TLSPaths.cert_dir.value)
tmp_config['cert_chain_file'] = TLSPaths.tls_crt.value
tmp_config['private_key_file'] = TLSPaths.tls_key.value
elif context_name == 'client':
# Client-side TLS is enabled.
self.logger.debug("TLS client certs enabled!")
some_enabled = True
# Merge in the client-side defaults.
tmp_config.update(self.default_tls_config['client'])
tmp_config.update(tmod['client'])
secret = context.get('secret')
if secret is not None:
self.logger.debug("config.client.secret is {}".format(secret))
if check_cert_file(TLSPaths.client_mount_crt.value):
self.logger.debug("Secret already exists, taking no action for secret {}".format(secret))
elif check_cert_file(TLSPaths.client_tls_crt.value):
tmp_config['cacert_chain_file'] = TLSPaths.client_tls_crt.value
else:
(client_cert, _, _) = read_cert_secret(kube_v1(), secret, self.namespace)
if client_cert:
self.logger.debug("saving contents of secret {} to {}".format(
secret, TLSPaths.client_cert_dir.value))
save_cert(client_cert, None, TLSPaths.client_cert_dir.value)
tmp_config['cacert_chain_file'] = TLSPaths.client_tls_crt.value
else:
# This is a wholly new thing.
self.tls_contexts[context_name] = SourcedDict(
_from=tmod,
**context
)
if some_enabled:
if 'enabled' in tmp_config:
del tmp_config['enabled']
# Save the TLS config...
self.set_config_ambassador(amod, 'tls_config', tmp_config)
self.logger.debug("TLS config: %s" % json.dumps(self.ambassador_module['tls_config'], indent=4))
self.logger.debug("TLS contexts: %s" % json.dumps(self.tls_contexts, indent=4))
return some_enabled
def module_config_ambassador(self, name, amod, tmod):
# Toplevel Ambassador configuration. First up, check out TLS.
have_amod_tls = False
if amod and ('tls' in amod):
have_amod_tls = self.tls_config_helper(name, amod, amod['tls'])
if not have_amod_tls and tmod:
self.tls_config_helper(name, tmod, tmod)
if amod and ('cors' in amod):
self.parse_and_save_default_cors(amod)
# After that, check for port definitions, probes, etc., and copy them in
# as we find them.
for key in [ 'service_port', 'admin_port', 'diag_port',
'liveness_probe', 'readiness_probe', 'auth_enabled',
'use_proxy_proto', 'use_remote_address', 'diagnostics', 'x_forwarded_proto_redirect' ]:
if amod and (key in amod):
# Yes. It overrides the default.
self.set_config_ambassador(amod, key, amod[key])
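# parse_and_save_default_cors turns the Ambassador module's 'cors' block into the
# defaults stored under envoy_config['cors_default'] (allow_origin, max_age,
# allow_credentials, allow_methods, allow_headers, expose_headers).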
def parse_and_save_default_cors(self, amod):
cors_default_temp = {'enabled': True}
cors = amod['cors']
origins = cors.get('origins')
if origins is not None:
if type(origins) is list:
cors_default_temp['allow_origin'] = origins
elif type(origins) is str:
cors_default_temp['allow_origin'] = origins.split(',')
else:
print("invalid cors configuration supplied - {}".format(origins))
return
self.save_cors_default_element("max_age", "max_age", cors_default_temp, cors)
self.save_cors_default_element("credentials", "allow_credentials", cors_default_temp, cors)
self.save_cors_default_element("methods", "allow_methods", cors_default_temp, cors)
self.save_cors_default_element("headers", "allow_headers", cors_default_temp, cors)
self.save_cors_default_element("exposed_headers", "expose_headers", cors_default_temp, cors)
self.envoy_config['cors_default'] = cors_default_temp
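# Copy one CORS field from the module config into the defaults dict, joining
# list values into a comma-separated string.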
def save_cors_default_element(self, cors_key, route_key, cors_dest, cors_source):
if cors_source.get(cors_key) is not None:
if type(cors_source.get(cors_key)) is list:
cors_dest[route_key] = ", ".join(cors_source.get(cors_key))
else:
cors_dest[route_key] = cors_source.get(cors_key)
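# module_config_ratelimit builds the rate_limit decoder filter and its gRPC
# service definition, creating the cluster_ext_ratelimit cluster on first use.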
def module_config_ratelimit(self, ratelimit_config):
cluster_hosts = None
sources = []
if ratelimit_config:
for config in ratelimit_config.values():
sources.append(config['_source'])
cluster_hosts = config.get("service", None)
if not cluster_hosts or not sources:
return (None, None)
host_rewrite = config.get("host_rewrite", None)
cluster_name = "cluster_ext_ratelimit"
filter_config = {
"domain": "ambassador",
"request_type": "both",
"timeout_ms": 20
}
grpc_service = SourcedDict(
name="rate_limit_service",
cluster_name=cluster_name
)
first_source = sources.pop(0)
filter = SourcedDict(
_source=first_source,
type="decoder",
name="rate_limit",
config=filter_config
)
if cluster_name not in self.envoy_clusters:
(svc, url, originate_tls, otls_name) = self.service_tls_check(cluster_hosts, None, host_rewrite)
self.add_intermediate_cluster(first_source, cluster_name,
'extratelimit', [url],
type="strict_dns", lb_type="round_robin",
grpc=True, host_rewrite=host_rewrite)
for source in sources:
filter._mark_referenced_by(source)
self.envoy_clusters[cluster_name]._mark_referenced_by(source)
return (filter, grpc_service)
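# module_config_tracing wires a TracingService into Envoy: it creates the
# cluster_ext_tracing cluster (with gRPC and TLS settings for the lightstep
# driver) and stores the driver config under envoy_config['tracing'].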
def module_config_tracing(self, tracing_config):
cluster_hosts = None
driver = None
driver_config = None
tag_headers = None
host_rewrite = None
sources = []
if tracing_config:
for config in tracing_config.values():
sources.append(config['_source'])
cluster_hosts = config.get("service", None)
driver = config.get("driver", None)
driver_config = config.get("config", {})
tag_headers = config.get("tag_headers", [])
host_rewrite = config.get("host_rewrite", None)
if not cluster_hosts or not sources:
return
cluster_name = "cluster_ext_tracing"
first_source = sources.pop(0)
if cluster_name not in self.envoy_clusters:
(svc, url, originate_tls, otls_name) = self.service_tls_check(cluster_hosts, None, host_rewrite)
grpc = False
ssl_context = None
if driver == "lightstep":
grpc = True
parsed_url = urlparse(url)
ssl_context = {
"ca_cert_file": "/etc/ssl/certs/ca-certificates.crt",
"verify_subject_alt_name": [parsed_url.hostname]
}
self.add_intermediate_cluster(first_source, cluster_name,
'exttracing', [url],
type="strict_dns", lb_type="round_robin",
host_rewrite=host_rewrite, grpc=grpc, ssl_context=ssl_context)
driver_config['collector_cluster'] = cluster_name
tracing = SourcedDict(
_source=first_source,
driver=driver,
config=driver_config,
tag_headers=tag_headers,
cluster_name=cluster_name
)
self.envoy_config['tracing'] = tracing
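# auth_helper accumulates AuthService settings from a single module: it merges
# path_prefix/timeout_ms/cluster values (flagging conflicts), unions the allowed
# headers, and records the auth_service host with its weight and TLS context.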
def auth_helper(self, sources, config, cluster_hosts, module):
sources.append(module['_source'])
for key in [ 'path_prefix', 'timeout_ms', 'cluster' ]:
value = module.get(key, None)
if value is not None:
previous = config.get(key, None)
if previous and (previous != value):
errstr = (
"AuthService cannot support multiple %s values; using %s" %
(key, previous)
)
self.post_error(RichStatus.fromError(errstr), key=module['_source'])
else:
config[key] = value
headers = module.get('allowed_headers', None)
if headers:
allowed_headers = config.get('allowed_headers', [])
for hdr in headers:
if hdr not in allowed_headers:
allowed_headers.append(hdr)
config['allowed_headers'] = allowed_headers
auth_service = module.get("auth_service", None)
# weight = module.get("weight", 100)
weight = 100 # Can't support arbitrary weights right now.
if auth_service:
cluster_hosts[auth_service] = ( weight, module.get('tls', None) )
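# module_config_authentication combines all AuthService definitions into a single
# extauth decoder filter backed by the cluster_ext_auth cluster, refusing
# configurations that mix HTTP and HTTPS upstreams.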
def module_config_authentication(self, name, amod, auth_mod, auth_configs):
filter_config = {
"cluster": "cluster_ext_auth",
"timeout_ms": 5000
}
cluster_hosts = {}
sources = []
if auth_mod:
self.auth_helper(sources, filter_config, cluster_hosts, auth_mod)
if auth_configs:
# self.logger.debug("auth_configs: %s" % auth_configs)
for config in auth_configs.values():
self.auth_helper(sources, filter_config, cluster_hosts, config)
if not sources:
return None
first_source = sources.pop(0)
filter = SourcedDict(
_source=first_source,
_services=sorted(cluster_hosts.keys()),
type="decoder",
name="extauth",
config=filter_config
)
cluster_name = filter_config['cluster']
host_rewrite = filter_config.get('host_rewrite', None)
if cluster_name not in self.envoy_clusters:
if not cluster_hosts:
cluster_hosts = { '127.0.0.1:5000': ( 100, None ) }
urls = []
protocols = {}
for svc in sorted(cluster_hosts.keys()):
weight, tls_context = cluster_hosts[svc]
(svc, url, originate_tls, otls_name) = self.service_tls_check(svc, tls_context, host_rewrite)
if originate_tls:
protocols['https'] = True
else:
protocols['http'] = True
if otls_name:
filter_config['cluster'] = cluster_name + "_" + otls_name
cluster_name = filter_config['cluster']
urls.append(url)
if len(protocols.keys()) != 1:
raise Exception("auth config cannot try to use both HTTP and HTTPS")
self.add_intermediate_cluster(first_source, cluster_name,
'extauth', urls,
type="strict_dns", lb_type="round_robin",
originate_tls=originate_tls, host_rewrite=host_rewrite)
for source in sources:
filter._mark_referenced_by(source)
self.envoy_clusters[cluster_name]._mark_referenced_by(source)
return filter
### DIAGNOSTICS
def diagnostic_overview(self):
# Build a set of source _files_ rather than source _objects_.
source_files = {}
for filename, source_keys in self.source_map.items():
# self.logger.debug("overview -- filename %s, source_keys %d" %
# (filename, len(source_keys)))
# # Skip '--internal--' etc.
# if filename.startswith('--'):
# continue
source_dict = source_files.setdefault(
filename,
{
'filename': filename,
'objects': {},
'count': 0,
'plural': "objects",
'error_count': 0,
'error_plural': "errors"
}
)
for source_key in source_keys:
# self.logger.debug("overview --- source_key %s" % source_key)
source = self.sources[source_key]
if ('source' in source) and not ('source' in source_dict):
source_dict['source'] = source['source']
raw_errors = self.errors.get(source_key, [])
errors = []
for error in raw_errors:
source_dict['error_count'] += 1
errors.append({
'summary': error['error'].split('\n', 1)[0],
'text': error['error']
})
source_dict['error_plural'] = "error" if (source_dict['error_count'] == 1) else "errors"
source_dict['count'] += 1
source_dict['plural'] = "object" if (source_dict['count'] == 1) else "objects"
object_dict = source_dict['objects']
object_dict[source_key] = {
'key': source_key,
'kind': source['kind'],
'errors': errors
}
routes = []
for route in self.envoy_config['routes']:
if route['_source'] != "--diagnostics--":
route['_group_id'] = Mapping.group_id(route.get('method', 'GET'),
route['prefix'] if 'prefix' in route else route['regex'],
route.get('headers', []))
routes.append(route)
configuration = { key: self.envoy_config[key] for key in self.envoy_config.keys()
if key != "routes" }
cluster_to_service_mapping = {
"cluster_ext_auth": "AuthService",
"cluster_ext_tracing": "TracingService",
"cluster_ext_ratelimit": "RateLimitService"
}
ambassador_services = []
for cluster in configuration.get('clusters', []):
maps_to_service = cluster_to_service_mapping.get(cluster['name'])
if maps_to_service:
service_weight = 100.0 / len(cluster['urls'])
for url in cluster['urls']:
ambassador_services.append(SourcedDict(
_from=cluster,
type=maps_to_service,
name=url,
cluster=cluster['name'],
_service_weight=service_weight
))
overview = dict(sources=sorted(source_files.values(), key=lambda x: x['filename']),
routes=routes,
**configuration)
if len(ambassador_services) > 0:
overview['ambassador_services'] = ambassador_services
# self.logger.debug("overview result %s" % json.dumps(overview, indent=4, sort_keys=True))
return overview
def pretty(self, obj, out=sys.stdout):
out.write(obj)
# json.dump(obj, out, indent=4, separators=(',',':'), sort_keys=True)
# out.write("\n")
def to_json(self, template=None, template_dir=None):
template_paths = [ self.config_dir_path, self.template_dir_path ]
if template_dir:
template_paths.insert(0, template_dir)
if not template:
env = Environment(loader=FileSystemLoader(template_paths))
template = env.get_template("envoy.j2")
return(template.render(**self.envoy_config))
def dump(self):
print("==== config")
self.pretty(self.config)
print("==== envoy_config")
self.pretty(self.envoy_config)
if __name__ == '__main__':
aconf = Config(sys.argv[1])
print(json.dumps(aconf.diagnostic_overview(), indent=4, sort_keys=True))
|
[] |
[] |
[
"AMBASSADOR_NAMESPACE",
"AMBASSADOR_SCOUT_ID",
"KUBERNETES_SERVICE_HOST",
"AMBASSADOR_SCOUT_RESULT",
"AMBASSADOR_ID"
] |
[]
|
["AMBASSADOR_NAMESPACE", "AMBASSADOR_SCOUT_ID", "KUBERNETES_SERVICE_HOST", "AMBASSADOR_SCOUT_RESULT", "AMBASSADOR_ID"]
|
python
| 5 | 0 | |
operator/cmd/mesh/shared.go
|
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package mesh contains types and functions that are used across the full
// set of mesh commands.
package mesh
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/operator/pkg/cache"
"istio.io/istio/operator/pkg/helmreconciler"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/operator/pkg/util/clog"
"istio.io/pkg/log"
)
var (
// installerScope is the scope for all commands in the mesh package.
installerScope = log.RegisterScope("installer", "installer", 0)
)
func initLogsOrExit(_ *rootArgs) {
if err := configLogs(log.DefaultOptions()); err != nil {
_, _ = fmt.Fprintf(os.Stderr, "Could not configure logs: %s", err)
os.Exit(1)
}
}
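// configLogs sends both regular and error log output to stderr.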
func configLogs(opt *log.Options) error {
op := []string{"stderr"}
opt2 := *opt
opt2.OutputPaths = op
opt2.ErrorOutputPaths = op
return log.Configure(&opt2)
}
func refreshGoldenFiles() bool {
return os.Getenv("REFRESH_GOLDEN") == "true"
}
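// ReadLayeredYAMLs reads the given files ("-" meaning stdin, read at most once)
// and overlays them in order, with later files overriding earlier ones.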
func ReadLayeredYAMLs(filenames []string) (string, error) {
return readLayeredYAMLs(filenames, os.Stdin)
}
func readLayeredYAMLs(filenames []string, stdinReader io.Reader) (string, error) {
var ly string
var stdin bool
for _, fn := range filenames {
var b []byte
var err error
if fn == "-" {
if stdin {
continue
}
stdin = true
b, err = ioutil.ReadAll(stdinReader)
} else {
b, err = ioutil.ReadFile(strings.TrimSpace(fn))
}
if err != nil {
return "", err
}
ly, err = util.OverlayYAML(ly, string(b))
if err != nil {
return "", err
}
}
return ly, nil
}
// confirm waits for a user to confirm with the supplied message.
func confirm(msg string, writer io.Writer) bool {
fmt.Fprintf(writer, "%s ", msg)
var response string
_, err := fmt.Scanln(&response)
if err != nil {
return false
}
response = strings.ToUpper(response)
if response == "Y" || response == "YES" {
return true
}
return false
}
// K8sConfig creates a rest.Config, Clientset and controller runtime Client from the given kubeconfig path and context.
func K8sConfig(kubeConfigPath string, context string) (*rest.Config, *kubernetes.Clientset, client.Client, error) {
restConfig, clientset, err := InitK8SRestClient(kubeConfigPath, context)
if err != nil {
return nil, nil, nil, err
}
// We are running a one-off command locally, so we don't need to worry too much about rate limiting.
// Bumping this up greatly decreases install time
restConfig.QPS = 50
restConfig.Burst = 100
client, err := client.New(restConfig, client.Options{Scheme: scheme.Scheme})
if err != nil {
return nil, nil, nil, err
}
return restConfig, clientset, client, nil
}
// InitK8SRestClient creates a rest.Config and Clientset from the given kubeconfig path and context.
func InitK8SRestClient(kubeconfig, kubeContext string) (*rest.Config, *kubernetes.Clientset, error) {
restConfig, err := defaultRestConfig(kubeconfig, kubeContext)
if err != nil {
return nil, nil, err
}
clientset, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return nil, nil, err
}
return restConfig, clientset, nil
}
func defaultRestConfig(kubeconfig, kubeContext string) (*rest.Config, error) {
config, err := BuildClientConfig(kubeconfig, kubeContext)
if err != nil {
return nil, err
}
config.APIPath = "/api"
config.GroupVersion = &v1.SchemeGroupVersion
config.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: scheme.Codecs}
return config, nil
}
// BuildClientConfig is a helper function that builds client config from a kubeconfig filepath.
// It overrides the current context with the one provided (empty to use default).
//
// This is a modified version of k8s.io/client-go/tools/clientcmd/BuildConfigFromFlags with the
// difference that it loads default configs if not running in-cluster.
func BuildClientConfig(kubeconfig, context string) (*rest.Config, error) {
if kubeconfig != "" {
info, err := os.Stat(kubeconfig)
if err != nil || info.Size() == 0 {
// If the specified kubeconfig doesn't exist, is an empty file, or stat returns
// any other error, fall back to the default config.
kubeconfig = ""
}
}
// Config loading rules:
// 1. kubeconfig if it is not an empty string
// 2. In cluster config if running in-cluster
// 3. Config(s) in KUBECONFIG environment variable
// 4. Use $HOME/.kube/config
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
loadingRules.ExplicitPath = kubeconfig
configOverrides := &clientcmd.ConfigOverrides{
ClusterDefaults: clientcmd.ClusterDefaults,
CurrentContext: context,
}
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides).ClientConfig()
}
// applyOptions contains the startup options for applying the manifest.
type applyOptions struct {
// Path to the kubeconfig file.
Kubeconfig string
// Name of the kubeconfig context to use.
Context string
// DryRun performs all steps except actually applying the manifests or creating output dirs/files.
DryRun bool
// Maximum amount of time to wait for resources to be ready after install when Wait=true.
WaitTimeout time.Duration
}
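// applyManifest applies the rendered manifest for a single component to the
// cluster through a HelmReconciler built from the supplied clients.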
func applyManifest(restConfig *rest.Config, client client.Client, manifestStr string,
componentName name.ComponentName, opts *applyOptions, l clog.Logger) error {
// Needed in case we are running a test through this path that doesn't start a new process.
cache.FlushObjectCaches()
reconciler, err := helmreconciler.NewHelmReconciler(client, restConfig, nil, &helmreconciler.Options{DryRun: opts.DryRun, Log: l})
if err != nil {
l.LogAndError(err)
return err
}
ms := name.Manifest{
Name: componentName,
Content: manifestStr,
}
_, _, err = reconciler.ApplyManifest(ms)
return err
}
// getCRAndNamespaceFromFile returns the IstioOperator CR (as a string) and the Istio namespace from a file containing an IstioOperator CR.
func getCRAndNamespaceFromFile(filePath string, l clog.Logger) (customResource string, istioNamespace string, err error) {
if filePath == "" {
return "", "", nil
}
_, mergedIOPS, err := GenerateConfig([]string{filePath}, nil, false, nil, l)
if err != nil {
return "", "", err
}
b, err := ioutil.ReadFile(filePath)
if err != nil {
return "", "", fmt.Errorf("could not read values from file %s: %s", filePath, err)
}
customResource = string(b)
istioNamespace = v1alpha1.Namespace(mergedIOPS)
return
}
// createNamespace creates a namespace using the given k8s interface.
func createNamespace(cs kubernetes.Interface, namespace string) error {
if namespace == "" {
// Setup default namespace
namespace = "istio-system"
}
ns := &v1.Namespace{ObjectMeta: v12.ObjectMeta{
Name: namespace,
Labels: map[string]string{
"istio-injection": "disabled",
},
}}
_, err := cs.CoreV1().Namespaces().Create(context.TODO(), ns, v12.CreateOptions{})
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("failed to create namespace %v: %v", namespace, err)
}
return nil
}
// deleteNamespace deletes namespace using the given k8s client.
func deleteNamespace(cs kubernetes.Interface, namespace string) error {
return cs.CoreV1().Namespaces().Delete(context.TODO(), namespace, v12.DeleteOptions{})
}
|
[
"\"REFRESH_GOLDEN\""
] |
[] |
[
"REFRESH_GOLDEN"
] |
[]
|
["REFRESH_GOLDEN"]
|
go
| 1 | 0 | |
train.py
|
#!/usr/bin/env python3
""" ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import time
import yaml
import os
import logging
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import torch
import torch.nn as nn
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint,\
convert_splitbn_model, model_parameters
from timm.utils import *
from timm.loss import *
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
import wandb
has_wandb = True
except ImportError:
has_wandb = False
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('train')
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
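# For example, a config file (hypothetical `myconfig.yml`) could set any of the
# options below by their dest names; its values are applied as parser defaults:
#   model: resnet50
#   batch_size: 256
#   sched: cosine
#   epochs: 300
# invoked as: python train.py /path/to/imagenet --config myconfig.yml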
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset parameters
parser.add_argument('data_dir', metavar='DIR',
help='path to dataset')
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--train-split', metavar='NAME', default='train',
help='dataset train split (default: train)')
parser.add_argument('--val-split', metavar='NAME', default='validation',
help='dataset validation split (default: validation)')
parser.add_argument('--dataset-download', action='store_true', default=False,
help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
# Model parameters
parser.add_argument('--model', default='resnet50', type=str, metavar='MODEL',
help='Name of model to train (default: "resnet50")')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=None, metavar='N',
help='number of label classes (Model default if None)')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--img-size', type=int, default=None, metavar='N',
help='Image patch size (default: None => model default)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('-b', '--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N',
help='validation batch size override (default: None)')
# Optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=2e-5,
help='weight decay (default: 2e-5)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--clip-mode', type=str, default='norm',
help='Gradient clipping mode. One of ("norm", "value", "agc")')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=0.05, metavar='LR',
help='learning rate (default: 0.05)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT',
help='amount to decay each learning rate cycle (default: 0.5)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit, cycles enabled if > 1')
parser.add_argument('--lr-k-decay', type=float, default=1.0,
help='learning rate k-decay for cosine/poly (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
help='warmup learning rate (default: 0.0001)')
parser.add_argument('--min-lr', type=float, default=1e-6, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (default: 1e-6)')
parser.add_argument('--epochs', type=int, default=300, metavar='N',
help='number of epochs to train (default: 300)')
parser.add_argument('--epoch-repeats', type=float, default=0., metavar='N',
help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=100, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation & regularization parameters
parser.add_argument('--no-aug', action='store_true', default=False,
help='Disable all training augmentation, override other train aug args')
parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
help='Random resize scale (default: 0.08 1.0)')
parser.add_argument('--ratio', type=float, nargs='+', default=[3./4., 4./3.], metavar='RATIO',
help='Random resize aspect ratio (default: 0.75 1.33)')
parser.add_argument('--hflip', type=float, default=0.5,
help='Horizontal flip training aug probability')
parser.add_argument('--vflip', type=float, default=0.,
help='Vertical flip training aug probability')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default=None, metavar='NAME',
help='Use AutoAugment policy. "v0" or "original". (default: None)')
parser.add_argument('--aug-repeats', type=int, default=0,
help='Number of augmentation repetitions (distributed training only) (default: 0)')
parser.add_argument('--aug-splits', type=int, default=0,
help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
parser.add_argument('--jsd-loss', action='store_true', default=False,
help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
parser.add_argument('--bce-loss', action='store_true', default=False,
help='Enable BCE loss w/ Mixup/CutMix use.')
parser.add_argument('--bce-target-thresh', type=float, default=None,
help='Threshold for binarizing softened BCE targets (default: None, disabled)')
parser.add_argument('--reprob', type=float, default=0., metavar='PCT',
help='Random erase prob (default: 0.)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
parser.add_argument('--mixup', type=float, default=0.0,
help='mixup alpha, mixup enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix', type=float, default=0.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic; default: "random")')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
parser.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='reduce',
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--split-bn', action='store_true',
help='Enable separate BN layers per augmentation split.')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
help='decay factor for model weights moving average (default: 0.9998)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--worker-seeding', type=str, default='all',
help='worker seed mode (default: all)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('--checkpoint-hist', type=int, default=10, metavar='N',
help='number of checkpoints to keep (default: 10)')
parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
help='how many training processes to use (default: 4)')
parser.add_argument('--save-images', action='store_true', default=False,
help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--no-ddp-bb', action='store_true', default=False,
help='Force broadcast buffers for native DDP to off.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
parser.add_argument('--experiment', default='', type=str, metavar='NAME',
help='name of train experiment, name of sub-folder for output')
parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
help='Best metric (default: "top1")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
help='use the multi-epochs-loader to save time at the beginning of every epoch')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--log-wandb', action='store_true', default=False,
help='log training and validation metrics to wandb')
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
def main():
args, args_text = _parse_args()
logpath = './output/' + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + args.experiment + '.log'
setup_default_logging(log_path=logpath)
if args.log_wandb:
if has_wandb:
wandb.init(project=args.experiment, config=args)
else:
_logger.warning("You've requested to log metrics to wandb but package not found. "
"Metrics not being logged to wandb, try `pip install wandb`")
args.prefetcher = not args.no_prefetcher
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
_logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (args.rank, args.world_size))
else:
_logger.info('Training with a single process on 1 GPU.')
assert args.rank >= 0
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
if args.amp:
# `--amp` chooses native amp before apex (APEX ver not actively maintained)
if has_native_amp:
args.native_amp = True
elif has_apex:
args.apex_amp = True
if args.apex_amp and has_apex:
use_amp = 'apex'
elif args.native_amp and has_native_amp:
use_amp = 'native'
elif args.apex_amp or args.native_amp:
_logger.warning("Neither APEX or native Torch AMP is available, using float32. "
"Install NVIDA apex or upgrade to PyTorch 1.6")
random_seed(args.seed, args.rank)
if args.fuser:
set_jit_fuser(args.fuser)
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
scriptable=args.torchscript,
checkpoint_path=args.initial_checkpoint)
# _logger.info(model)
# input("here to stop:")
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes # FIXME handle model default vs config num_classes more elegantly
if args.local_rank == 0:
_logger.info(
f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}')
data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0)
# setup augmentation batch splits for contrastive loss or split bn
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, 'A split of 1 makes no sense'
num_aug_splits = args.aug_splits
# enable split bn (separate bn stats per batch-portion)
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
# move model to GPU, enable channels last layout if set
model.cuda()
if args.channels_last: # https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html
model = model.to(memory_format=torch.channels_last)
# setup synchronized BatchNorm for distributed training
if args.distributed and args.sync_bn:
assert not args.split_bn
if has_apex and use_amp == 'apex':
# Apex SyncBN preferred unless native amp is activated
model = convert_syncbn_model(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.local_rank == 0:
_logger.info(
'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
if args.torchscript:
assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model'
model = torch.jit.script(model)
optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args))
# setup automatic mixed-precision (AMP) loss scaling and op casting
amp_autocast = suppress # do nothing
loss_scaler = None
if use_amp == 'apex':
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
loss_scaler = ApexScaler()
if args.local_rank == 0:
_logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
elif use_amp == 'native':
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if args.local_rank == 0:
_logger.info('Using native Torch AMP. Training in mixed precision.')
else:
if args.local_rank == 0:
_logger.info('AMP not enabled. Training in float32.')
# optionally resume from a checkpoint
resume_epoch = None
if args.resume:
resume_epoch = resume_checkpoint(
model, args.resume,
optimizer=None if args.no_resume_opt else optimizer,
loss_scaler=None if args.no_resume_opt else loss_scaler,
log_info=args.local_rank == 0)
# setup exponential moving average of model weights, SWA could be used here too
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEmaV2(
model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else None)
if args.resume:
load_checkpoint(model_ema.module, args.resume, use_ema=True)
# setup distributed training
if args.distributed:
if has_apex and use_amp == 'apex':
# Apex DDP preferred unless native amp is activated
if args.local_rank == 0:
_logger.info("Using NVIDIA APEX DistributedDataParallel.")
model = ApexDDP(model, delay_allreduce=True)
else:
if args.local_rank == 0:
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(model, device_ids=[args.local_rank], broadcast_buffers=not args.no_ddp_bb)
# NOTE: EMA model does not need to be wrapped by DDP
# setup learning rate schedule and starting epoch
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.local_rank == 0:
_logger.info('Scheduled epochs: {}'.format(num_epochs))
# create the train and eval datasets
dataset_train = create_dataset(
args.dataset, root=args.data_dir, split=args.train_split, is_training=True,
class_map=args.class_map,
download=args.dataset_download,
batch_size=args.batch_size,
repeats=args.epoch_repeats)
dataset_eval = create_dataset(
args.dataset, root=args.data_dir, split=args.val_split, is_training=False,
class_map=args.class_map,
download=args.dataset_download,
batch_size=args.batch_size)
# setup mixup / cutmix
collate_fn = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.num_classes)
if args.prefetcher:
assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
# wrap dataset in AugMix helper
if num_aug_splits > 1:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
# create data loaders w/ augmentation pipeline
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config['interpolation']
loader_train = create_loader(
dataset_train,
input_size=(3,160,160), #data_config['input_size'],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
auto_augment=args.aa,
num_aug_repeats=args.aug_repeats,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
use_multi_epochs_loader=args.use_multi_epochs_loader,
worker_seeding=args.worker_seeding,
)
loader_eval = create_loader(
dataset_eval,
input_size=(3,224,224), #data_config['input_size'],
batch_size=args.validation_batch_size or args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config['crop_pct'],
pin_memory=args.pin_mem,
)
# setup loss function
if args.jsd_loss:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing)
elif mixup_active:
# smoothing is handled with mixup target transform which outputs sparse, soft targets
if args.bce_loss:
train_loss_fn = BinaryCrossEntropy(target_threshold=args.bce_target_thresh)
else:
train_loss_fn = SoftTargetCrossEntropy()
elif args.smoothing:
if args.bce_loss:
train_loss_fn = BinaryCrossEntropy(smoothing=args.smoothing, target_threshold=args.bce_target_thresh)
else:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
train_loss_fn = nn.CrossEntropyLoss()
train_loss_fn = train_loss_fn.cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
# setup checkpoint saver and eval metric tracking
eval_metric = args.eval_metric
best_metric = None
best_epoch = None
saver = None
output_dir = None
if args.rank == 0:
if args.experiment:
tmptime = '-'.join([
datetime.now().strftime("%Y%m%d-%H%M%S"),
safe_model_name(args.model),
str(data_config['input_size'][-1])
])
exp_name = args.experiment + tmptime
else:
exp_name = '-'.join([
datetime.now().strftime("%Y%m%d-%H%M%S"),
safe_model_name(args.model),
str(data_config['input_size'][-1])
])
output_dir = get_outdir(args.output if args.output else './output/train', exp_name)
decreasing = True if eval_metric == 'loss' else False
saver = CheckpointSaver(
model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler,
checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing, max_history=args.checkpoint_hist)
with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
f.write(args_text)
try:
for epoch in range(start_epoch, num_epochs):
if args.distributed and hasattr(loader_train.sampler, 'set_epoch'):
loader_train.sampler.set_epoch(epoch)
train_metrics = train_one_epoch(
epoch, model, loader_train, optimizer, train_loss_fn, args,
lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn)
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
if args.local_rank == 0:
_logger.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size, args.dist_bn == 'reduce')
eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast)
if model_ema is not None and not args.model_ema_force_cpu:
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
ema_eval_metrics = validate(
model_ema.module, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)')
eval_metrics = ema_eval_metrics
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
if output_dir is not None:
update_summary(
epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),
write_header=best_metric is None, log_wandb=args.log_wandb and has_wandb)
if saver is not None:
# save proper checkpoint with eval metric
save_metric = eval_metrics[eval_metric]
best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric)
except KeyboardInterrupt:
pass
if best_metric is not None:
_logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
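# train_one_epoch runs a single pass over the training loader, timing data
# loading, forward, backward and optimizer steps separately, and returns the
# average training loss.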
def train_one_epoch(
epoch, model, loader, optimizer, loss_fn, args,
lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress,
loss_scaler=None, model_ema=None, mixup_fn=None):
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher and loader.mixup_enabled:
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
############ add for detailed time ##########
fp_time_m = AverageMeter()
bp_time_m = AverageMeter()
optimizer_time_m = AverageMeter()
model.train()
end = time.time()
datatime = time.time()
fptime = time.time()
bptime = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
datatime = time.time()
data_time_m.update(datatime - end)
if not args.prefetcher:
input, target = input.cuda(), target.cuda()
if mixup_fn is not None:
input, target = mixup_fn(input, target)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
fptime = time.time()
fp_time_m.update(fptime - datatime)
loss = loss_fn(output, target)
### FP32-to-TF32
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
optimizer.zero_grad()
if loss_scaler is not None:
loss_scaler(
loss, optimizer,
clip_grad=args.clip_grad, clip_mode=args.clip_mode,
parameters=model_parameters(model, exclude_head='agc' in args.clip_mode),
create_graph=second_order)
bp_time_m.update(loss_scaler.get_curtime() - fptime)
optimizer_time_m.update(time.time() - loss_scaler.get_curtime())
else:
loss.backward(create_graph=second_order)
### FP32-to-TF32
bptime = time.time()
bp_time_m.update(bptime - fptime)
if args.clip_grad is not None:
dispatch_clip_grad(
model_parameters(model, exclude_head='agc' in args.clip_mode),
value=args.clip_grad, mode=args.clip_mode)
optimizer.step()
### FP32-to-TF32 ?
optimizer_time_m.update(time.time() - bptime)
if model_ema is not None:
model_ema.update(model)
torch.cuda.synchronize() # Wait for all kernels in all streams on the current device to complete.
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
# if args.local_rank == 0:
# _logger.info(
# 'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
# 'Loss: {loss.val:#.4g} ({loss.avg:#.3g}) '
# 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
# '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
# 'LR: {lr:.3e} '
# 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
# epoch,
# batch_idx, len(loader),
# 100. * batch_idx / last_idx,
# loss=losses_m,
# batch_time=batch_time_m,
# rate=input.size(0) * args.world_size / batch_time_m.val,
# rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
# lr=lr,
# data_time=data_time_m))
if args.local_rank == 0:
_logger.info(
'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
'({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'Data: {data_time.val:.3f} ({data_time.avg:.3f}) '
'FP: {fp_time.val:.3f} ({fp_time.avg:.3f}) '
'BP: {bp_time.val:.3f} ({bp_time.avg:.3f}) '
'Optimizer: {optimizer_time.val:.3f} ({optimizer_time.avg:.3f})'.format(
epoch,
batch_idx, len(loader),
100. * batch_idx / last_idx,
batch_time=batch_time_m,
rate=input.size(0) * args.world_size / batch_time_m.val,
rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
data_time=data_time_m,
fp_time=fp_time_m,
bp_time=bp_time_m,
optimizer_time=optimizer_time_m))
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
padding=0,
normalize=True)
if saver is not None and args.recovery_interval and (
last_batch or (batch_idx + 1) % args.recovery_interval == 0):
saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
end = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
return OrderedDict([('loss', losses_m.avg)])
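# validate runs the model over the eval loader under torch.no_grad() and returns
# averaged loss, top-1 and top-5 accuracy (reduced across ranks when distributed).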
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher:
input = input.cuda()
target = target.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target = target[0:target.size(0):reduce_factor]
loss = loss_fn(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
acc1 = reduce_tensor(acc1, args.world_size)
acc5 = reduce_tensor(acc5, args.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
log_name = 'Test' + log_suffix
_logger.info(
'{0}: [{1:>4d}/{2}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
log_name, batch_idx, last_idx, batch_time=batch_time_m,
loss=losses_m, top1=top1_m, top5=top5_m))
metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
return metrics
if __name__ == '__main__':
main()
|
[] |
[] |
[
"WORLD_SIZE"
] |
[]
|
["WORLD_SIZE"]
|
python
| 1 | 0 | |
src/MyTrain_MIC5_Decoder8.py
|
import os
import glob
import torch
import numpy as np
import torch.nn as nn
from PIL import Image
import torch.optim as optim
from torchvision import models
import torch.nn.functional as F
from torchvision import transforms
from alisuretool.Tools import Tools
from torch.utils.data import DataLoader, Dataset
#######################################################################################################################
# 1 Data
class DatasetUSOD(Dataset):
def __init__(self, img_name_list, is_train=True):
# self.image_name_list = img_name_list[:20]
# self.label_name_list = lbl_name_list[:20]
self.image_name_list = img_name_list
self.is_train = is_train
self.transform_train = transforms.Compose([
transforms.RandomResizedCrop(size=224, scale=(0.3, 1.)),
transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),
transforms.RandomGrayscale(p=0.2),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
self.transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
pass
def __len__(self):
return len(self.image_name_list)
def __getitem__(self, idx):
image = Image.open(self.image_name_list[idx]).convert("RGB")
image = self.transform_train(image) if self.is_train else self.transform_test(image)
return image, idx
pass
#######################################################################################################################
# 2 Model
class ResBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
pass
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
pass
class ConvBlock(nn.Module):
def __init__(self, cin, cout, stride=1, has_relu=True):
super(ConvBlock, self).__init__()
self.has_relu = has_relu
self.conv = nn.Conv2d(cin, cout, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn = nn.BatchNorm2d(cout)
self.relu = nn.ReLU(inplace=True)
pass
def forward(self, x):
out = self.conv(x)
out = self.bn(out)
if self.has_relu:
out = self.relu(out)
return out
pass
class MICNormalize(nn.Module):
def __init__(self, power=2):
super(MICNormalize, self).__init__()
self.power = power
pass
def forward(self, x, dim=1):
norm = x.pow(self.power).sum(dim, keepdim=True).pow(1. / self.power)
out = x.div(norm)
return out
pass
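# Balanced pseudo-label assignment: each sample is given the highest-ranked cluster that still has
# capacity (n_sample // out_dim * ratio). `count` tracks how many labels changed since the previous
# epoch; `count_2` tracks how many samples could not take their top-1 cluster.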
class MICProduceClass(object):
def __init__(self, n_sample, out_dim, ratio=1.0):
super().__init__()
self.out_dim = out_dim
self.n_sample = n_sample
self.class_per_num = self.n_sample // self.out_dim * ratio
self.count = 0
self.count_2 = 0
self.class_num = np.zeros(shape=(self.out_dim, ), dtype=int)
self.classes = np.zeros(shape=(self.n_sample, ), dtype=int)
pass
def reset(self):
self.count = 0
self.count_2 = 0
self.class_num *= 0
pass
def cal_label(self, out, indexes):
top_k = out.data.topk(self.out_dim, dim=1)[1].cpu()
indexes_cpu = indexes.cpu()
batch_size = top_k.size(0)
class_labels = np.zeros(shape=(batch_size,), dtype=int)
for i in range(batch_size):
for j_index, j in enumerate(top_k[i]):
if self.class_per_num > self.class_num[j]:
class_labels[i] = j
self.class_num[j] += 1
self.count += 1 if self.classes[indexes_cpu[i]] != j else 0
self.classes[indexes_cpu[i]] = j
self.count_2 += 1 if j_index != 0 else 0
break
pass
pass
pass
def get_label(self, indexes):
return torch.tensor(self.classes[indexes.cpu().numpy()]).long()
pass
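# BASNet: ResNet-18 encoder (224 -> 28), three MIC clustering branches at 28/14/7 resolution that
# produce cluster logits and Cluster Activation Maps (CAMs), a pseudo label built from the fused
# CAMs, and a three-stage decoder that outputs sigmoid saliency maps upsampled back to 224x224.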
class BASNet(nn.Module):
def __init__(self, n_channels, clustering_num_list=None, pretrained=True, has_mask=True, more_obj=False):
super(BASNet, self).__init__()
self.has_mask = has_mask # 28
self.more_obj = more_obj # 28
resnet = models.resnet18(pretrained=pretrained)
# -------------Encoder--------------
self.encoder0 = ConvBlock(n_channels, 64, has_relu=True) # 64 * 224 * 224
self.encoder1 = resnet.layer1 # 64 * 224 * 224
self.encoder2 = resnet.layer2 # 128 * 112 * 112
self.encoder3 = resnet.layer3 # 256 * 56 * 56
self.encoder4 = resnet.layer4 # 512 * 28 * 28
# -------------MIC-------------
self.clustering_num_list = list([128, 256, 512]) if clustering_num_list is None else clustering_num_list
# MIC 1
self.mic_1_b1 = ResBlock(512, 512) # 28
self.mic_1_b2 = ResBlock(512, 512)
self.mic_1_b3 = ResBlock(512, 512)
self.mic_1_c1 = ConvBlock(512, self.clustering_num_list[0], has_relu=True)
self.mic_1_l2norm = MICNormalize(2)
# MIC 2
self.mic_2_pool = nn.MaxPool2d(2, 2, ceil_mode=True)
self.mic_2_b1 = ResBlock(512, 512) # 14
self.mic_2_b2 = ResBlock(512, 512)
self.mic_2_b3 = ResBlock(512, 512)
self.mic_2_c1 = ConvBlock(512, self.clustering_num_list[1], has_relu=True)
self.mic_2_l2norm = MICNormalize(2)
# MIC 3
self.mic_3_pool = nn.MaxPool2d(2, 2, ceil_mode=True)
self.mic_3_b1 = ResBlock(512, 512) # 7
self.mic_3_b2 = ResBlock(512, 512)
self.mic_3_b3 = ResBlock(512, 512)
self.mic_3_c1 = ConvBlock(512, self.clustering_num_list[2], has_relu=True)
self.mic_3_l2norm = MICNormalize(2)
# Decoder
self.decoder_1_b = ResBlock(512, 512) # 28
self.decoder_1_out = nn.Conv2d(512, 1, 3, padding=1)
self.decoder_1_c = ConvBlock(512, 256, has_relu=True)
self.decoder_2_b = ResBlock(256, 256) # 56
self.decoder_2_out = nn.Conv2d(256, 1, 3, padding=1)
self.decoder_2_c = ConvBlock(256, 128, has_relu=True)
self.decoder_3_b = ResBlock(128, 128) # 112
self.decoder_3_out = nn.Conv2d(128, 1, 3, padding=1)
# UP
self.mic_up_2 = nn.Upsample(scale_factor=2, mode='bilinear')
self.mic_up_4 = nn.Upsample(scale_factor=4, mode='bilinear')
self.mic_up_8 = nn.Upsample(scale_factor=8, mode='bilinear')
self.mic_up_16 = nn.Upsample(scale_factor=16, mode='bilinear')
self.mic_up_32 = nn.Upsample(scale_factor=32, mode='bilinear')
pass
def forward(self, x):
# -------------Encoder-------------
e0 = self.encoder0(x) # 64 * 224 * 224
e1 = self.encoder1(e0) # 64 * 224 * 224
e2 = self.encoder2(e1) # 128 * 112 * 112
e3 = self.encoder3(e2) # 256 * 56 * 56
e4 = self.encoder4(e3) # 512 * 28 * 28
# -------------MIC-------------
# 1
mic_f_1 = self.mic_1_b1(e4)
mic_f_1 = self.mic_1_b2(mic_f_1)
mic_f_1 = self.mic_1_b3(mic_f_1)
mic_1 = self.mic_1_c1(mic_f_1) # 512 * 28 * 28
smc_logits_1, smc_l2norm_1, smc_sigmoid_1 = self.salient_map_clustering(mic_1, which=1, has_mask=self.has_mask)
cam_1 = self.cluster_activation_map(smc_logits_1, mic_1)  # Cluster Activation Map (CAM)
return_m1 = {
"mic_f": mic_f_1,
"mic": mic_1,
"smc_logits": smc_logits_1,
"smc_l2norm": smc_l2norm_1,
"smc_sigmoid": smc_sigmoid_1,
"cam": cam_1
}
# 2
mic_f_2 = self.mic_2_pool(mic_f_1) # 512 * 14 * 14
mic_f_2 = self.mic_2_b1(mic_f_2)
mic_f_2 = self.mic_2_b2(mic_f_2)
mic_f_2 = self.mic_2_b3(mic_f_2)
mic_2 = self.mic_2_c1(mic_f_2) # 512 * 14 * 14
smc_logits_2, smc_l2norm_2, smc_sigmoid_2 = self.salient_map_clustering(mic_2, which=2, has_mask=self.has_mask)
cam_2 = self.cluster_activation_map(smc_logits_2, mic_2)  # Cluster Activation Map (CAM)
return_m2 = {
"mic_f": mic_f_2,
"mic": mic_2,
"smc_logits": smc_logits_2,
"smc_l2norm": smc_l2norm_2,
"smc_sigmoid": smc_sigmoid_2,
"cam": cam_2
}
# 3
mic_f_3 = self.mic_3_pool(mic_f_2) # 512 * 7 * 7
mic_f_3 = self.mic_3_b1(mic_f_3)
mic_f_3 = self.mic_3_b2(mic_f_3)
mic_f_3 = self.mic_3_b3(mic_f_3)
mic_3 = self.mic_3_c1(mic_f_3) # 512 * 7 * 7
smc_logits_3, smc_l2norm_3, smc_sigmoid_3 = self.salient_map_clustering(mic_3, which=3, has_mask=self.has_mask)
cam_3 = self.cluster_activation_map(smc_logits_3, mic_3)  # Cluster Activation Map (CAM)
return_m3 = {
"mic_f": mic_f_3,
"mic": mic_3,
"smc_logits": smc_logits_3,
"smc_l2norm": smc_l2norm_3,
"smc_sigmoid": smc_sigmoid_3,
"cam": cam_3
}
# -------------Label-------------
cam_norm_1_up = self.mic_up_8(cam_1)
cam_norm_2_up = self.mic_up_16(cam_2)
cam_norm_3_up = self.mic_up_32(cam_3)
cam_norm_2_up = self.up_to_target(cam_norm_2_up, cam_norm_1_up)
cam_norm_3_up = self.up_to_target(cam_norm_3_up, cam_norm_1_up)
# 1
cam_norm_up = (cam_norm_1_up + cam_norm_2_up) / 2
label = self.salient_map_divide(cam_norm_up, obj_th=0.80, bg_th=0.15, more_obj=self.more_obj)  # divide the saliency map into object / background / ignore
return_d0 = {
"label": label,
"cam_norm_up": cam_norm_up,
"cam_norm_1_up": cam_norm_1_up,
"cam_norm_2_up": cam_norm_2_up,
"cam_norm_3_up": cam_norm_3_up
}
# -------------Decoder-------------
d1 = self.decoder_1_b(e4)  # 512 * 28 * 28
d1_d2 = self.up_to_target(self.mic_up_2(self.decoder_1_c(d1)), e3) + e3 # 256 * 56 * 56
d1_out = self.decoder_1_out(d1) # 1 * 28 * 28
d1_out_sigmoid = torch.sigmoid(d1_out)  # 1 * 28 * 28  # small-scale output
d1_out_up = self.mic_up_8(d1_out)  # 1 * 224 * 224
d1_out_up_sigmoid = torch.sigmoid(d1_out_up)  # 1 * 224 * 224  # full-resolution output
return_d1 = {
"out": d1_out,
"out_sigmoid": d1_out_sigmoid,
"out_up": d1_out_up,
"out_up_sigmoid": d1_out_up_sigmoid
}
d2 = self.decoder_2_b(d1_d2) # 256 * 56 * 56
d2_d3 = self.up_to_target(self.mic_up_2(self.decoder_2_c(d2)), e2) + e2 # 128 * 112 * 112
d2_out = self.decoder_2_out(d2) # 1 * 56 * 56
d2_out_sigmoid = torch.sigmoid(d2_out)  # 1 * 56 * 56  # small-scale output
d2_out_up = self.mic_up_4(d2_out)  # 1 * 224 * 224
d2_out_up_sigmoid = torch.sigmoid(d2_out_up)  # 1 * 224 * 224  # full-resolution output
return_d2 = {
"out": d2_out,
"out_sigmoid": d2_out_sigmoid,
"out_up": d2_out_up,
"out_up_sigmoid": d2_out_up_sigmoid
}
d3 = self.decoder_3_b(d2_d3) # 128 * 112 * 112
d3_out = self.decoder_3_out(d3) # 1 * 112 * 112
d3_out_sigmoid = torch.sigmoid(d3_out)  # 1 * 112 * 112  # small-scale output
d3_out_up = self.mic_up_2(d3_out)  # 1 * 224 * 224
d3_out_up_sigmoid = torch.sigmoid(d3_out_up)  # 1 * 224 * 224  # full-resolution output
return_d3 = {
"out": d3_out,
"out_sigmoid": d3_out_sigmoid,
"out_up": d3_out_up,
"out_up_sigmoid": d3_out_up_sigmoid
}
return_m = {"m1": return_m1, "m2": return_m2, "m3": return_m3}
return_d = {"label": return_d0, "d1": return_d1, "d2": return_d2, "d3": return_d3}
return return_m, return_d
@staticmethod
def up_to_target(source, target):
if source.size()[2] != target.size()[2] or source.size()[3] != target.size()[3]:
source = torch.nn.functional.interpolate(source, size=[target.size()[2], target.size()[3]])
return source
def salient_map_clustering(self, mic, which=1, has_mask=True):
# m1
mic_gaussian = mic
if has_mask:
if which == 1:
g_mask = self._mask_gaussian([mic.size()[2], mic.size()[3]], sigma=mic.size()[2] * mic.size()[3] // 2)
mic_gaussian = mic * torch.tensor(g_mask).cuda()
elif which == 2:
# g_mask = self._mask_gaussian([mic.size()[2], mic.size()[3]], sigma=mic.size()[2] * mic.size()[3])
# mic_gaussian = mic * torch.tensor(g_mask).cuda()
mic_gaussian = mic
else:
mic_gaussian = mic
pass
smc_logits = F.adaptive_avg_pool2d(mic_gaussian, 1).view((mic_gaussian.size()[0], -1)) # 512
smc_l2norm = self.mic_1_l2norm(smc_logits)
smc_sigmoid = torch.sigmoid(smc_logits)
return smc_logits, smc_l2norm, smc_sigmoid
def cluster_activation_map(self, smc_logits, mic_feature):
top_k_value, top_k_index = torch.topk(smc_logits, 1, 1)
cam = torch.cat([mic_feature[i:i+1, top_k_index[i], :, :] for i in range(mic_feature.size()[0])])
cam_norm = self._feature_norm(cam)
return cam_norm
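# Threshold the normalized CAM into a pseudo label: pixels above obj_th become object (1),
# pixels below bg_th become background (0), everything else stays 255 and is ignored by the
# BCE loss; more_obj relaxes the object threshold when fewer than 28 pixels qualify.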
def salient_map_divide(self, cam_norm_up, obj_th=0.7, bg_th=0.2, more_obj=False):
cam_norm_up = self._feature_norm(cam_norm_up)
label = torch.zeros(tuple(cam_norm_up.size())).fill_(255)
label = label.cuda() if torch.cuda.is_available() else label
label[cam_norm_up < bg_th] = 0.0
if more_obj:
for i in range(cam_norm_up.size()[0]):
mask_pos_i = cam_norm_up[i] > obj_th
if torch.sum(mask_pos_i) < 28:
mask_pos_i = cam_norm_up[i] > (obj_th * 0.9)
pass
label[i][mask_pos_i] = 1.0
pass
pass
else:
label[cam_norm_up > obj_th] = 1.0
pass
return label
@staticmethod
def _feature_norm(feature_map):
feature_shape = feature_map.size()
batch_min, _ = torch.min(feature_map.view((feature_shape[0], -1)), dim=-1, keepdim=True)
batch_max, _ = torch.max(feature_map.view((feature_shape[0], -1)), dim=-1, keepdim=True)
norm = torch.div(feature_map.view((feature_shape[0], -1)) - batch_min, batch_max - batch_min)
return norm.view(feature_shape)
@staticmethod
def _mask_gaussian(image_size, where=None, sigma=20):
x = np.arange(0, image_size[1], 1, float)
y = np.arange(0, image_size[0], 1, float)
y = y[:, np.newaxis]
if where:
x0, y0 = where[1], where[0]
else:
x0, y0 = image_size[1] // 2, image_size[0] // 2
pass
# generate the Gaussian mask
mask = np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma).astype(np.float32)
return mask
pass
#######################################################################################################################
# 3 Runner
class BASRunner(object):
def __init__(self, epoch_num=1000, batch_size_train=8, has_mask=True, more_obj=False,
clustering_num_1=128, clustering_num_2=256, clustering_num_3=512,
clustering_ratio_1=1, clustering_ratio_2=1.5, clustering_ratio_3=2,
data_dir='/mnt/4T/Data/SOD/DUTS/DUTS-TR', tra_image_dir='DUTS-TR-Image',
tra_label_dir='DUTS-TR-Mask', model_dir="./saved_models/my_train_mic_only"):
self.epoch_num = epoch_num
self.batch_size_train = batch_size_train
self.has_mask = has_mask
self.more_obj = more_obj
# Dataset
self.model_dir = model_dir
self.data_dir = data_dir
self.tra_image_dir = tra_image_dir
self.tra_label_dir = tra_label_dir
self.tra_img_name_list, tra_lbl_name_list = self.get_tra_img_label_name()
self.dataset_usod = DatasetUSOD(img_name_list=self.tra_img_name_list, is_train=True)
self.dataloader_usod = DataLoader(self.dataset_usod, self.batch_size_train, shuffle=True, num_workers=8)
# Model
self.net = BASNet(3, clustering_num_list=[clustering_num_1, clustering_num_2, clustering_num_3],
pretrained=True, has_mask=self.has_mask, more_obj=self.more_obj)
self.net = self.net.cuda() if torch.cuda.is_available() else self.net
# MIC
self.produce_class_1 = MICProduceClass(n_sample=len(self.dataset_usod),
out_dim=clustering_num_1, ratio=clustering_ratio_1)
self.produce_class_2 = MICProduceClass(n_sample=len(self.dataset_usod),
out_dim=clustering_num_2, ratio=clustering_ratio_2)
self.produce_class_3 = MICProduceClass(n_sample=len(self.dataset_usod),
out_dim=clustering_num_3, ratio=clustering_ratio_3)
# Loss and Optim
self.bce_loss = nn.BCELoss()
self.mic_loss = nn.CrossEntropyLoss()
self.bce_loss = self.bce_loss.cuda() if torch.cuda.is_available() else self.bce_loss
self.mic_loss = self.mic_loss.cuda() if torch.cuda.is_available() else self.mic_loss
self.optimizer = optim.Adam(self.net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
pass
def load_model(self, model_file_name):
self.net.load_state_dict(torch.load(model_file_name), strict=False)
Tools.print("restore from {}".format(model_file_name))
pass
def get_tra_img_label_name(self):
tra_img_name_list = glob.glob(os.path.join(self.data_dir, self.tra_image_dir, '*.jpg'))
tra_lbl_name_list = [os.path.join(self.data_dir, self.tra_label_dir, '{}.png'.format(
os.path.splitext(os.path.basename(img_path))[0])) for img_path in tra_img_name_list]
Tools.print("train images: {}".format(len(tra_img_name_list)))
Tools.print("train labels: {}".format(len(tra_lbl_name_list)))
return tra_img_name_list, tra_lbl_name_list
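# Fused objective: the mean of the three MIC cross-entropy losses plus 5x a BCE term that is
# evaluated only on pixels whose pseudo label is not the ignore value (255).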
def all_loss_fusion(self, mic_1_out, mic_2_out, mic_3_out,
mic_labels_1, mic_labels_2, mic_labels_3, sod_sigmoid, sod_label):
loss_mic_1 = self.mic_loss(mic_1_out, mic_labels_1)
loss_mic_2 = self.mic_loss(mic_2_out, mic_labels_2)
loss_mic_3 = self.mic_loss(mic_3_out, mic_labels_3)
positions = sod_label.view(-1, 1) < 255.0
loss_bce = self.bce_loss(sod_sigmoid.view(-1, 1)[positions], sod_label.view(-1, 1)[positions])
loss_all = (loss_mic_1 + loss_mic_2 + loss_mic_3) / 3 + 5 * loss_bce
return loss_all, loss_mic_1, loss_mic_2, loss_mic_3, loss_bce
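# Each epoch: (0) every `update_epoch_freq` epochs, run the network in eval mode over the whole
# dataset to refresh the pseudo cluster labels; (1) train with the fused loss, supervising the
# decoder's d2 upsampled sigmoid output with the CAM-derived pseudo label; (2) save periodically.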
def train(self, save_epoch_freq=5, print_ite_num=100, update_epoch_freq=1):
for epoch in range(0, self.epoch_num):
###########################################################################
# 0 Update pseudo labels
if epoch % update_epoch_freq == 0:
Tools.print()
Tools.print("Update label {} .......".format(epoch))
self.net.eval()
self.produce_class_1.reset()
self.produce_class_2.reset()
self.produce_class_3.reset()
for batch_idx, (inputs, indexes) in enumerate(self.dataloader_usod):
inputs = inputs.type(torch.FloatTensor)
inputs = inputs.cuda() if torch.cuda.is_available() else inputs
indexes = indexes.cuda() if torch.cuda.is_available() else indexes
return_m, return_d = self.net(inputs)
self.produce_class_1.cal_label(return_m["m1"]["smc_l2norm"], indexes)
self.produce_class_2.cal_label(return_m["m2"]["smc_l2norm"], indexes)
self.produce_class_3.cal_label(return_m["m3"]["smc_l2norm"], indexes)
pass
Tools.print("Epoch: [{}] {}/{} {}/{} {}/{}".format(
epoch, self.produce_class_1.count, self.produce_class_1.count_2, self.produce_class_2.count,
self.produce_class_2.count_2, self.produce_class_3.count, self.produce_class_3.count_2))
Tools.print()
pass
###########################################################################
# 1 Train the model
all_loss, all_loss_mic_1, all_loss_mic_2, all_loss_mic_3, all_loss_bce = 0.0, 0.0, 0.0, 0.0, 0.0
self.net.train()
for i, (inputs, indexes) in enumerate(self.dataloader_usod):
inputs = inputs.type(torch.FloatTensor)
inputs = inputs.cuda() if torch.cuda.is_available() else inputs
indexes = indexes.cuda() if torch.cuda.is_available() else indexes
self.optimizer.zero_grad()
return_m, return_d = self.net(inputs)
mic_labels_1 = self.produce_class_1.get_label(indexes)
mic_labels_1 = mic_labels_1.cuda() if torch.cuda.is_available() else mic_labels_1
mic_labels_2 = self.produce_class_2.get_label(indexes)
mic_labels_2 = mic_labels_2.cuda() if torch.cuda.is_available() else mic_labels_2
mic_labels_3 = self.produce_class_3.get_label(indexes)
mic_labels_3 = mic_labels_3.cuda() if torch.cuda.is_available() else mic_labels_3
# 1
target = return_d["d2"]["out_up_sigmoid"]
# 2
# target = return_d["d3"]["out_up_sigmoid"]
loss, loss_mic_1, loss_mic_2, loss_mic_3, loss_bce = self.all_loss_fusion(
return_m["m1"]["smc_logits"], return_m["m2"]["smc_logits"], return_m["m3"]["smc_logits"],
mic_labels_1, mic_labels_2, mic_labels_3, target, return_d["label"]["label"])
loss.backward()
self.optimizer.step()
all_loss += loss.item()
all_loss_mic_1 += loss_mic_1.item()
all_loss_mic_2 += loss_mic_2.item()
all_loss_mic_3 += loss_mic_3.item()
all_loss_bce += loss_bce.item()
if i % print_ite_num == 0:
Tools.print("[E:{:4d}/{:4d}, b:{:4d}/{:4d}] "
"a loss:{:.2f} loss:{:.2f} "
"a mic 1:{:.2f} mic 1:{:.2f} "
"a mic 2:{:.2f} mic 2:{:.2f} "
"a mic 3:{:.2f} mic 3:{:.2f} "
"a bce:{:.2f} bce:{:.2f}".format(
epoch, self.epoch_num, i, len(self.dataloader_usod),
all_loss/(i+1), loss.item(),
all_loss_mic_1/(i+1), loss_mic_1.item(),
all_loss_mic_2/(i+1), loss_mic_2.item(),
all_loss_mic_3/(i+1), loss_mic_3.item(),
all_loss_bce/(i+1), loss_bce.item()))
pass
pass
###########################################################################
# 2 Save the model
if epoch % save_epoch_freq == 0:
save_file_name = Tools.new_dir(os.path.join(
self.model_dir, "{}_train_{:.3f}.pth".format(epoch, all_loss / len(self.dataloader_usod))))
torch.save(self.net.state_dict(), save_file_name)
Tools.print()
Tools.print("Save Model to {}".format(save_file_name))
Tools.print()
pass
pass
pass
pass
#######################################################################################################################
# 4 Main
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
bas_runner = BASRunner(batch_size_train=8, has_mask=True, more_obj=False,
model_dir="./saved_models/my_train_mic5_decoder8_aug_mask_norm_5bce_d2")
bas_runner.load_model('./saved_models/my_train5_diff_aug_mask/125_train_6.569.pth')
bas_runner.train()
pass
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
k8s/k8s_test.go
|
package k8s
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"testing"
"github.com/hashicorp/vault/api"
"github.com/ory/dockertest"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
rootToken = "90b03685-e17b-7e5e-13a0-e14e45baeb2f"
)
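// TestMain starts a Vault dev-server container via dockertest, points VAULT_ADDR/VAULT_TOKEN at
// it, waits until the server accepts connections, runs the test suite, and purges the container.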
func TestMain(m *testing.M) {
flag.Parse()
//os.Unsetenv("http_proxy")
//os.Unsetenv("https_proxy")
pool, err := dockertest.NewPool("unix:///var/run/docker.sock")
if err != nil {
log.Fatalf("Could not connect to docker: %s", err)
}
// pulls an image, creates a container based on it and runs it
resource, err := pool.Run("vault", "latest", []string{
"VAULT_DEV_ROOT_TOKEN_ID=" + rootToken,
"VAULT_DEV_LISTEN_ADDRESS=0.0.0.0:8200",
})
if err != nil {
log.Fatalf("Could not start resource: %s", err)
}
host := os.Getenv("DOCKER_HOST")
if host == "" {
host = "localhost"
}
if host != "localhost" && !strings.Contains(host, ".") {
host = host + ".pnet.ch"
}
vaultAddr := fmt.Sprintf("http://%s:%s", host, resource.GetPort("8200/tcp"))
os.Setenv("VAULT_ADDR", vaultAddr)
os.Setenv("VAULT_TOKEN", rootToken)
fmt.Println("VAULT_ADDR:", vaultAddr)
vaultConfig := api.DefaultConfig()
if err := vaultConfig.ReadEnvironment(); err != nil {
log.Fatal(err)
}
vaultClient, err := api.NewClient(vaultConfig)
if err != nil {
log.Fatal(err)
}
// exponential backoff-retry, because the application in the container might not be ready to accept connections yet
if err := pool.Retry(func() error {
_, err = vaultClient.Sys().ListMounts()
return err
}); err != nil {
log.Fatal(errors.Wrap(err, "could not connect to vault in docker"))
}
code := m.Run()
// You can't defer this because os.Exit doesn't care for defer
if err := pool.Purge(resource); err != nil {
log.Fatalf("could not purge resource: %s", err)
}
os.Exit(code)
}
func TestFixAuthMountPath(t *testing.T) {
testData := [][2]string{
{"kubernetes", "auth/kubernetes"},
{"/kubernetes", "auth/kubernetes"},
{"/kubernetes/", "auth/kubernetes"},
{"kubernetes/", "auth/kubernetes"},
{"kubernetes/something", "auth/kubernetes/something"},
{"auth/kubernetes", "auth/kubernetes"},
{"/auth/kubernetes", "auth/kubernetes"},
}
for _, td := range testData {
t.Log(td[0])
assert.Equal(t, td[1], FixAuthMountPath(td[0]))
}
}
func TestNewVaultFromEnvironment(t *testing.T) {
vaultTokenPath, err := ioutil.TempFile("", "vault-token")
if err != nil {
t.Fatal(err)
}
defer os.Remove(vaultTokenPath.Name())
t.Run("without minimal attributes", func(t *testing.T) {
v, err := NewFromEnvironment()
assert.Nil(t, v)
assert.Error(t, err)
})
t.Run("with minimal attributes", func(t *testing.T) {
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
assert.Equal(t, "", v.Role)
assert.Equal(t, vaultTokenPath.Name(), v.TokenPath)
assert.Equal(t, false, v.ReAuth)
assert.Equal(t, 0, v.TTL)
assert.Equal(t, AuthMountPath, v.AuthMountPath)
assert.Equal(t, ServiceAccountTokenPath, v.ServiceAccountTokenPath)
assert.Equal(t, false, v.AllowFail)
})
t.Run("invalid VAULT_TTL", func(t *testing.T) {
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("VAULT_TTL", "1std")
defer os.Setenv("VAULT_TTL", "")
v, err := NewFromEnvironment()
assert.Nil(t, v)
assert.Error(t, err)
})
t.Run("valid VAULT_TTL", func(t *testing.T) {
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("VAULT_TTL", "1h")
defer os.Setenv("VAULT_TTL", "")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
assert.Equal(t, 3600, v.TTL)
})
t.Run("invalid VAULT_REAUTH", func(t *testing.T) {
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("VAULT_REAUTH", "no")
defer os.Setenv("VAULT_REAUTH", "")
v, err := NewFromEnvironment()
assert.Nil(t, v)
assert.Error(t, err)
})
t.Run("valid VAULT_REAUTH", func(t *testing.T) {
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("VAULT_REAUTH", "true")
defer os.Setenv("VAULT_REAUTH", "")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
assert.Equal(t, true, v.ReAuth)
})
t.Run("invalid ALLOW_FAIL", func(t *testing.T) {
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("ALLOW_FAIL", "no")
defer os.Setenv("ALLOW_FAIL", "")
v, err := NewFromEnvironment()
assert.Nil(t, v)
assert.Error(t, err)
})
t.Run("valid ALLOW_FAIL", func(t *testing.T) {
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("ALLOW_FAIL", "true")
defer os.Setenv("ALLOW_FAIL", "")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
assert.Equal(t, true, v.AllowFail)
})
}
func TestToken(t *testing.T) {
t.Run("failed to store token", func(t *testing.T) {
os.Setenv("VAULT_TOKEN_PATH", "/not/existing/path")
v, err := NewFromEnvironment()
assert.NoError(t, err)
assert.NotNil(t, v)
assert.Error(t, v.StoreToken(rootToken))
})
t.Run("failed to load token", func(t *testing.T) {
os.Setenv("VAULT_TOKEN_PATH", "/not/existing/path")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
token, err := v.LoadToken()
assert.Error(t, err)
assert.Equal(t, "", token)
})
t.Run("load empty token", func(t *testing.T) {
vaultTokenPath, err := ioutil.TempFile("", "vault-token")
if err != nil {
t.Fatal(err)
}
defer os.Remove(vaultTokenPath.Name())
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
require.NoError(t, v.StoreToken(""))
token, err := v.LoadToken()
assert.Error(t, err)
assert.Equal(t, "", token)
})
t.Run("store and load token", func(t *testing.T) {
vaultTokenPath, err := ioutil.TempFile("", "vault-token")
if err != nil {
t.Fatal(err)
}
defer os.Remove(vaultTokenPath.Name())
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
require.NoError(t, v.StoreToken(rootToken))
token, err := v.LoadToken()
assert.NoError(t, err)
assert.Equal(t, rootToken, token)
})
t.Run("failed to get token without ReAuth", func(t *testing.T) {
vaultTokenPath, err := ioutil.TempFile("", "vault-token")
if err != nil {
t.Fatal(err)
}
defer os.Remove(vaultTokenPath.Name())
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("VAULT_REAUTH", "false")
defer os.Setenv("VAULT_REAUTH", "")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
token, err := v.GetToken()
assert.Error(t, err)
assert.Equal(t, "", token)
})
t.Run("failed to renew token without ReAuth", func(t *testing.T) {
vaultTokenPath, err := ioutil.TempFile("", "vault-token")
if err != nil {
t.Fatal(err)
}
defer os.Remove(vaultTokenPath.Name())
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("VAULT_REAUTH", "false")
defer os.Setenv("VAULT_REAUTH", "")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
require.NoError(t, v.StoreToken(rootToken))
token, err := v.GetToken()
assert.Error(t, err)
assert.Equal(t, "", token)
})
t.Run("successful renew token without ReAuth", func(t *testing.T) {
vaultTokenPath, err := ioutil.TempFile("", "vault-token")
if err != nil {
t.Fatal(err)
}
defer os.Remove(vaultTokenPath.Name())
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("VAULT_REAUTH", "false")
defer os.Setenv("VAULT_REAUTH", "")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
// create a new token
v.UseToken(rootToken)
secret, err := v.Client().Auth().Token().CreateOrphan(&api.TokenCreateRequest{
TTL: "3600s",
})
assert.NoError(t, err)
// store the new token
require.NoError(t, v.StoreToken(secret.Auth.ClientToken))
// the actual test
token, err := v.GetToken()
assert.NoError(t, err)
assert.Equal(t, secret.Auth.ClientToken, token)
})
}
func TestAuthenticate(t *testing.T) {
vaultTokenPath, err := ioutil.TempFile("", "vault-token")
if err != nil {
t.Fatal(err)
}
defer os.Remove(vaultTokenPath.Name())
serviceAccountTokenPath, err := ioutil.TempFile("", "sa-token")
if err != nil {
t.Fatal(err)
}
defer os.Remove(serviceAccountTokenPath.Name())
t.Run("failed to load service account token", func(t *testing.T) {
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("SERVICE_ACCOUNT_TOKEN_PATH", "/not/existing/path")
defer os.Setenv("SERVICE_ACCOUNT_TOKEN_PATH", "")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
token, err := v.Authenticate()
assert.Error(t, err)
assert.Equal(t, "", token)
})
t.Run("failed authentication", func(t *testing.T) {
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("SERVICE_ACCOUNT_TOKEN_PATH", serviceAccountTokenPath.Name())
defer os.Setenv("SERVICE_ACCOUNT_TOKEN_PATH", "")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
token, err := v.Authenticate()
assert.Error(t, err)
assert.Equal(t, "", token)
})
t.Run("successful authentication", func(t *testing.T) {
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("SERVICE_ACCOUNT_TOKEN_PATH", serviceAccountTokenPath.Name())
defer os.Setenv("SERVICE_ACCOUNT_TOKEN_PATH", "")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
vaultLogicalBackup := vaultLogical
vaultLogical = func(c *api.Client) vaultLogicalWriter {
return &fakeWriter{}
}
defer func() { vaultLogical = vaultLogicalBackup }()
token, err := v.Authenticate()
assert.NoError(t, err)
assert.Equal(t, rootToken, token)
})
t.Run("failed authentication with warnings", func(t *testing.T) {
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("SERVICE_ACCOUNT_TOKEN_PATH", serviceAccountTokenPath.Name())
defer os.Setenv("SERVICE_ACCOUNT_TOKEN_PATH", "")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
vaultLogicalBackup := vaultLogical
vaultLogical = func(c *api.Client) vaultLogicalWriter {
return &fakeWriterWithWarnings{}
}
defer func() { vaultLogical = vaultLogicalBackup }()
token, err := v.Authenticate()
assert.Error(t, err)
assert.Equal(t, "", token)
})
t.Run("failed to get token with ReAuth", func(t *testing.T) {
vaultTokenPath, err := ioutil.TempFile("", "vault-token")
if err != nil {
t.Fatal(err)
}
defer os.Remove(vaultTokenPath.Name())
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("VAULT_REAUTH", "true")
defer os.Setenv("VAULT_REAUTH", "")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
token, err := v.GetToken()
assert.Error(t, err)
assert.Equal(t, "", token)
})
t.Run("failed to renew token with ReAuth", func(t *testing.T) {
vaultTokenPath, err := ioutil.TempFile("", "vault-token")
if err != nil {
t.Fatal(err)
}
defer os.Remove(vaultTokenPath.Name())
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("VAULT_REAUTH", "true")
defer os.Setenv("VAULT_REAUTH", "")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
require.NoError(t, v.StoreToken(rootToken))
token, err := v.GetToken()
assert.Error(t, err)
assert.Equal(t, "", token)
})
}
func TestRenew(t *testing.T) {
t.Run("failed to get renewer", func(t *testing.T) {
vaultTokenPath, err := ioutil.TempFile("", "vault-token")
if err != nil {
t.Fatal(err)
}
defer os.Remove(vaultTokenPath.Name())
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
// the actual test
r, err := v.NewRenewer(rootToken)
assert.Error(t, err)
assert.Nil(t, r)
})
t.Run("failed to get renewer", func(t *testing.T) {
vaultTokenPath, err := ioutil.TempFile("", "vault-token")
if err != nil {
t.Fatal(err)
}
defer os.Remove(vaultTokenPath.Name())
os.Setenv("VAULT_TOKEN_PATH", vaultTokenPath.Name())
os.Setenv("VAULT_REAUTH", "false")
defer os.Setenv("VAULT_REAUTH", "")
v, err := NewFromEnvironment()
assert.NotNil(t, v)
assert.NoError(t, err)
// create a new token
v.UseToken(rootToken)
secret, err := v.Client().Auth().Token().CreateOrphan(&api.TokenCreateRequest{
TTL: "3600s",
})
assert.NoError(t, err)
r, err := v.NewRenewer(secret.Auth.ClientToken)
assert.NoError(t, err)
assert.NotNil(t, r)
})
}
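// fakeWriter and fakeWriterWithWarnings stub the vaultLogical writer so Authenticate can be
// exercised without a real Kubernetes auth backend: one returns a client token, the other only warnings.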
type fakeWriter struct{}
func (f *fakeWriter) Write(path string, data map[string]interface{}) (*api.Secret, error) {
return &api.Secret{
Auth: &api.SecretAuth{
ClientToken: rootToken,
},
}, nil
}
type fakeWriterWithWarnings struct{}
func (f *fakeWriterWithWarnings) Write(path string, data map[string]interface{}) (*api.Secret, error) {
return &api.Secret{
Warnings: []string{"warning"},
}, nil
}
|
[
"\"DOCKER_HOST\""
] |
[] |
[
"DOCKER_HOST"
] |
[]
|
["DOCKER_HOST"]
|
go
| 1 | 0 | |
fhirclient/r4models/claim_tests.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import claim
from .fhirdate import FHIRDate
class ClaimTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Claim", js["resourceType"])
return claim.Claim(js)
def testClaim1(self):
inst = self.instantiate_from("claim-example-institutional-rich.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim1(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim1(inst2)
def implClaim1(self, inst):
self.assertEqual(inst.accident.date.date, FHIRDate("2014-07-09").date)
self.assertEqual(inst.accident.date.as_json(), "2014-07-09")
self.assertEqual(inst.accident.locationAddress.text, "Grouse Mountain Ski Hill")
self.assertEqual(inst.accident.type.coding[0].code, "SPT")
self.assertEqual(inst.accident.type.coding[0].display, "Sporting Accident")
self.assertEqual(inst.accident.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActIncidentCode")
self.assertEqual(inst.billablePeriod.end.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.billablePeriod.end.as_json(), "2014-08-16")
self.assertEqual(inst.billablePeriod.start.date, FHIRDate("2014-08-15").date)
self.assertEqual(inst.billablePeriod.start.as_json(), "2014-08-15")
self.assertEqual(inst.careTeam[0].qualification.coding[0].code, "physician")
self.assertEqual(inst.careTeam[0].qualification.coding[0].system, "http://example.org/fhir/CodeSystem/provider-qualification")
self.assertTrue(inst.careTeam[0].responsible)
self.assertEqual(inst.careTeam[0].role.coding[0].code, "primary")
self.assertEqual(inst.careTeam[0].role.coding[0].system, "http://example.org/fhir/CodeSystem/claim-careteamrole")
self.assertEqual(inst.careTeam[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code, "654456")
self.assertEqual(inst.diagnosis[0].packageCode.coding[0].code, "400")
self.assertEqual(inst.diagnosis[0].packageCode.coding[0].display, "Head trauma - concussion")
self.assertEqual(inst.diagnosis[0].packageCode.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-diagnosisrelatedgroup")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.diagnosis[0].type[0].coding[0].code, "admitting")
self.assertEqual(inst.diagnosis[0].type[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-diagnosistype")
self.assertEqual(inst.id, "960151")
self.assertEqual(inst.identifier[0].system, "http://happyhospital.com/claim")
self.assertEqual(inst.identifier[0].value, "96123451")
self.assertEqual(inst.insurance[0].businessArrangement, "BA987123")
self.assertTrue(inst.insurance[0].focal)
self.assertEqual(inst.insurance[0].preAuthRef[0], "PA2014G56473")
self.assertEqual(inst.insurance[0].sequence, 1)
self.assertEqual(inst.item[0].careTeamSequence[0], 1)
self.assertEqual(inst.item[0].net.currency, "USD")
self.assertEqual(inst.item[0].net.value, 125.0)
self.assertEqual(inst.item[0].productOrService.coding[0].code, "exam")
self.assertEqual(inst.item[0].productOrService.coding[0].system, "http://hl7.org/fhir/ex-serviceproduct")
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].servicedDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].servicedDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].unitPrice.value, 125.0)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.payee.type.coding[0].code, "provider")
self.assertEqual(inst.priority.coding[0].code, "normal")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.supportingInfo[0].category.coding[0].code, "employmentimpacted")
self.assertEqual(inst.supportingInfo[0].category.coding[0].system, "http://terminology.hl7.org/CodeSystem/claiminformationcategory")
self.assertEqual(inst.supportingInfo[0].sequence, 1)
self.assertEqual(inst.supportingInfo[0].timingPeriod.end.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.supportingInfo[0].timingPeriod.end.as_json(), "2014-08-16")
self.assertEqual(inst.supportingInfo[0].timingPeriod.start.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.supportingInfo[0].timingPeriod.start.as_json(), "2014-08-16")
self.assertEqual(inst.supportingInfo[1].category.coding[0].code, "hospitalized")
self.assertEqual(inst.supportingInfo[1].category.coding[0].system, "http://terminology.hl7.org/CodeSystem/claiminformationcategory")
self.assertEqual(inst.supportingInfo[1].sequence, 2)
self.assertEqual(inst.supportingInfo[1].timingPeriod.end.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.supportingInfo[1].timingPeriod.end.as_json(), "2014-08-16")
self.assertEqual(inst.supportingInfo[1].timingPeriod.start.date, FHIRDate("2014-08-15").date)
self.assertEqual(inst.supportingInfo[1].timingPeriod.start.as_json(), "2014-08-15")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.total.currency, "USD")
self.assertEqual(inst.total.value, 125.0)
self.assertEqual(inst.type.coding[0].code, "institutional")
self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/claim-type")
self.assertEqual(inst.use, "claim")
def testClaim2(self):
inst = self.instantiate_from("claim-example-professional.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim2(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim2(inst2)
def implClaim2(self, inst):
self.assertEqual(inst.careTeam[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code, "654456")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "860150")
self.assertEqual(inst.identifier[0].system, "http://happypdocs.com/claim")
self.assertEqual(inst.identifier[0].value, "8612345")
self.assertTrue(inst.insurance[0].focal)
self.assertEqual(inst.insurance[0].sequence, 1)
self.assertEqual(inst.item[0].careTeamSequence[0], 1)
self.assertEqual(inst.item[0].net.currency, "USD")
self.assertEqual(inst.item[0].net.value, 75.0)
self.assertEqual(inst.item[0].productOrService.coding[0].code, "exam")
self.assertEqual(inst.item[0].productOrService.coding[0].system, "http://hl7.org/fhir/ex-serviceproduct")
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].servicedDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].servicedDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].unitPrice.value, 75.0)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.payee.type.coding[0].code, "provider")
self.assertEqual(inst.priority.coding[0].code, "normal")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "professional")
self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/claim-type")
self.assertEqual(inst.use, "claim")
def testClaim3(self):
inst = self.instantiate_from("claim-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim3(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim3(inst2)
def implClaim3(self, inst):
self.assertEqual(inst.careTeam[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code, "123456")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "100150")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/claim")
self.assertEqual(inst.identifier[0].value, "12345")
self.assertTrue(inst.insurance[0].focal)
self.assertEqual(inst.insurance[0].identifier.system, "http://happyvalley.com/claim")
self.assertEqual(inst.insurance[0].identifier.value, "12345")
self.assertEqual(inst.insurance[0].sequence, 1)
self.assertEqual(inst.item[0].careTeamSequence[0], 1)
self.assertEqual(inst.item[0].net.currency, "USD")
self.assertEqual(inst.item[0].net.value, 135.57)
self.assertEqual(inst.item[0].productOrService.coding[0].code, "1200")
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].servicedDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].servicedDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].unitPrice.value, 135.57)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.payee.type.coding[0].code, "provider")
self.assertEqual(inst.priority.coding[0].code, "normal")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Oral Health Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "oral")
self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/claim-type")
self.assertEqual(inst.use, "claim")
def testClaim4(self):
inst = self.instantiate_from("claim-example-vision.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim4(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim4(inst2)
def implClaim4(self, inst):
self.assertEqual(inst.careTeam[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code, "654321")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "660150")
self.assertEqual(inst.identifier[0].system, "http://happysight.com/claim")
self.assertEqual(inst.identifier[0].value, "6612345")
self.assertTrue(inst.insurance[0].focal)
self.assertEqual(inst.insurance[0].sequence, 1)
self.assertEqual(inst.item[0].careTeamSequence[0], 1)
self.assertEqual(inst.item[0].net.currency, "USD")
self.assertEqual(inst.item[0].net.value, 80.0)
self.assertEqual(inst.item[0].productOrService.coding[0].code, "exam")
self.assertEqual(inst.item[0].productOrService.coding[0].system, "http://example.org/fhir/CodeSystem/ex-visionservice")
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].servicedDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].servicedDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].unitPrice.value, 80.0)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.payee.type.coding[0].code, "provider")
self.assertEqual(inst.priority.coding[0].code, "normal")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Vision Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "vision")
self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/claim-type")
self.assertEqual(inst.use, "claim")
def testClaim5(self):
inst = self.instantiate_from("claim-example-vision-glasses-3tier.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim5(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim5(inst2)
def implClaim5(self, inst):
self.assertEqual(inst.careTeam[0].sequence, 1)
self.assertEqual(inst.contained[0].id, "device-frame")
self.assertEqual(inst.contained[1].id, "device-lens")
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code, "654321")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "660152")
self.assertEqual(inst.identifier[0].system, "http://happysight.com/claim")
self.assertEqual(inst.identifier[0].value, "6612347")
self.assertFalse(inst.insurance[0].focal)
self.assertEqual(inst.insurance[0].preAuthRef[0], "PR7652387237")
self.assertEqual(inst.insurance[0].sequence, 1)
self.assertTrue(inst.insurance[1].focal)
self.assertEqual(inst.insurance[1].preAuthRef[0], "AB543GTD7567")
self.assertEqual(inst.insurance[1].sequence, 2)
self.assertEqual(inst.item[0].careTeamSequence[0], 1)
self.assertEqual(inst.item[0].category.coding[0].code, "F6")
self.assertEqual(inst.item[0].category.coding[0].display, "Vision Coverage")
self.assertEqual(inst.item[0].category.coding[0].system, "http://example.org/fhir/CodeSystem/benefit-subcategory")
self.assertEqual(inst.item[0].detail[0].category.coding[0].code, "F6")
self.assertEqual(inst.item[0].detail[0].category.coding[0].display, "Vision Coverage")
self.assertEqual(inst.item[0].detail[0].category.coding[0].system, "http://example.org/fhir/CodeSystem/benefit-subcategory")
self.assertEqual(inst.item[0].detail[0].factor, 1.1)
self.assertEqual(inst.item[0].detail[0].modifier[0].coding[0].code, "rooh")
self.assertEqual(inst.item[0].detail[0].modifier[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/modifiers")
self.assertEqual(inst.item[0].detail[0].net.currency, "USD")
self.assertEqual(inst.item[0].detail[0].net.value, 110.0)
self.assertEqual(inst.item[0].detail[0].productOrService.coding[0].code, "frame")
self.assertEqual(inst.item[0].detail[0].productOrService.coding[0].system, "http://example.org/fhir/CodeSystem/ex-visionservice")
self.assertEqual(inst.item[0].detail[0].revenue.coding[0].code, "0010")
self.assertEqual(inst.item[0].detail[0].revenue.coding[0].display, "Vision Clinic")
self.assertEqual(inst.item[0].detail[0].revenue.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-revenue-center")
self.assertEqual(inst.item[0].detail[0].sequence, 1)
self.assertEqual(inst.item[0].detail[0].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].detail[0].unitPrice.value, 100.0)
self.assertEqual(inst.item[0].detail[1].category.coding[0].code, "F6")
self.assertEqual(inst.item[0].detail[1].category.coding[0].display, "Vision Coverage")
self.assertEqual(inst.item[0].detail[1].category.coding[0].system, "http://example.org/fhir/CodeSystem/benefit-subcategory")
self.assertEqual(inst.item[0].detail[1].net.currency, "USD")
self.assertEqual(inst.item[0].detail[1].net.value, 110.0)
self.assertEqual(inst.item[0].detail[1].productOrService.coding[0].code, "lens")
self.assertEqual(inst.item[0].detail[1].productOrService.coding[0].system, "http://example.org/fhir/CodeSystem/ex-visionservice")
self.assertEqual(inst.item[0].detail[1].programCode[0].coding[0].code, "none")
self.assertEqual(inst.item[0].detail[1].programCode[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-programcode")
self.assertEqual(inst.item[0].detail[1].quantity.value, 2)
self.assertEqual(inst.item[0].detail[1].revenue.coding[0].code, "0010")
self.assertEqual(inst.item[0].detail[1].revenue.coding[0].display, "Vision Clinic")
self.assertEqual(inst.item[0].detail[1].revenue.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-revenue-center")
self.assertEqual(inst.item[0].detail[1].sequence, 2)
self.assertEqual(inst.item[0].detail[1].subDetail[0].category.coding[0].code, "F6")
self.assertEqual(inst.item[0].detail[1].subDetail[0].category.coding[0].display, "Vision Coverage")
self.assertEqual(inst.item[0].detail[1].subDetail[0].category.coding[0].system, "http://example.org/fhir/CodeSystem/benefit-subcategory")
self.assertEqual(inst.item[0].detail[1].subDetail[0].factor, 1.1)
self.assertEqual(inst.item[0].detail[1].subDetail[0].modifier[0].coding[0].code, "rooh")
self.assertEqual(inst.item[0].detail[1].subDetail[0].modifier[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/modifiers")
self.assertEqual(inst.item[0].detail[1].subDetail[0].net.currency, "USD")
self.assertEqual(inst.item[0].detail[1].subDetail[0].net.value, 66.0)
self.assertEqual(inst.item[0].detail[1].subDetail[0].productOrService.coding[0].code, "lens")
self.assertEqual(inst.item[0].detail[1].subDetail[0].productOrService.coding[0].system, "http://example.org/fhir/CodeSystem/ex-visionservice")
self.assertEqual(inst.item[0].detail[1].subDetail[0].programCode[0].coding[0].code, "none")
self.assertEqual(inst.item[0].detail[1].subDetail[0].programCode[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-programcode")
self.assertEqual(inst.item[0].detail[1].subDetail[0].quantity.value, 2)
self.assertEqual(inst.item[0].detail[1].subDetail[0].revenue.coding[0].code, "0010")
self.assertEqual(inst.item[0].detail[1].subDetail[0].revenue.coding[0].display, "Vision Clinic")
self.assertEqual(inst.item[0].detail[1].subDetail[0].revenue.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-revenue-center")
self.assertEqual(inst.item[0].detail[1].subDetail[0].sequence, 1)
self.assertEqual(inst.item[0].detail[1].subDetail[0].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].detail[1].subDetail[0].unitPrice.value, 30.0)
self.assertEqual(inst.item[0].detail[1].subDetail[1].category.coding[0].code, "F6")
self.assertEqual(inst.item[0].detail[1].subDetail[1].category.coding[0].display, "Vision Coverage")
self.assertEqual(inst.item[0].detail[1].subDetail[1].category.coding[0].system, "http://example.org/fhir/CodeSystem/benefit-subcategory")
self.assertEqual(inst.item[0].detail[1].subDetail[1].factor, 1.1)
self.assertEqual(inst.item[0].detail[1].subDetail[1].modifier[0].coding[0].code, "rooh")
self.assertEqual(inst.item[0].detail[1].subDetail[1].modifier[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/modifiers")
self.assertEqual(inst.item[0].detail[1].subDetail[1].net.currency, "USD")
self.assertEqual(inst.item[0].detail[1].subDetail[1].net.value, 33.0)
self.assertEqual(inst.item[0].detail[1].subDetail[1].productOrService.coding[0].code, "hardening")
self.assertEqual(inst.item[0].detail[1].subDetail[1].productOrService.coding[0].system, "http://example.org/fhir/CodeSystem/ex-visionservice")
self.assertEqual(inst.item[0].detail[1].subDetail[1].quantity.value, 2)
self.assertEqual(inst.item[0].detail[1].subDetail[1].revenue.coding[0].code, "0010")
self.assertEqual(inst.item[0].detail[1].subDetail[1].revenue.coding[0].display, "Vision Clinic")
self.assertEqual(inst.item[0].detail[1].subDetail[1].revenue.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-revenue-center")
self.assertEqual(inst.item[0].detail[1].subDetail[1].sequence, 2)
self.assertEqual(inst.item[0].detail[1].subDetail[1].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].detail[1].subDetail[1].unitPrice.value, 15.0)
self.assertEqual(inst.item[0].detail[1].subDetail[2].category.coding[0].code, "F6")
self.assertEqual(inst.item[0].detail[1].subDetail[2].category.coding[0].display, "Vision Coverage")
self.assertEqual(inst.item[0].detail[1].subDetail[2].category.coding[0].system, "http://example.org/fhir/CodeSystem/benefit-subcategory")
self.assertEqual(inst.item[0].detail[1].subDetail[2].factor, 1.1)
self.assertEqual(inst.item[0].detail[1].subDetail[2].modifier[0].coding[0].code, "rooh")
self.assertEqual(inst.item[0].detail[1].subDetail[2].modifier[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/modifiers")
self.assertEqual(inst.item[0].detail[1].subDetail[2].net.currency, "USD")
self.assertEqual(inst.item[0].detail[1].subDetail[2].net.value, 11.0)
self.assertEqual(inst.item[0].detail[1].subDetail[2].productOrService.coding[0].code, "UV coating")
self.assertEqual(inst.item[0].detail[1].subDetail[2].productOrService.coding[0].system, "http://example.org/fhir/CodeSystem/ex-visionservice")
self.assertEqual(inst.item[0].detail[1].subDetail[2].quantity.value, 2)
self.assertEqual(inst.item[0].detail[1].subDetail[2].revenue.coding[0].code, "0010")
self.assertEqual(inst.item[0].detail[1].subDetail[2].revenue.coding[0].display, "Vision Clinic")
self.assertEqual(inst.item[0].detail[1].subDetail[2].revenue.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-revenue-center")
self.assertEqual(inst.item[0].detail[1].subDetail[2].sequence, 3)
self.assertEqual(inst.item[0].detail[1].subDetail[2].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].detail[1].subDetail[2].unitPrice.value, 5.0)
self.assertEqual(inst.item[0].detail[1].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].detail[1].unitPrice.value, 55.0)
self.assertEqual(inst.item[0].detail[2].category.coding[0].code, "F6")
self.assertEqual(inst.item[0].detail[2].category.coding[0].display, "Vision Coverage")
self.assertEqual(inst.item[0].detail[2].category.coding[0].system, "http://example.org/fhir/CodeSystem/benefit-subcategory")
self.assertEqual(inst.item[0].detail[2].factor, 0.07)
self.assertEqual(inst.item[0].detail[2].net.currency, "USD")
self.assertEqual(inst.item[0].detail[2].net.value, 15.4)
self.assertEqual(inst.item[0].detail[2].productOrService.coding[0].code, "fst")
self.assertEqual(inst.item[0].detail[2].productOrService.coding[0].system, "http://example.org/fhir/CodeSystem/ex-visionservice")
self.assertEqual(inst.item[0].detail[2].revenue.coding[0].code, "0010")
self.assertEqual(inst.item[0].detail[2].revenue.coding[0].display, "Vision Clinic")
self.assertEqual(inst.item[0].detail[2].revenue.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-revenue-center")
self.assertEqual(inst.item[0].detail[2].sequence, 3)
self.assertEqual(inst.item[0].detail[2].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].detail[2].unitPrice.value, 220.0)
self.assertEqual(inst.item[0].modifier[0].coding[0].code, "rooh")
self.assertEqual(inst.item[0].modifier[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/modifiers")
self.assertEqual(inst.item[0].net.currency, "USD")
self.assertEqual(inst.item[0].net.value, 235.4)
self.assertEqual(inst.item[0].productOrService.coding[0].code, "glasses")
self.assertEqual(inst.item[0].productOrService.coding[0].system, "http://example.org/fhir/CodeSystem/ex-visionservice")
self.assertEqual(inst.item[0].programCode[0].coding[0].code, "none")
self.assertEqual(inst.item[0].programCode[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-programcode")
self.assertEqual(inst.item[0].revenue.coding[0].code, "0010")
self.assertEqual(inst.item[0].revenue.coding[0].display, "Vision Clinic")
self.assertEqual(inst.item[0].revenue.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-revenue-center")
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].servicedDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].servicedDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].unitPrice.value, 235.4)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.payee.type.coding[0].code, "provider")
self.assertEqual(inst.priority.coding[0].code, "normal")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Vision Claim for Glasses</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "vision")
self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/claim-type")
self.assertEqual(inst.use, "claim")
def testClaim6(self):
inst = self.instantiate_from("claim-example-institutional.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim6(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim6(inst2)
def implClaim6(self, inst):
self.assertEqual(inst.careTeam[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code, "654456")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "960150")
self.assertEqual(inst.identifier[0].system, "http://happyhospital.com/claim")
self.assertEqual(inst.identifier[0].value, "9612345")
self.assertTrue(inst.insurance[0].focal)
self.assertEqual(inst.insurance[0].sequence, 1)
self.assertEqual(inst.item[0].careTeamSequence[0], 1)
self.assertEqual(inst.item[0].net.currency, "USD")
self.assertEqual(inst.item[0].net.value, 125.0)
self.assertEqual(inst.item[0].productOrService.coding[0].code, "exam")
self.assertEqual(inst.item[0].productOrService.coding[0].system, "http://hl7.org/fhir/ex-serviceproduct")
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].servicedDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].servicedDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].unitPrice.value, 125.0)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.payee.type.coding[0].code, "provider")
self.assertEqual(inst.priority.coding[0].code, "normal")
self.assertEqual(inst.procedure[0].date.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.procedure[0].date.as_json(), "2014-08-16")
self.assertEqual(inst.procedure[0].procedureCodeableConcept.coding[0].code, "SDI9901")
self.assertEqual(inst.procedure[0].procedureCodeableConcept.text, "Subcutaneous diagnostic implant")
self.assertEqual(inst.procedure[0].sequence, 1)
self.assertEqual(inst.procedure[0].type[0].coding[0].code, "primary")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.subType.coding[0].code, "emergency")
self.assertEqual(inst.subType.coding[0].system, "http://terminology.hl7.org/CodeSystem/ex-claimsubtype")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.total.currency, "USD")
self.assertEqual(inst.total.value, 125.0)
self.assertEqual(inst.type.coding[0].code, "institutional")
self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/claim-type")
self.assertEqual(inst.use, "claim")
def testClaim7(self):
inst = self.instantiate_from("claim-example-oral-contained.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim7(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim7(inst2)
def implClaim7(self, inst):
self.assertEqual(inst.careTeam[0].sequence, 1)
self.assertEqual(inst.contained[0].id, "org-insurer")
self.assertEqual(inst.contained[1].id, "org-org")
self.assertEqual(inst.contained[2].id, "provider-1")
self.assertEqual(inst.contained[3].id, "patient-1")
self.assertEqual(inst.contained[4].id, "coverage-1")
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code, "123456")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "100152")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/claim")
self.assertEqual(inst.identifier[0].value, "12347")
self.assertTrue(inst.insurance[0].focal)
self.assertEqual(inst.insurance[0].sequence, 1)
self.assertEqual(inst.item[0].careTeamSequence[0], 1)
self.assertEqual(inst.item[0].net.currency, "USD")
self.assertEqual(inst.item[0].net.value, 135.57)
self.assertEqual(inst.item[0].productOrService.coding[0].code, "1200")
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].servicedDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].servicedDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].unitPrice.value, 135.57)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.payee.type.coding[0].code, "provider")
self.assertEqual(inst.priority.coding[0].code, "normal")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Oral Health Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "oral")
self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/claim-type")
self.assertEqual(inst.use, "claim")
def testClaim8(self):
inst = self.instantiate_from("claim-example-pharmacy-medication.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim8(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim8(inst2)
def implClaim8(self, inst):
self.assertEqual(inst.careTeam[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code, "654456")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "760151")
self.assertEqual(inst.identifier[0].system, "http://happypharma.com/claim")
self.assertEqual(inst.identifier[0].value, "7612345")
self.assertTrue(inst.insurance[0].focal)
self.assertEqual(inst.insurance[0].sequence, 1)
self.assertEqual(inst.item[0].careTeamSequence[0], 1)
self.assertEqual(inst.item[0].detail[0].net.currency, "USD")
self.assertEqual(inst.item[0].detail[0].net.value, 45.0)
self.assertEqual(inst.item[0].detail[0].productOrService.coding[0].code, "drugcost")
self.assertEqual(inst.item[0].detail[0].productOrService.coding[0].system, "http://hl7.org/fhir/ex-pharmaservice")
self.assertEqual(inst.item[0].detail[0].sequence, 1)
self.assertEqual(inst.item[0].detail[1].net.currency, "USD")
self.assertEqual(inst.item[0].detail[1].net.value, 9.0)
self.assertEqual(inst.item[0].detail[1].productOrService.coding[0].code, "markup")
self.assertEqual(inst.item[0].detail[1].productOrService.coding[0].system, "http://hl7.org/fhir/ex-pharmaservice")
self.assertEqual(inst.item[0].detail[1].sequence, 2)
self.assertEqual(inst.item[0].detail[2].net.currency, "USD")
self.assertEqual(inst.item[0].detail[2].net.value, 36.0)
self.assertEqual(inst.item[0].detail[2].productOrService.coding[0].code, "dispensefee")
self.assertEqual(inst.item[0].detail[2].productOrService.coding[0].system, "http://hl7.org/fhir/ex-pharmaservice")
self.assertEqual(inst.item[0].detail[2].sequence, 3)
self.assertEqual(inst.item[0].informationSequence[0], 1)
self.assertEqual(inst.item[0].informationSequence[1], 2)
self.assertEqual(inst.item[0].informationSequence[2], 3)
self.assertEqual(inst.item[0].net.currency, "USD")
self.assertEqual(inst.item[0].net.value, 90.0)
self.assertEqual(inst.item[0].productOrService.coding[0].code, "562721")
self.assertEqual(inst.item[0].productOrService.coding[0].display, "Alprazolam 0.25mg (Xanax)")
self.assertEqual(inst.item[0].productOrService.coding[0].system, "http://www.nlm.nih.gov/research/umls/rxnorm")
self.assertEqual(inst.item[0].quantity.code, "TAB")
self.assertEqual(inst.item[0].quantity.system, "http://unitsofmeasure.org")
self.assertEqual(inst.item[0].quantity.unit, "TAB")
self.assertEqual(inst.item[0].quantity.value, 90)
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].servicedDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].servicedDate.as_json(), "2014-08-16")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.payee.type.coding[0].code, "provider")
self.assertEqual(inst.priority.coding[0].code, "stat")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.supportingInfo[0].category.coding[0].code, "pharmacyrefill")
self.assertEqual(inst.supportingInfo[0].code.coding[0].code, "new")
self.assertEqual(inst.supportingInfo[0].code.coding[0].system, "http://example.org/fhir/CodeSystem/pharmacy-refill")
self.assertEqual(inst.supportingInfo[0].sequence, 1)
self.assertEqual(inst.supportingInfo[1].category.coding[0].code, "pharmacyinformation")
self.assertEqual(inst.supportingInfo[1].code.coding[0].code, "refillsremaining")
self.assertEqual(inst.supportingInfo[1].code.coding[0].system, "http://example.org/fhir/CodeSystem/pharmacy-information")
self.assertEqual(inst.supportingInfo[1].sequence, 2)
self.assertEqual(inst.supportingInfo[1].valueQuantity.value, 2)
self.assertEqual(inst.supportingInfo[2].category.coding[0].code, "pharmacyinformation")
self.assertEqual(inst.supportingInfo[2].code.coding[0].code, "dayssupply")
self.assertEqual(inst.supportingInfo[2].code.coding[0].system, "http://example.org/fhir/CodeSystem/pharmacy-information")
self.assertEqual(inst.supportingInfo[2].sequence, 3)
self.assertEqual(inst.supportingInfo[2].valueQuantity.value, 90)
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Pharmacy Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.total.currency, "USD")
self.assertEqual(inst.total.value, 90.0)
self.assertEqual(inst.type.coding[0].code, "pharmacy")
self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/claim-type")
self.assertEqual(inst.use, "claim")
def testClaim9(self):
inst = self.instantiate_from("claim-example-oral-orthoplan.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim9(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim9(inst2)
def implClaim9(self, inst):
self.assertEqual(inst.careTeam[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2015-03-16").date)
self.assertEqual(inst.created.as_json(), "2015-03-16")
self.assertEqual(inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code, "123457")
self.assertEqual(inst.diagnosis[0].diagnosisCodeableConcept.coding[0].system, "http://hl7.org/fhir/sid/icd-10")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.fundsReserve.coding[0].code, "provider")
self.assertEqual(inst.id, "100153")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/claim")
self.assertEqual(inst.identifier[0].value, "12355")
self.assertTrue(inst.insurance[0].focal)
self.assertEqual(inst.insurance[0].sequence, 1)
self.assertEqual(inst.item[0].careTeamSequence[0], 1)
self.assertEqual(inst.item[0].detail[0].net.currency, "USD")
self.assertEqual(inst.item[0].detail[0].net.value, 1000.0)
self.assertEqual(inst.item[0].detail[0].productOrService.coding[0].code, "ORTHOEXAM")
self.assertEqual(inst.item[0].detail[0].productOrService.coding[0].system, "http://example.org/fhir/oralservicecodes")
self.assertEqual(inst.item[0].detail[0].sequence, 1)
self.assertEqual(inst.item[0].detail[0].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].detail[0].unitPrice.value, 1000.0)
self.assertEqual(inst.item[0].detail[1].net.currency, "USD")
self.assertEqual(inst.item[0].detail[1].net.value, 1500.0)
self.assertEqual(inst.item[0].detail[1].productOrService.coding[0].code, "ORTHODIAG")
self.assertEqual(inst.item[0].detail[1].productOrService.coding[0].system, "http://example.org/fhir/oralservicecodes")
self.assertEqual(inst.item[0].detail[1].sequence, 2)
self.assertEqual(inst.item[0].detail[1].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].detail[1].unitPrice.value, 1500.0)
self.assertEqual(inst.item[0].detail[2].net.currency, "USD")
self.assertEqual(inst.item[0].detail[2].net.value, 500.0)
self.assertEqual(inst.item[0].detail[2].productOrService.coding[0].code, "ORTHOINITIAL")
self.assertEqual(inst.item[0].detail[2].productOrService.coding[0].system, "http://example.org/fhir/oralservicecodes")
self.assertEqual(inst.item[0].detail[2].sequence, 3)
self.assertEqual(inst.item[0].detail[2].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].detail[2].unitPrice.value, 500.0)
self.assertEqual(inst.item[0].detail[3].productOrService.coding[0].code, "ORTHOMONTHS")
self.assertEqual(inst.item[0].detail[3].productOrService.coding[0].system, "http://example.org/fhir/oralservicecodes")
self.assertEqual(inst.item[0].detail[3].quantity.value, 24)
self.assertEqual(inst.item[0].detail[3].sequence, 4)
self.assertEqual(inst.item[0].detail[4].net.currency, "USD")
self.assertEqual(inst.item[0].detail[4].net.value, 250.0)
self.assertEqual(inst.item[0].detail[4].productOrService.coding[0].code, "ORTHOPERIODIC")
self.assertEqual(inst.item[0].detail[4].productOrService.coding[0].system, "http://example.org/fhir/oralservicecodes")
self.assertEqual(inst.item[0].detail[4].quantity.value, 24)
self.assertEqual(inst.item[0].detail[4].sequence, 5)
self.assertEqual(inst.item[0].detail[4].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].detail[4].unitPrice.value, 250.0)
self.assertEqual(inst.item[0].diagnosisSequence[0], 1)
self.assertEqual(inst.item[0].net.currency, "USD")
self.assertEqual(inst.item[0].net.value, 9000.0)
self.assertEqual(inst.item[0].productOrService.coding[0].code, "ORTHPLAN")
self.assertEqual(inst.item[0].productOrService.coding[0].system, "http://example.org/fhir/oralservicecodes")
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].servicedDate.date, FHIRDate("2015-05-16").date)
self.assertEqual(inst.item[0].servicedDate.as_json(), "2015-05-16")
self.assertEqual(inst.item[0].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].unitPrice.value, 9000.0)
self.assertEqual(inst.item[1].bodySite.coding[0].code, "21")
self.assertEqual(inst.item[1].bodySite.coding[0].system, "http://fdi.org/fhir/oraltoothcodes")
self.assertEqual(inst.item[1].careTeamSequence[0], 1)
self.assertEqual(inst.item[1].net.currency, "USD")
self.assertEqual(inst.item[1].net.value, 105.0)
self.assertEqual(inst.item[1].productOrService.coding[0].code, "21211")
self.assertEqual(inst.item[1].productOrService.coding[0].system, "http://example.org/fhir/oralservicecodes")
self.assertEqual(inst.item[1].sequence, 2)
self.assertEqual(inst.item[1].servicedDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[1].servicedDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[1].subSite[0].coding[0].code, "L")
self.assertEqual(inst.item[1].subSite[0].coding[0].system, "http://fdi.org/fhir/oralsurfacecodes")
self.assertEqual(inst.item[1].unitPrice.currency, "USD")
self.assertEqual(inst.item[1].unitPrice.value, 105.0)
self.assertEqual(inst.item[2].bodySite.coding[0].code, "36")
self.assertEqual(inst.item[2].bodySite.coding[0].system, "http://fdi.org/fhir/oraltoothcodes")
self.assertEqual(inst.item[2].careTeamSequence[0], 1)
self.assertEqual(inst.item[2].detail[0].net.currency, "USD")
self.assertEqual(inst.item[2].detail[0].net.value, 750.0)
self.assertEqual(inst.item[2].detail[0].productOrService.coding[0].code, "27211")
self.assertEqual(inst.item[2].detail[0].productOrService.coding[0].system, "http://example.org/fhir/oralservicecodes")
self.assertEqual(inst.item[2].detail[0].sequence, 1)
self.assertEqual(inst.item[2].detail[0].unitPrice.currency, "USD")
self.assertEqual(inst.item[2].detail[0].unitPrice.value, 750.0)
self.assertEqual(inst.item[2].detail[1].net.currency, "USD")
self.assertEqual(inst.item[2].detail[1].net.value, 350.0)
self.assertEqual(inst.item[2].detail[1].productOrService.coding[0].code, "lab")
self.assertEqual(inst.item[2].detail[1].productOrService.coding[0].system, "http://example.org/fhir/oralservicecodes")
self.assertEqual(inst.item[2].detail[1].sequence, 2)
self.assertEqual(inst.item[2].detail[1].unitPrice.currency, "USD")
self.assertEqual(inst.item[2].detail[1].unitPrice.value, 350.0)
self.assertEqual(inst.item[2].net.currency, "USD")
self.assertEqual(inst.item[2].net.value, 1100.0)
self.assertEqual(inst.item[2].productOrService.coding[0].code, "27211")
self.assertEqual(inst.item[2].productOrService.coding[0].system, "http://example.org/fhir/oralservicecodes")
self.assertEqual(inst.item[2].sequence, 3)
self.assertEqual(inst.item[2].servicedDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[2].servicedDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[2].unitPrice.currency, "USD")
self.assertEqual(inst.item[2].unitPrice.value, 1100.0)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.payee.type.coding[0].code, "provider")
self.assertEqual(inst.priority.coding[0].code, "normal")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Oral Health Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "oral")
self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/claim-type")
self.assertEqual(inst.use, "preauthorization")
def testClaim10(self):
inst = self.instantiate_from("claim-example-cms1500-medical.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim10(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim10(inst2)
def implClaim10(self, inst):
self.assertEqual(inst.careTeam[0].sequence, 1)
self.assertEqual(inst.contained[0].id, "patient-1")
self.assertEqual(inst.contained[1].id, "coverage-1")
self.assertEqual(inst.created.date, FHIRDate("2015-10-16T00:00:00-07:00").date)
self.assertEqual(inst.created.as_json(), "2015-10-16T00:00:00-07:00")
self.assertEqual(inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code, "M96.1")
self.assertEqual(inst.diagnosis[0].diagnosisCodeableConcept.coding[0].display, "Postlaminectomy syndrome")
self.assertEqual(inst.diagnosis[0].diagnosisCodeableConcept.coding[0].system, "http://hl7.org/fhir/sid/icd-10")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.diagnosis[1].diagnosisCodeableConcept.coding[0].code, "G89.4")
self.assertEqual(inst.diagnosis[1].diagnosisCodeableConcept.coding[0].display, "Chronic pain syndrome")
self.assertEqual(inst.diagnosis[1].diagnosisCodeableConcept.coding[0].system, "http://hl7.org/fhir/sid/icd-10")
self.assertEqual(inst.diagnosis[1].sequence, 2)
self.assertEqual(inst.diagnosis[2].diagnosisCodeableConcept.coding[0].code, "M53.88")
self.assertEqual(inst.diagnosis[2].diagnosisCodeableConcept.coding[0].display, "Other specified dorsopathies, sacral and sacrococcygeal region")
self.assertEqual(inst.diagnosis[2].diagnosisCodeableConcept.coding[0].system, "http://hl7.org/fhir/sid/icd-10")
self.assertEqual(inst.diagnosis[2].sequence, 3)
self.assertEqual(inst.diagnosis[3].diagnosisCodeableConcept.coding[0].code, "M47.816")
self.assertEqual(inst.diagnosis[3].diagnosisCodeableConcept.coding[0].display, "Spondylosis without myelopathy or radiculopathy, lumbar region")
self.assertEqual(inst.diagnosis[3].diagnosisCodeableConcept.coding[0].system, "http://hl7.org/fhir/sid/icd-10")
self.assertEqual(inst.diagnosis[3].sequence, 4)
self.assertEqual(inst.id, "MED-00050")
self.assertEqual(inst.identifier[0].system, "http://CedarArmsMedicalCenter.com/claim")
self.assertEqual(inst.identifier[0].value, "MED-00050")
self.assertTrue(inst.insurance[0].focal)
self.assertEqual(inst.insurance[0].identifier.system, "http://CedarArmsMedicalCenter.com/claim")
self.assertEqual(inst.insurance[0].identifier.value, "MED-00050")
self.assertEqual(inst.insurance[0].sequence, 1)
self.assertEqual(inst.item[0].careTeamSequence[0], 1)
self.assertEqual(inst.item[0].diagnosisSequence[0], 2)
self.assertEqual(inst.item[0].diagnosisSequence[1], 4)
self.assertEqual(inst.item[0].informationSequence[0], 1)
self.assertEqual(inst.item[0].locationCodeableConcept.coding[0].code, "24")
self.assertEqual(inst.item[0].locationCodeableConcept.coding[0].display, "Ambulatory Surgical Center")
self.assertEqual(inst.item[0].locationCodeableConcept.coding[0].system, "https://www.cms.gov/medicare/coding/place-of-service-codes/place_of_service_code_set.html")
self.assertEqual(inst.item[0].net.currency, "USD")
self.assertEqual(inst.item[0].net.value, 12500.0)
self.assertEqual(inst.item[0].productOrService.coding[0].code, "62264")
self.assertEqual(inst.item[0].productOrService.coding[0].display, "Surgical Procedures on the Spine and Spinal Cord")
self.assertEqual(inst.item[0].productOrService.coding[0].system, "http://www.ama-assn.org/go/cpt")
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].servicedDate.date, FHIRDate("2015-10-13").date)
self.assertEqual(inst.item[0].servicedDate.as_json(), "2015-10-13")
self.assertEqual(inst.item[0].unitPrice.currency, "USD")
self.assertEqual(inst.item[0].unitPrice.value, 12500.0)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.payee.type.coding[0].code, "provider")
self.assertEqual(inst.payee.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/payeetype")
self.assertEqual(inst.priority.coding[0].code, "normal")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.subType.coding[0].code, "831")
self.assertEqual(inst.subType.coding[0].system, "https://www.cms.gov/codes/billtype")
self.assertEqual(inst.supportingInfo[0].category.coding[0].code, "hospitalized")
self.assertEqual(inst.supportingInfo[0].category.coding[0].system, "http://terminology.hl7.org/CodeSystem/claiminformationcategory")
self.assertEqual(inst.supportingInfo[0].sequence, 1)
self.assertEqual(inst.supportingInfo[0].timingPeriod.end.date, FHIRDate("2015-10-05T00:00:00-07:00").date)
self.assertEqual(inst.supportingInfo[0].timingPeriod.end.as_json(), "2015-10-05T00:00:00-07:00")
self.assertEqual(inst.supportingInfo[0].timingPeriod.start.date, FHIRDate("2015-10-01T00:00:00-07:00").date)
self.assertEqual(inst.supportingInfo[0].timingPeriod.start.as_json(), "2015-10-01T00:00:00-07:00")
self.assertEqual(inst.supportingInfo[1].category.coding[0].code, "discharge")
self.assertEqual(inst.supportingInfo[1].category.coding[0].system, "http://terminology.hl7.org/CodeSystem/claiminformationcategory")
self.assertEqual(inst.supportingInfo[1].code.coding[0].code, "01")
self.assertEqual(inst.supportingInfo[1].code.coding[0].display, "Discharge to Home or Self Care")
self.assertEqual(inst.supportingInfo[1].code.coding[0].system, "https://www.cms.gov/Outreach-and-Education/Medicare-Learning-Network-MLN/MLNMattersArticles/downloads/SE0801.pdf")
self.assertEqual(inst.supportingInfo[1].sequence, 2)
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of a CMS 1500 Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.total.currency, "USD")
self.assertEqual(inst.total.value, 12500.0)
self.assertEqual(inst.type.coding[0].code, "institutional")
self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/claim-type")
self.assertEqual(inst.use, "claim")
|
[] |
[] |
[
"FHIR_UNITTEST_DATADIR"
] |
[]
|
["FHIR_UNITTEST_DATADIR"]
|
python
| 1 | 0 | |
django/core/management/utils.py
|
import os
from subprocess import PIPE, Popen
from django.apps import apps as installed_apps
from django.utils.crypto import get_random_string
from django.utils.encoding import DEFAULT_LOCALE_ENCODING
from .base import CommandError, CommandParser
def popen_wrapper(args, stdout_encoding='utf-8'):
"""
Friendly wrapper around Popen.
Return stdout output, stderr output, and OS status code.
"""
try:
p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt')
except OSError as err:
raise CommandError('Error executing %s' % args[0]) from err
output, errors = p.communicate()
return (
output.decode(stdout_encoding),
errors.decode(DEFAULT_LOCALE_ENCODING, errors='replace'),
p.returncode
)
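# A minimal usage sketch (the command is illustrative, not something this module runs itself):
#   out, err, status = popen_wrapper(['xgettext', '--version'])
#   if status != 0:
#       ...  # non-zero exit codes are reported via the returned status;
#            # only a failure to start the process raises CommandError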
def handle_extensions(extensions):
"""
Organize multiple extensions that are separated with commas or passed by
using --extension/-e multiple times.
For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
would result in an extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
{'.html', '.js', '.py'}
>>> handle_extensions(['.html, txt,.tpl'])
{'.html', '.tpl', '.txt'}
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ', '').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
return set(ext_list)
def find_command(cmd, path=None, pathext=None):
if path is None:
path = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(path, str):
path = [path]
# check if there are funny path extensions for executables, e.g. Windows
if pathext is None:
pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep)
# don't use extensions if the command ends with one of them
for ext in pathext:
if cmd.endswith(ext):
pathext = ['']
break
# check if we find the command on PATH
for p in path:
f = os.path.join(p, cmd)
if os.path.isfile(f):
return f
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None
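# Illustrative behaviour: find_command('gettext') returns an absolute path such as
# '/usr/bin/gettext' when the executable is found on PATH (with PATHEXT suffixes
# tried on Windows-style setups), and None when it is not; the path shown here is
# an assumption for the example, not a guaranteed location.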
def get_random_secret_key():
"""
Return a 50 character random string usable as a SECRET_KEY setting value.
"""
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
return get_random_string(50, chars)
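# Rough entropy estimate for the key above: the alphabet has 50 symbols, so a
# 50-character key carries about 50 * log2(50) ≈ 282 bits of entropy.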
def parse_apps_and_model_labels(labels):
"""
Parse a list of "app_label.ModelName" or "app_label" strings into actual
objects and return a two-element tuple:
(set of model classes, set of app_configs).
Raise a CommandError if some specified models or apps don't exist.
"""
apps = set()
models = set()
for label in labels:
if '.' in label:
try:
model = installed_apps.get_model(label)
except LookupError:
raise CommandError('Unknown model: %s' % label)
models.add(model)
else:
try:
app_config = installed_apps.get_app_config(label)
except LookupError as e:
raise CommandError(str(e))
apps.add(app_config)
return models, apps
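# Illustrative call (assumes django.contrib.auth and django.contrib.sites are installed):
#   models, app_configs = parse_apps_and_model_labels(['auth.User', 'sites'])
# models would then hold the User class and app_configs the sites AppConfig;
# unknown labels raise CommandError instead.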
def get_command_line_option(argv, option):
"""
Return the value of a command line option (which should include leading
dashes, e.g. '--testrunner') from an argument list. Return None if the
option wasn't passed or if the argument list couldn't be parsed.
"""
parser = CommandParser(add_help=False, allow_abbrev=False)
parser.add_argument(option, dest='value')
try:
options, _ = parser.parse_known_args(argv[2:])
except CommandError:
return None
else:
return options.value
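# Illustrative call: with argv == ['manage.py', 'test', '--testrunner', 'pkg.Runner'],
# get_command_line_option(argv, '--testrunner') returns 'pkg.Runner'. The program
# name and subcommand (argv[:2]) are skipped before parsing, and None is returned
# when the option is absent.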
|
[] |
[] |
[
"PATH",
"PATHEXT"
] |
[]
|
["PATH", "PATHEXT"]
|
python
| 2 | 0 | |
httpbin/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "httpbin.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
recipes/scipy/run_test.py
|
import sys
import os
# Use OpenBLAS with 1 thread only as it seems to be using too many
# on the CIs apparently.
import scipy
import scipy.cluster._hierarchy
import scipy.cluster._vq
import scipy.fftpack._fftpack
import scipy.fftpack.convolve
import scipy.integrate._dop
import scipy.integrate._odepack
import scipy.integrate._quadpack
import scipy.integrate._test_multivariate
import scipy.integrate._test_odeint_banded
import scipy.integrate.lsoda
import scipy.integrate.vode
import scipy.interpolate._fitpack
import scipy.interpolate._interpolate
import scipy.interpolate._ppoly
import scipy.interpolate.dfitpack
import scipy.interpolate.interpnd
import scipy.io.matlab.mio5_utils
import scipy.io.matlab.mio_utils
import scipy.io.matlab.streams
import scipy.linalg._decomp_update
import scipy.linalg._fblas
import scipy.linalg._flapack
import scipy.linalg._flinalg
import scipy.linalg._interpolative
import scipy.linalg._solve_toeplitz
import scipy.linalg.cython_blas
import scipy.linalg.cython_lapack
import scipy.ndimage._nd_image
import scipy.ndimage._ni_label
import scipy.odr.__odrpack
import scipy.optimize._cobyla
import scipy.optimize._group_columns
import scipy.optimize._lbfgsb
import scipy.optimize._lsq.givens_elimination
import scipy.optimize._minpack
import scipy.optimize._nnls
import scipy.optimize._slsqp
import scipy.optimize._zeros
import scipy.optimize.minpack2
import scipy.optimize.moduleTNC
import scipy.signal._max_len_seq_inner
import scipy.signal._spectral
import scipy.signal.sigtools
import scipy.signal.spline
import scipy.sparse._csparsetools
import scipy.sparse._sparsetools
import scipy.sparse.csgraph._min_spanning_tree
import scipy.sparse.csgraph._reordering
import scipy.sparse.csgraph._shortest_path
import scipy.sparse.csgraph._tools
import scipy.sparse.csgraph._traversal
import scipy.sparse.linalg.dsolve._superlu
import scipy.sparse.linalg.eigen.arpack._arpack
import scipy.sparse.linalg.isolve._iterative
import scipy.spatial._distance_wrap
import scipy.spatial.ckdtree
import scipy.spatial.qhull
import scipy.special._ellip_harm_2
import scipy.special._ufuncs
import scipy.special._ufuncs_cxx
import scipy.special.specfun
import scipy.stats.mvn
import scipy.stats.statlib
import scipy.stats
import scipy.special
sys.exit(scipy.test())
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
column/int128_test.go
|
package column_test
import (
"context"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/vahid-sohrabloo/chconn"
"github.com/vahid-sohrabloo/chconn/column"
)
func TestInt128(t *testing.T) {
t.Parallel()
connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
conn, err := chconn.Connect(context.Background(), connString)
require.NoError(t, err)
res, err := conn.Exec(context.Background(), `DROP TABLE IF EXISTS test_int128`)
require.NoError(t, err)
require.Nil(t, res)
res, err = conn.Exec(context.Background(), `CREATE TABLE test_int128 (
int128 Int128,
int128_nullable Nullable(Int128),
int128_array Array(Int128),
int128_array_nullable Array(Nullable(Int128))
) Engine=Memory`)
require.NoError(t, err)
require.Nil(t, res)
col := column.NewInt128(false)
colArrayValues := column.NewInt128(false)
colArray := column.NewArray(colArrayValues)
colArrayValuesNil := column.NewInt128(true)
colArrayNil := column.NewArray(colArrayValuesNil)
colNil := column.NewInt128(true)
var colInsert [][]byte
var colInsertArray [][][]byte
var colInsertArrayNil [][]*[]byte
var colNilInsert []*[]byte
rows := 10
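// Int128 values are exchanged as raw fixed-width byte slices of length
// column.Int128Size (16 bytes). Assuming ClickHouse's little-endian integer
// layout, setting only the first byte below encodes the small values i and i+1.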
for i := 1; i <= rows; i++ {
val := make([]byte, column.Int128Size)
val[0] = byte(i)
val2 := make([]byte, column.Int128Size)
val2[0] = byte(i + 1)
valArray := [][]byte{val, val2}
valArrayNil := []*[]byte{&val, nil}
col.Append(val)
colInsert = append(colInsert, val)
// example insert array
colInsertArray = append(colInsertArray, valArray)
colArray.AppendLen(len(valArray))
for _, v := range valArray {
colArrayValues.Append(v)
}
// example insert nullable array
colInsertArrayNil = append(colInsertArrayNil, valArrayNil)
colArrayNil.AppendLen(len(valArrayNil))
for _, v := range valArrayNil {
colArrayValuesNil.AppendP(v)
}
// example add nullable
if i%2 == 0 {
colNilInsert = append(colNilInsert, &val)
if i <= rows/2 {
// example to add by pointer
colNil.AppendP(&val)
} else {
// example to add without pointer
colNil.Append(val)
colNil.AppendIsNil(false)
}
} else {
colNilInsert = append(colNilInsert, nil)
if i <= rows/2 {
// example to add by pointer
colNil.AppendP(nil)
} else {
// example to add without pointer
colNil.AppendEmpty()
colNil.AppendIsNil(true)
}
}
}
insertstmt, err := conn.Insert(context.Background(), `INSERT INTO
test_int128 (int128,int128_nullable,int128_array,int128_array_nullable)
VALUES`)
require.NoError(t, err)
require.Nil(t, res)
err = insertstmt.Commit(context.Background(),
col,
colNil,
colArray,
colArrayNil,
)
require.NoError(t, err)
// example read all
selectStmt, err := conn.Select(context.Background(), `SELECT
int128,int128_nullable,int128_array,int128_array_nullable
FROM test_int128`)
require.NoError(t, err)
require.True(t, conn.IsBusy())
colRead := column.NewInt128(false)
colNilRead := column.NewInt128(true)
colArrayReadData := column.NewInt128(false)
colArrayRead := column.NewArray(colArrayReadData)
colArrayReadDataNil := column.NewInt128(true)
colArrayReadNil := column.NewArray(colArrayReadDataNil)
var colData [][]byte
var colNilData []*[]byte
var colArrayData [][][]byte
var colArrayDataNil [][]*[]byte
var colArrayLens []int
for selectStmt.Next() {
err = selectStmt.NextColumn(colRead)
require.NoError(t, err)
colRead.ReadAll(&colData)
err = selectStmt.NextColumn(colNilRead)
require.NoError(t, err)
colNilRead.ReadAllP(&colNilData)
// read array
colArrayLens = colArrayLens[:0]
err = selectStmt.NextColumn(colArrayRead)
require.NoError(t, err)
colArrayRead.ReadAll(&colArrayLens)
for _, l := range colArrayLens {
arr := make([][]byte, l)
colArrayReadData.Fill(arr)
colArrayData = append(colArrayData, arr)
}
// read nullable array
colArrayLens = colArrayLens[:0]
err = selectStmt.NextColumn(colArrayReadNil)
require.NoError(t, err)
colArrayReadNil.ReadAll(&colArrayLens)
for _, l := range colArrayLens {
arr := make([]*[]byte, l)
colArrayReadDataNil.FillP(arr)
colArrayDataNil = append(colArrayDataNil, arr)
}
}
assert.Equal(t, colInsert, colData)
assert.Equal(t, colNilInsert, colNilData)
assert.Equal(t, colInsertArray, colArrayData)
assert.Equal(t, colInsertArrayNil, colArrayDataNil)
require.NoError(t, selectStmt.Err())
selectStmt.Close()
// example one by one
selectStmt, err = conn.Select(context.Background(), `SELECT
int128,int128_nullable,int128_array,int128_array_nullable FROM
test_int128`)
require.NoError(t, err)
require.True(t, conn.IsBusy())
colRead = column.NewInt128(false)
colNilRead = column.NewInt128(true)
colArrayReadData = column.NewInt128(false)
colArrayRead = column.NewArray(colArrayReadData)
colArrayReadDataNil = column.NewInt128(true)
colArrayReadNil = column.NewArray(colArrayReadDataNil)
colData = colData[:0]
colNilData = colNilData[:0]
colArrayData = colArrayData[:0]
colArrayDataNil = colArrayDataNil[:0]
for selectStmt.Next() {
err = selectStmt.NextColumn(colRead)
require.NoError(t, err)
for colRead.Next() {
colData = append(colData, colRead.Value())
}
// read nullable
err = selectStmt.NextColumn(colNilRead)
require.NoError(t, err)
for colNilRead.Next() {
colNilData = append(colNilData, colNilRead.ValueP())
}
// read array
err = selectStmt.NextColumn(colArrayRead)
require.NoError(t, err)
for colArrayRead.Next() {
arr := make([][]byte, colArrayRead.Value())
colArrayReadData.Fill(arr)
colArrayData = append(colArrayData, arr)
}
// read nullable array
err = selectStmt.NextColumn(colArrayReadNil)
require.NoError(t, err)
for colArrayReadNil.Next() {
arr := make([]*[]byte, colArrayReadNil.Value())
colArrayReadDataNil.FillP(arr)
colArrayDataNil = append(colArrayDataNil, arr)
}
}
assert.Equal(t, colInsert, colData)
assert.Equal(t, colNilInsert, colNilData)
assert.Equal(t, colInsertArray, colArrayData)
assert.Equal(t, colInsertArrayNil, colArrayDataNil)
require.NoError(t, selectStmt.Err())
selectStmt.Close()
conn.Close(context.Background())
}
|
[
"\"CHX_TEST_TCP_CONN_STRING\""
] |
[] |
[
"CHX_TEST_TCP_CONN_STRING"
] |
[]
|
["CHX_TEST_TCP_CONN_STRING"]
|
go
| 1 | 0 | |
contrib/gitian-build.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Copyright (c) 2019-2020 The TARIAN developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import os
import subprocess
import sys
def setup_linux():
global args, workdir
if os.path.isfile('/usr/bin/apt-get'):
programs = ['ruby', 'git', 'make', 'wget', 'curl']
if args.kvm:
programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
if not os.path.isfile('/lib/systemd/system/docker.service'):
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
subprocess.check_call(['sudo', 'usermod', '-aG', 'docker', os.environ['USER']])
print('Docker installed, restart your computer and re-run this script to continue the setup process.')
sys.exit(0)
if return_code != 0:
print('Cannot find any way to install Docker.', file=sys.stderr)
sys.exit(1)
else:
programs += ['apt-cacher-ng', 'lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
setup_repos()
elif args.is_fedora:
pkgmgr = 'dnf'
repourl = 'https://download.docker.com/linux/fedora/docker-ce.repo'
elif args.is_centos:
pkgmgr = 'yum'
repourl = 'https://download.docker.com/linux/centos/docker-ce.repo'
if args.is_fedora or args.is_centos:
programs = ['ruby', 'make', 'wget', 'curl']
if args.kvm:
print('KVM not supported with Fedora/CentOS yet.')
sys.exit(1)
elif args.docker:
if not os.path.isfile('/lib/systemd/system/docker.service'):
user = os.environ['USER']
dockers = ['docker-ce', 'docker-ce-cli', 'containerd.io']
if args.is_fedora:
subprocess.check_call(['sudo', pkgmgr, 'install', '-y', 'dnf-plugins-core'])
subprocess.check_call(['sudo', pkgmgr, 'config-manager', '--add-repo', repourl])
elif args.is_centos:
reqs = ['yum-utils', 'device-mapper-persistent-data', 'lvm2']
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + reqs)
subprocess.check_call(['sudo', 'yum-config-manager', '--add-repo', repourl])
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + dockers)
subprocess.check_call(['sudo', 'usermod', '-aG', 'docker', user])
subprocess.check_call(['sudo', 'systemctl', 'enable', 'docker'])
print('Docker installed, restart your computer and re-run this script to continue the setup process.')
sys.exit(0)
subprocess.check_call(['sudo', 'systemctl', 'start', 'docker'])
else:
print('LXC not supported with Fedora/CentOS yet.')
sys.exit(1)
if args.is_fedora:
programs += ['git']
if args.is_centos:
# CentOS ships with an insanely outdated version of git that is no longer compatible with gitian builds
# Check current version and update if necessary
oldgit = b'2.' not in subprocess.check_output(['git', '--version'])
if oldgit:
subprocess.check_call(['sudo', pkgmgr, 'remove', '-y', 'git*'])
subprocess.check_call(['sudo', pkgmgr, 'install', '-y', 'https://centos7.iuscommunity.org/ius-release.rpm'])
programs += ['git2u-all']
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + programs)
setup_repos()
else:
print('Unsupported system/OS type.')
sys.exit(1)
def setup_darwin():
global args, workdir
programs = []
if not os.path.isfile('/usr/local/bin/wget'):
programs += ['wget']
if not os.path.isfile('/usr/local/bin/git'):
programs += ['git']
if not os.path.isfile('/usr/local/bin/gsha256sum'):
programs += ['coreutils']
if args.docker:
print('Experimental setup for macOS host')
if len(programs) > 0:
subprocess.check_call(['brew', 'install'] + programs)
os.environ['PATH'] = '/usr/local/opt/coreutils/libexec/gnubin' + os.pathsep + os.environ['PATH']
elif args.kvm or not args.docker:
print('KVM and LXC are not supported under macOS at this time.')
sys.exit(0)
setup_repos()
def setup_repos():
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/tarian-Project/gitian.sigs.git'])
if not os.path.isdir('tarian-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/tarian-Project/tarian-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('tarian'):
subprocess.check_call(['git', 'clone', 'https://github.com/tarian-Project/tarian.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
if args.host_os == 'darwin':
subprocess.check_call(['sed', '-i.old', '/50cacher/d', 'bin/make-base-vm'])
if args.host_os == 'linux':
if args.is_fedora or args.is_centos or args.is_wsl:
subprocess.check_call(['sed', '-i', '/50cacher/d', 'bin/make-base-vm'])
subprocess.check_call(make_image_prog)
subprocess.check_call(['git', 'checkout', 'bin/make-base-vm'])
os.chdir(workdir)
if args.host_os == 'linux':
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
print('Setup complete!')
sys.exit(0)
def build():
global args, workdir
os.makedirs('tarian-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True)
subprocess.check_call(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True)
subprocess.check_call(['make', '-C', '../tarian/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'tarian='+args.commit, '--url', 'tarian='+args.url, '../tarian/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../tarian/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/tarian-*.tar.gz build/out/src/tarian-*.tar.gz ../tarian-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'tarian='+args.commit, '--url', 'tarian='+args.url, '../tarian/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../tarian/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/tarian-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/tarian-*.zip build/out/tarian-*.exe build/out/src/tarian-*.tar.gz ../tarian-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'tarian='+args.commit, '--url', 'tarian='+args.url, '../tarian/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../tarian/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/tarian-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/tarian-*.tar.gz build/out/tarian-*.dmg build/out/src/tarian-*.tar.gz ../tarian-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
# TODO: Skip making signed windows sigs until we actually start producing signed windows binaries
#print('\nSigning ' + args.version + ' Windows')
#subprocess.check_call('cp inputs/tarian-' + args.version + '-win-unsigned.tar.gz inputs/tarian-win-unsigned.tar.gz', shell=True)
#subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../tarian/contrib/gitian-descriptors/gitian-win-signer.yml'])
#subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../tarian/contrib/gitian-descriptors/gitian-win-signer.yml'])
#subprocess.check_call('mv build/out/tarian-*win64-setup.exe ../tarian-binaries/'+args.version, shell=True)
#subprocess.check_call('mv build/out/tarian-*win32-setup.exe ../tarian-binaries/'+args.version, shell=True)
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/tarian-' + args.version + '-osx-unsigned.tar.gz inputs/tarian-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../tarian/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../tarian/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/tarian-osx-signed.dmg ../tarian-binaries/'+args.version+'/tarian-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
os.chdir('gitian.sigs')
commit = False
if os.path.isfile(args.version+'-win-signed/'+args.signer+'/tarian-win-signer-build.assert.sig'):
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
commit = True
if os.path.isfile(args.version+'-osx-signed/'+args.signer+'/tarian-dmg-signer-build.assert.sig'):
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
commit = True
if commit:
print('\nCommitting '+args.version+' Signed Sigs\n')
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
else:
print('\nNothing to commit\n')
os.chdir(workdir)
def verify():
global args, workdir
rc = 0
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../tarian/contrib/gitian-descriptors/gitian-linux.yml']):
print('Verifying v'+args.version+' Linux FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Windows\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../tarian/contrib/gitian-descriptors/gitian-win.yml']):
print('Verifying v'+args.version+' Windows FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../tarian/contrib/gitian-descriptors/gitian-osx.yml']):
print('Verifying v'+args.version+' MacOS FAILED\n')
rc = 1
# TODO: Skip checking signed windows sigs until we actually start producing signed windows binaries
#print('\nVerifying v'+args.version+' Signed Windows\n')
#if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../tarian/contrib/gitian-descriptors/gitian-win-signer.yml']):
# print('Verifying v'+args.version+' Signed Windows FAILED\n')
# rc = 1
print('\nVerifying v'+args.version+' Signed MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../tarian/contrib/gitian-descriptors/gitian-osx-signer.yml']):
print('Verifying v'+args.version+' Signed MacOS FAILED\n')
rc = 1
os.chdir(workdir)
return rc
def main():
global args, workdir
parser = argparse.ArgumentParser(description='Script for running full Gitian builds.')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/tarian-Project/tarian', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file')
parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.host_os = sys.platform
if args.host_os == 'win32' or args.host_os == 'cygwin':
raise Exception('Error: Native Windows is not supported by this script, use WSL')
if args.host_os == 'linux':
if os.environ['USER'] == 'root':
raise Exception('Error: Do not run this script as the root user')
args.is_bionic = False
args.is_fedora = False
args.is_centos = False
args.is_wsl = False
if os.path.isfile('/usr/bin/lsb_release'):
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if os.path.isfile('/etc/fedora-release'):
args.is_fedora = True
if os.path.isfile('/etc/centos-release'):
args.is_centos = True
if os.path.isfile('/proc/version') and open('/proc/version', 'r').read().find('Microsoft') != -1:
args.is_wsl = True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
# Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they
# can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm).
os.environ['USE_LXC'] = ''
os.environ['USE_VBOX'] = ''
os.environ['USE_DOCKER'] = ''
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if 'GITIAN_HOST_IP' not in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if 'LXC_GUEST_IP' not in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
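# Illustrative override: exporting GITIAN_HOST_IP and/or LXC_GUEST_IP in the shell
# before invoking this script keeps those values, because the defaults above are
# only applied when the variables are missing from the environment.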
if args.setup:
if args.host_os == 'linux':
setup_linux()
elif args.host_os == 'darwin':
setup_darwin()
if args.buildsign:
args.build = True
args.sign = True
if not args.build and not args.sign and not args.verify:
sys.exit(0)
if args.host_os == 'darwin':
os.environ['PATH'] = '/usr/local/opt/coreutils/libexec/gnubin' + os.pathsep + os.environ['PATH']
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
if args.detach_sign:
args.commit_files = False
script_name = os.path.basename(sys.argv[0])
if not args.signer:
print(script_name+': Missing signer')
print('Try '+script_name+' --help for more information')
sys.exit(1)
if not args.version:
print(script_name+': Missing version')
print('Try '+script_name+' --help for more information')
sys.exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = ('' if args.commit else 'v') + args.version
os.chdir('tarian')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
if not os.path.isdir('../gitian-builder/inputs/tarian'):
os.makedirs('../gitian-builder/inputs/tarian')
os.chdir('../gitian-builder/inputs/tarian')
if not os.path.isdir('.git'):
subprocess.check_call(['git', 'init'])
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
os.chdir('gitian-builder')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
sys.exit(verify())
if __name__ == '__main__':
main()
|
[] |
[] |
[
"LXC_GUEST_IP",
"USE_DOCKER",
"USE_LXC",
"USER",
"USE_VBOX",
"GITIAN_HOST_IP",
"PATH"
] |
[]
|
["LXC_GUEST_IP", "USE_DOCKER", "USE_LXC", "USER", "USE_VBOX", "GITIAN_HOST_IP", "PATH"]
|
python
| 7 | 0 | |
publish_netcdf.py
|
import os
import dotenv
import logging
import time
import textwrap
import argparse
import stat
import glob
import pathlib
import datetime
import shutil
import subprocess
from irods.session import iRODSSession
from irods.meta import iRODSMeta
RESOURCE_ID_GLOB = "????????????????????????????????"
EXCLUDED = ["bags", "temp", "zips"]
IS_PUBLIC_KEY = "isPublic"
IS_PUBLIC_VALUE = "true"
NETCDF_EXTENSIONS = [".nc", ".nc4"]
FILE_MODE = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH
logger = logging.getLogger(__name__)
class NetCDFPublicationError(Exception):
"""
An Exception class for NetCDF publication.
"""
pass
def rchmod(path, mode):
"""
Recursively change filesystem permissions of path and all of its children.
rchmod(path, mode) -> None
Where:
path: <str> Absolute path to change filesystems permissions
mode: <int> numeric mode for all changes, consistent with constants in the stat library
"""
os.chmod(path, mode)
for root, dirs, files in os.walk(path):
for d in dirs:
os.chmod(os.path.join(root, d), mode)
for f in files:
os.chmod(os.path.join(root, f), mode)
return None
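# Illustrative call (the path is hypothetical): make a published resource tree
# world-readable/traversable using the module-level FILE_MODE constant:
#   rchmod('/opt/thredds/public/<resource_id>', FILE_MODE)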
def replace_spaces_in_names(path):
"""
Recursively replace spaces in names of all of the children underneath a path.
replace_spaces_in_names(path) -> None
Where:
path: <str> Absolute path to traverse with name fixes
This is a fix for a bug in TDS 5 which was already fixed in TDS 4 but has regressed.
When a fix is available in TDS 5 and then deployed, this function may be deprecated.
Spaces are replaced with dunders as cases have been encountered where replacing
with a single underscore resulted in a name collision.
"""
replaced = 0
walk = list(os.walk(path))
walk.reverse()
for root, dirs, files in walk:
for f in files:
if " " in f:
replacement = os.path.join(root, f.replace(" ", "__"))
if pathlib.Path(replacement).exists():
os.remove(replacement)
os.rename(os.path.join(root, f), replacement)
replaced += 1
for d in dirs:
if " " in d:
replacement = os.path.join(root, d.replace(" ", "__"))
if pathlib.Path(replacement).exists():
shutil.rmtree(replacement)
os.rename(os.path.join(root, d), replacement)
replaced += 1
if replaced:
logger.warning(f"Replaced {replaced} name{'s' if replaced != 1 else ''} " \
f"of {'a ' if replaced == 1 else ''}child{'ren' if replaced != 1 else ''} " \
f"in destination path {path}")
return None
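# Illustrative sketch (added comment; names are assumed examples): a file
# "my data.nc" inside a directory "site A" would be renamed to "my__data.nc"
# inside "site__A"; a pre-existing "site__A" is removed first so the rename
# cannot collide with it.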
def get_latest_resource_timestamp(irods_env, collection_path):
"""
Return the latest modification time among the collection's data objects.
get_latest_resource_timestamp(irods_env, collection_path) -> <datetime.datetime>
Where:
irods_env: <str> Absolute path to the iRODS environment file
collection_path: <str> Absolute iRODS path to the collection
Returns: <datetime.datetime> The latest modification time
This function should become deprecated with iRODS 4.2.9 which updates collection modification times
whenever a contained data object is modified.
"""
with iRODSSession(irods_env_file=irods_env) as session:
collection = session.collections.get(collection_path)
tree = [leaf for leaf in collection.walk()]
data_objects = []
for leaf in tree:
data_objects.extend(leaf[2])
timestamps = [data_object.modify_time for data_object in data_objects]
timestamp = max(timestamps)
return timestamp
def publish_resource(irods_env, proxy_path, catalog_path, resource_id):
"""
Copy the resource with its timestamp.
publish_resource(irods_env, proxy_path, catalog_path, resource_id) -> None
Where:
irods_env: <str> Absolute path to the iRODS environment file
proxy_path: <str> Absolute iRODS proxy path to Hydroshare resources
catalog_path: <str> Absolute THREDDS catalog path to publish resources
resource_id: <str> Resource ID to publish
Raises:
NetCDFPublicationError
"""
logger.info(f"Publishing resource ID: {resource_id} from {proxy_path} to {catalog_path}")
source = os.path.join(proxy_path, resource_id)
destination = os.path.join(catalog_path, resource_id)
timestamp = get_latest_resource_timestamp(irods_env, source)
# The iget destination is the catalog path in light of https://github.com/irods/irods/issues/5527
proc = subprocess.Popen(["env", f"IRODS_ENVIRONMENT_FILE={irods_env}", "iget", "-rf", source, catalog_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode:
logger.error(f"Publishing resource ID: {resource_id} from {proxy_path} to {catalog_path} failed:" \
f"return code: {proc.returncode} ::: " \
f'stdout: {stdout} ::: ' \
f"stderr: {stderr}")
raise NetCDFPublicationError(f"iget {source} to {destination} failed",
proc.returncode,
stdout,
stderr)
rchmod(destination, FILE_MODE)
# Fix for TDS 5. Hope to see a fix for this in TDS 5 itself.
replace_spaces_in_names(destination)
os.utime(destination, (timestamp.timestamp(), timestamp.timestamp()))
logger.info(f"Published resource ID: {resource_id} from {proxy_path} to {catalog_path} with timestamp: {timestamp}")
return None
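# Illustrative usage sketch (added comment; paths and resource ID are assumed
# examples): publish_resource(irods_env, "/hydroZone/home/proxy", "/thredds/public",
# "0123456789abcdef0123456789abcdef") igets the collection, fixes permissions and
# file names, and stamps the copy with the newest data-object modification time so
# sync_resources() can later compare source and destination timestamps.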
def scan_source(irods_env, proxy_path):
"""
Scan the iRODS proxy path for all public Hydroshare resources containing NetCDF and their timestamps.
scan_source(irods_env, proxy_path) -> [(resource_id, timestamp), ...]
Where:
irods_env: <str> Absolute path to the iRODS environment file
proxy_path: <str> Absolute iRODS proxy path to Hydroshare resources
Returns: <list> of two-<tuple>s where:
a) first element is a <str> resource id, and
b) second element is a <datetime.datetime> modification time.
"""
with iRODSSession(irods_env_file=irods_env) as session:
subcollections = session.collections.get(proxy_path).subcollections
subcollections = [subcollection for subcollection in subcollections if subcollection.name not in EXCLUDED]
logger.info(f"Number of included subcollections: {len(subcollections)}")
public = [subcollection for subcollection in subcollections
if "isPublic" in subcollection.metadata.keys()
and subcollection.metadata[IS_PUBLIC_KEY].value.lower() == IS_PUBLIC_VALUE]
logger.info(f"Number of public included subcollections: {len(public)}")
public_netcdf = []
for subcollection in public:
public_objects = [objs for col, subcol, objs in list(subcollection.walk())]
# flatten the list of lists of data objects
data_objects = []
for objs in public_objects:
data_objects.extend(objs)
netcdf_objects = [obj for obj in data_objects if pathlib.Path(obj.name).suffix.lower() in NETCDF_EXTENSIONS]
if netcdf_objects:
public_netcdf.append(subcollection.name)
logger.info(f"Subcollection name: {subcollection.name}; Number of NetCDF data objects in subcollection: {len(netcdf_objects)}")
logger.info(f"Number of public subcollections containing NetCDF: {len(public_netcdf)}")
source_netcdf = [(resource_id, get_latest_resource_timestamp(irods_env, os.path.join(proxy_path, resource_id)))
for resource_id in public_netcdf]
return source_netcdf
def scan_destination(catalog_path):
"""
Scan the THREDDS catalog path for all resources and their timestamps.
scan_destination(catalog_path) -> [(resource_id, timestamp), ...]
Where:
catalog_path: <str> Absolute THREDDS catalog path to publish resources
Returns: <list> of two-<tuple>s where:
a) first element is a <str> resource id, and
b) second element is a <datetime.datetime> modification time.
"""
resources = glob.glob(os.path.join(catalog_path, RESOURCE_ID_GLOB))
logger.info(f"Number of destination resources: {len(resources)}")
destination_netcdf = [(pathlib.PurePath(resource).name, datetime.datetime.fromtimestamp(os.path.getmtime(resource)))
for resource in resources]
return destination_netcdf
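# Illustrative return value (added comment; data is an assumed example):
# scan_destination("/thredds/public") might return
# [("0123456789abcdef0123456789abcdef", datetime.datetime(2021, 5, 1, 12, 0)), ...],
# one (resource_id, mtime) pair per 32-character resource directory.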
def remove_resource(catalog_path, resource_id):
"""
Remove a resource from the published destination.
remove_resource(catalog_path, resource_id) -> None
Where:
catalog_path: <str> Absolute THREDDS catalog path to publish resources
resource_id: <str> The resource ID to remove from publication
"""
shutil.rmtree(os.path.join(catalog_path, resource_id))
logger.info(f"Removed resource ID: {resource_id}")
return None
def sync_resources(irods_env, proxy_path, catalog_path):
"""
Sync public NetCDF resources between the iRODS proxy and the THREDDS catalog.
sync_resources(irods_env, proxy_path, catalog_path) -> None
Where:
irods_env: <str> Absolute path to the iRODS environment file
proxy_path: <str> Absolute iRODS proxy path to Hydroshare resources
catalog_path: <str> Absolute THREDDS catalog path to publish resources
a) Scan all resources in the source path and publish the public resources containing NetCDF which:
i) do not exist in the destination path, or
ii) are out of date in the destination path, and
b) Scan all resources in the destination path and remove the resources which:
i) no longer exist in the source path, or
ii) are no longer public in the source path.
"""
logger.info(f"Syncing resources from {proxy_path} to {catalog_path}")
start_time = time.perf_counter()
source_netcdf = scan_source(irods_env, proxy_path)
destination_netcdf = scan_destination(catalog_path)
destination_ids = [destination[0] for destination in destination_netcdf]
destination_timestamps = [destination[1] for destination in destination_netcdf]
for source_id, source_timestamp in source_netcdf:
try:
if source_id not in destination_ids:
logger.info(f"Resource ID: {source_id} not in destination")
publish_resource(irods_env, proxy_path, catalog_path, source_id)
else:
index = destination_ids.index(source_id)
destination_timestamp = destination_timestamps[index]
if source_timestamp > destination_timestamp:
logger.info(f"Resource ID: {source_id} source timestamp: {source_timestamp} > destination timestamp: {destination_timestamp}")
publish_resource(irods_env, proxy_path, catalog_path, source_id)
except NetCDFPublicationError as e:
logger.warning(f"Syncing resources from {proxy_path} to {catalog_path} incomplete")
destination_netcdf = scan_destination(catalog_path)
source_ids = [source[0] for source in source_netcdf]
for destination_id, destination_timestamp in destination_netcdf:
if destination_id not in source_ids:
logger.info(f"Resource ID: {destination_id} no longer in source")
remove_resource(catalog_path, destination_id)
end_time = time.perf_counter()
run_time = end_time - start_time
logger.info(f"Resources synced from {proxy_path} to {catalog_path} in {run_time:0.4f} seconds")
return None
if __name__ == "__main__":
epilog = """\
If invoked with a resource ID argument, publish the resource to the destination path, assumed to be referenced in a THREDDS catalog.
Otherwise,
a) scan all resources in the source path and publish the public resources containing NetCDF which:
i) do not exist in the destination path, or
ii) are out of date in the destination path, and
b) scan all resources in the destination path and remove the resources which:
i) no longer exist in the source path, or
ii) are no longer public in the source path."""
parser = argparse.ArgumentParser(description="Publish public Hydroshare resources containing NetCDF.",
epilog=textwrap.dedent(epilog),
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("dotenv_path",
help="Absolute path to the .env file.")
parser.add_argument("resource_id",
nargs="?",
default="",
help=textwrap.dedent("""\
Optional resource ID to publish.
If not specified, publish all public Hydroshare resources containing NetCDF."""))
args = parser.parse_args()
dotenv.load_dotenv(dotenv.find_dotenv(args.dotenv_path))
log_file = os.environ["PUBLIC_NETCDF_LOG_FILE"]
irods_env = os.environ["PUBLIC_NETCDF_IRODS_ENVIRONMENT_FILE"]
proxy_path = os.environ["PUBLIC_NETCDF_IRODS_PROXY_PATH"]
catalog_path = os.environ["PUBLIC_NETCDF_THREDDS_CATALOG_PATH"]
logging.basicConfig(filename=log_file,
# Available in Python 3.9+
# encoding="utf-8",
level=logging.INFO,
format="[%(asctime)s] [%(levelname)s] %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p")
logger = logging.getLogger(__name__)
if args.resource_id:
try:
publish_resource(irods_env,
proxy_path,
catalog_path,
args.resource_id)
except NetCDFPublicationError as e:
logger.warning(f"Publishing resource {args.resource_id} from {args.src_path} to {args.dest_path} incomplete")
else:
sync_resources(irods_env,
proxy_path,
catalog_path)
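# Illustrative invocation sketch (added comment; paths are assumed examples):
#   python publish_netcdf.py /path/to/.env                                    # sync all public NetCDF resources
#   python publish_netcdf.py /path/to/.env 0123456789abcdef0123456789abcdef   # publish a single resource
# The .env file is expected to define the four PUBLIC_NETCDF_* variables read above.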
|
[] |
[] |
[
"PUBLIC_NETCDF_IRODS_ENVIRONMENT_FILE",
"PUBLIC_NETCDF_LOG_FILE",
"PUBLIC_NETCDF_IRODS_PROXY_PATH",
"PUBLIC_NETCDF_THREDDS_CATALOG_PATH"
] |
[]
|
["PUBLIC_NETCDF_IRODS_ENVIRONMENT_FILE", "PUBLIC_NETCDF_LOG_FILE", "PUBLIC_NETCDF_IRODS_PROXY_PATH", "PUBLIC_NETCDF_THREDDS_CATALOG_PATH"]
|
python
| 4 | 0 | |
KnowledgeMapping/puppeteer_exp/demo_ip.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import asyncio
import pyppeteer
import time
import os
import random
from exe_js import js1, js3, js4, js5
# http://www.mamicode.com/info-detail-2302923.html
# https://segmentfault.com/a/1190000011627343
"""
{
proxy: "127.0.0.1:1234",
proxy-auth: "userx:passx",
proxy-type: "meh"
}
"""
def input_time_random():
return random.randint(300, 500)
async def main():
print("in main ")
print(os.environ.get('PYPPETEER_CHROMIUM_REVISION'))
browser = await pyppeteer.launch(
executablePath=r"D:\A\Desktop\项目+更新\node_project\chrome-win\chrome-win\chrome.exe",
headless=False,
args=[
'--proxy-server=118.24.156.214:8118'
],
timeout=30000)
page = await browser.newPage()
await page.setViewport({"width": 1000, "height": 780})
await page.setUserAgent("Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36")
await page.goto('http://httpbin.net/ip')
# await page.waitForNavigation({'waitUntil': 'load'})  # sometimes not needed
content = await page.content()
cookies = await page.cookies()
await page.screenshot({'path': 'example.png'})
dimensions = await page.evaluate('''() => {
return {
width: document.documentElement.clientWidth,
height: document.documentElement.clientHeight,
deviceScaleFactor: window.devicePixelRatio,
}
}''')
print(dimensions)
await browser.close()
return {'content': content, 'cookies': cookies}
asyncio.get_event_loop().run_until_complete(main())
|
[] |
[] |
[
"PYPPETEER_CHROMIUM_REVISION"
] |
[]
|
["PYPPETEER_CHROMIUM_REVISION"]
|
python
| 1 | 0 | |
CQGoodmorning/GoodMorning/utils.py
|
# -*- coding: utf-8 -*-
import os
import aiofiles
import ujson
from dateutil.parser import parse
import datetime
import random
from .config import *
async def jsonRead(p):
if not os.path.exists(p):
return ERROR
async with aiofiles.open(p, 'r', encoding='utf-8') as f:
content = await f.read()
content = ujson.loads(content)
return content
async def jsonWrite(p, info):
async with aiofiles.open(p, 'w', encoding='utf-8') as f:
await f.write(ujson.dumps(info))
return SUCCESS
async def getTheCurrentTime():
nowDate = str(datetime.datetime.strftime(datetime.datetime.now(),'%Y-%m-%d'))
return nowDate
async def getAccurateTimeNow():
nowDate = str(datetime.datetime.strftime(datetime.datetime.now(),'%Y-%m-%d/%H:%M:%S'))
return nowDate
async def timeDifferenceFromNowOn(original):
a = parse(str(original))
b = parse(await getTheCurrentTime())
return int((b - a).days)
async def judgeTimeDifference(lastTime):
timeNow = await getAccurateTimeNow()
a = parse(lastTime)
b = parse(timeNow)
return int((b - a).total_seconds() / 3600)
async def calculateTheElapsedTimeCombination(lastTime):
timeNow = await getAccurateTimeNow()
a = parse(lastTime)
b = parse(timeNow)
seconds = int((b - a).total_seconds())
return [int(seconds / 3600), int((seconds % 3600) / 60), int(seconds % 60)]
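# Illustrative sketch (added comment; timestamps are assumed examples): with
# lastTime '2021-01-01/00:00:00' and a current time of '2021-01-01/01:02:03',
# calculateTheElapsedTimeCombination returns [1, 2, 3], i.e. elapsed
# [hours, minutes, seconds].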
async def getTheCurrentHour():
return int(str(datetime.datetime.strftime(datetime.datetime.now(),'%H')))
async def userInformationReading(userQQ):
p = './GoodMorning/Data/User/' + str(userQQ) + '.json'
content = await jsonRead(p)
return content
async def userInformationWriting(userQQ, info):
p = './GoodMorning/Data/User/' + str(userQQ) + '.json'
await jsonWrite(p, info)
return SUCCESS
async def groupRead(userGroup):
p = './GoodMorning/Data/Group/' + str(userGroup) + '.json'
group = await jsonRead(p)
return group
async def groupWrite(userGroup, info):
p = './GoodMorning/Data/Group/' + str(userGroup) + '.json'
await jsonWrite(p, info)
return SUCCESS
async def at(userQQ):
return '[CQ:at,qq=' + str(userQQ) + ']\n'
async def readConfiguration(model):
if model == MORNING_MODEL:
return await jsonRead('./GoodMorning/Config/GoodMorning.json')
if model == NIGHT_MODEL:
return await jsonRead('./GoodMorning/Config/GoodNight.json')
async def extractRandomWords(model, sender):
name = sender['card']
if name == '':
name = sender['nickname']
return random.choice((await readConfiguration(model))['statement'])['content'].replace(r'{name}', name)
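# Illustrative sketch (added comment; config content is an assumed example): a
# statement entry such as {"content": "Good morning, {name}!"} combined with a
# sender whose card is "Alice" yields "Good morning, Alice!"; when the group card
# is empty, the nickname is used instead.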
async def extractConfigurationInformationAccordingToSpecifiedParameters(parameter, model):
return (await readConfiguration(model))[parameter]
async def replaceHourMinuteAndSecond(parameterList, msg):
return (msg.replace(r'{hour}', str(parameterList[0]))
.replace(r'{minute}', str(parameterList[1]))
.replace(r'{second}', str(parameterList[2])))
async def sendMsg(bot, userGroup, send):
if send != '' and send != ERROR:
await bot.send_group_msg(group_id = int(userGroup), message = str(send))
return True
return False
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
virtualenv/lib/python3.6/site-packages/flake8/main.py
|
# -*- coding: utf-8 -*-
import os
import sys
import setuptools
from flake8.engine import get_parser, get_style_guide
from flake8.util import is_flag, flag_on
if sys.platform.startswith('win'):
DEFAULT_CONFIG = os.path.expanduser(r'~\.flake8')
else:
DEFAULT_CONFIG = os.path.join(
os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'),
'flake8'
)
EXTRA_IGNORE = []
def main():
"""Parse options and run checks on Python source."""
# Prepare
flake8_style = get_style_guide(parse_argv=True, config_file=DEFAULT_CONFIG)
options = flake8_style.options
if options.install_hook:
from flake8.hooks import install_hook
install_hook()
# Run the checkers
report = flake8_style.check_files()
exit_code = print_report(report, flake8_style)
if exit_code > 0:
raise SystemExit(exit_code > 0)
def print_report(report, flake8_style):
# Print the final report
options = flake8_style.options
if options.statistics:
report.print_statistics()
if options.benchmark:
report.print_benchmark()
if report.total_errors:
if options.count:
sys.stderr.write(str(report.total_errors) + '\n')
if not options.exit_zero:
return 1
return 0
def check_file(path, ignore=(), complexity=-1):
"""Checks a file using pep8 and pyflakes by default and mccabe
optionally.
:param str path: path to the file to be checked
:param tuple ignore: (optional), error and warning codes to be ignored
:param int complexity: (optional), enables the mccabe check for values > 0
"""
ignore = set(ignore).union(EXTRA_IGNORE)
flake8_style = get_style_guide(
config_file=DEFAULT_CONFIG, ignore=ignore, max_complexity=complexity)
return flake8_style.input_file(path)
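# Illustrative usage sketch (added comment; arguments are assumed examples):
# check_file('setup.py', ignore=('E501',), complexity=10) runs the pep8/pyflakes
# checks plus the mccabe check (threshold 10) on a single file and returns the
# style guide's result for that file.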
def check_code(code, ignore=(), complexity=-1):
"""Checks code using pep8 and pyflakes by default and mccabe optionally.
:param str code: code to be checked
:param tuple ignore: (optional), error and warning codes to be ignored
:param int complexity: (optional), enables the mccabe check for values > 0
"""
ignore = set(ignore).union(EXTRA_IGNORE)
flake8_style = get_style_guide(
config_file=DEFAULT_CONFIG, ignore=ignore, max_complexity=complexity)
return flake8_style.input_file(None, lines=code.splitlines(True))
class Flake8Command(setuptools.Command):
"""The :class:`Flake8Command` class is used by setuptools to perform
checks on registered modules.
"""
description = "Run flake8 on modules registered in setuptools"
user_options = []
def initialize_options(self):
self.option_to_cmds = {}
parser = get_parser()[0]
for opt in parser.option_list:
cmd_name = opt._long_opts[0][2:]
option_name = cmd_name.replace('-', '_')
self.option_to_cmds[option_name] = cmd_name
setattr(self, option_name, None)
def finalize_options(self):
self.options_dict = {}
for (option_name, cmd_name) in self.option_to_cmds.items():
if option_name in ['help', 'verbose']:
continue
value = getattr(self, option_name)
if value is None:
continue
if is_flag(value):
value = flag_on(value)
self.options_dict[option_name] = value
def distribution_files(self):
if self.distribution.packages:
package_dirs = self.distribution.package_dir or {}
for package in self.distribution.packages:
pkg_dir = package
if package in package_dirs:
pkg_dir = package_dirs[package]
elif '' in package_dirs:
pkg_dir = package_dirs[''] + os.path.sep + pkg_dir
yield pkg_dir.replace('.', os.path.sep)
if self.distribution.py_modules:
for filename in self.distribution.py_modules:
yield "%s.py" % filename
# Don't miss the setup.py file itself
yield "setup.py"
def run(self):
# Prepare
paths = list(self.distribution_files())
flake8_style = get_style_guide(config_file=DEFAULT_CONFIG,
paths=paths,
**self.options_dict)
# Run the checkers
report = flake8_style.check_files()
exit_code = print_report(report, flake8_style)
raise SystemExit(exit_code > 0)
|
[] |
[] |
[
"XDG_CONFIG_HOME"
] |
[]
|
["XDG_CONFIG_HOME"]
|
python
| 1 | 0 | |
netty-client/src/test/java/redis/netty/client/RedisClientBaseTest.java
|
package redis.netty.client;
import org.junit.Test;
import redis.Command;
import redis.netty.BulkReply;
import redis.netty.IntegerReply;
import redis.netty.StatusReply;
import spullara.util.concurrent.Promise;
import spullara.util.functions.Block;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* Test the base redis client. Default redis required.
*/
public class RedisClientBaseTest {
@Test
public void testConnect() throws Exception {
final CountDownLatch done = new CountDownLatch(1);
final AtomicBoolean success = new AtomicBoolean();
final AtomicReference<RedisClientBase> client = new AtomicReference<>();
Promise<RedisClientBase> connect = RedisClientBase.connect("localhost", 6379);
connect.onSuccess(new Block<RedisClientBase>() {
@Override
public void apply(RedisClientBase redisClientBase) {
success.set(true);
client.set(redisClientBase);
done.countDown();
}
}).onFailure(new Block<Throwable>() {
@Override
public void apply(Throwable throwable) {
success.set(false);
done.countDown();
}
});
done.await(5000, TimeUnit.MILLISECONDS);
final CountDownLatch done2 = new CountDownLatch(1);
assertTrue(success.get());
client.get().close().onSuccess(new Block<Void>() {
@Override
public void apply(Void aVoid) {
success.set(true);
done2.countDown();
}
}).onFailure(new Block<Throwable>() {
@Override
public void apply(Throwable throwable) {
success.set(false);
done2.countDown();
}
});
done2.await(5000, TimeUnit.MILLISECONDS);
assertTrue(success.get());
}
@Test
public void testConnectFailure() throws Exception {
final CountDownLatch done = new CountDownLatch(1);
final AtomicBoolean success = new AtomicBoolean();
final AtomicReference<Throwable> failure = new AtomicReference<>();
Promise<RedisClientBase> connect = RedisClientBase.connect("localhost", 6380);
connect.onSuccess(new Block<RedisClientBase>() {
@Override
public void apply(RedisClientBase redisClientBase) {
success.set(true);
done.countDown();
}
}).onFailure(new Block<Throwable>() {
@Override
public void apply(Throwable throwable) {
success.set(false);
failure.set(throwable);
done.countDown();
}
});
done.await(5000, TimeUnit.MILLISECONDS);
assertFalse(success.get());
assertTrue("Connection not refused", failure.get().getMessage().startsWith("Connection refused"));
}
@Test
public void testError() throws ExecutionException, InterruptedException {
try {
RedisClient client = RedisClient.connect("localhost", 6379).get();
client.set("test", "value").get();
client.hgetall("test").get();
fail("Should have failed");
} catch (ExecutionException ee) {
assertTrue(ee.getCause() instanceof RedisException);
}
}
@Test
public void testExecute() throws Exception {
final CountDownLatch done = new CountDownLatch(1);
final AtomicBoolean success = new AtomicBoolean();
RedisClientBase.connect("localhost", 6379).onSuccess(new Block<RedisClientBase>() {
@Override
public void apply(final RedisClientBase redisClientBase) {
redisClientBase.execute(StatusReply.class, new Command("set", "test", "test")).onSuccess(new Block<StatusReply>() {
@Override
public void apply(StatusReply reply) {
if (reply.data().equals("OK")) {
redisClientBase.execute(BulkReply.class, new Command("get", "test")).onSuccess(new Block<BulkReply>() {
@Override
public void apply(BulkReply reply) {
if (reply.asAsciiString().equals("test")) {
success.set(true);
}
done.countDown();
redisClientBase.close();
}
});
} else {
done.countDown();
redisClientBase.close();
}
}
});
}
});
done.await(5000, TimeUnit.MILLISECONDS);
assertTrue(success.get());
}
@Test
public void testCommands() throws InterruptedException {
final CountDownLatch done = new CountDownLatch(1);
final AtomicReference<StatusReply> setOK = new AtomicReference<>();
final AtomicReference<BulkReply> getTest2 = new AtomicReference<>();
RedisClient.connect("localhost", 6379).onSuccess(new Block<RedisClient>() {
@Override
public void apply(final RedisClient redisClient) {
redisClient.set("test", "test2").onSuccess(new Block<StatusReply>() {
@Override
public void apply(StatusReply statusReply) {
setOK.set(statusReply);
redisClient.get("test").onSuccess(new Block<BulkReply>() {
@Override
public void apply(BulkReply bulkReply) {
getTest2.set(bulkReply);
redisClient.close().onSuccess(new Block<Void>() {
@Override
public void apply(Void aVoid) {
done.countDown();
}
});
}
});
}
});
}
}).onFailure(new Block<Throwable>() {
@Override
public void apply(Throwable throwable) {
throwable.printStackTrace();
}
});
done.await(5000, TimeUnit.MILLISECONDS);
assertEquals("OK", setOK.get().data());
assertEquals("test2", getTest2.get().asAsciiString());
}
@Test
public void testSerialPerformance() throws InterruptedException {
if (System.getenv().containsKey("CI") || System.getProperty("CI") != null) return;
final CountDownLatch done = new CountDownLatch(1);
final int[] i = new int[1];
RedisClient.connect("localhost", 6379).onSuccess(new Block<RedisClient>() {
long start;
private Block<StatusReply> setBlock;
void again(RedisClient redisClient) {
apply(redisClient);
}
@Override
public void apply(final RedisClient redisClient) {
if (start == 0) {
setBlock = new Block<StatusReply>() {
@Override
public void apply(StatusReply statusReply) {
again(redisClient);
}
};
start = System.currentTimeMillis();
}
if (System.currentTimeMillis() - start < 5000) {
redisClient.set(String.valueOf(i[0]++), "test2").onSuccess(setBlock);
} else {
redisClient.close().onSuccess(new Block<Void>() {
@Override
public void apply(Void aVoid) {
done.countDown();
}
});
}
}
});
done.await(6000, TimeUnit.MILLISECONDS);
System.out.println("Completed " + i[0] / 5 + " per second");
}
@Test
public void testPipelinePerformance() throws InterruptedException {
if (System.getenv().containsKey("CI") || System.getProperty("CI") != null) return;
final CountDownLatch done = new CountDownLatch(1);
final AtomicInteger total = new AtomicInteger();
RedisClient.connect("localhost", 6379).onSuccess(new Block<RedisClient>() {
@Override
public void apply(final RedisClient redisClient) {
new Thread(new Runnable() {
@Override
public void run() {
try {
final Semaphore semaphore = new Semaphore(100);
Runnable release = new Runnable() {
@Override
public void run() {
semaphore.release();
}
};
long start = System.currentTimeMillis();
while (System.currentTimeMillis() - start < 5000) {
semaphore.acquire();
String current = String.valueOf(total.getAndIncrement());
redisClient.set(current, current).ensure(release);
}
semaphore.acquire(100);
done.countDown();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}).start();
}
});
done.await(6000, TimeUnit.MILLISECONDS);
if (total.get() == 100) {
fail("Failed to complete any requests");
}
System.out.println("Completed " + total.get() / 5 + " per second");
}
@Test
public void testPipelineConcurrency() throws InterruptedException {
if (System.getenv().containsKey("CI") || System.getProperty("CI") != null) return;
final CountDownLatch done = new CountDownLatch(1);
final AtomicInteger total = new AtomicInteger();
final AtomicInteger errors = new AtomicInteger();
RedisClient.connect("localhost", 6379).onSuccess(new Block<RedisClient>() {
@Override
public void apply(final RedisClient redisClient) {
new Thread(new Runnable() {
@Override
public void run() {
try {
final Semaphore semaphore = new Semaphore(100);
Runnable release = new Runnable() {
@Override
public void run() {
semaphore.release();
}
};
long start = System.currentTimeMillis();
while (System.currentTimeMillis() - start < 5000) {
semaphore.acquire();
final String current = String.valueOf(total.getAndIncrement());
redisClient.set(current, current).ensure(release).onSuccess(new Block<StatusReply>() {
@Override
public void apply(StatusReply statusReply) {
redisClient.get(current).onSuccess(new Block<BulkReply>() {
@Override
public void apply(BulkReply bulkReply) {
String s = bulkReply.asAsciiString();
if (!s.equals(current)) {
System.out.println(s + " != " + current);
errors.incrementAndGet();
}
}
}).onFailure(new Block<Throwable>() {
@Override
public void apply(Throwable throwable) {
errors.incrementAndGet();
}
});
}
});
}
semaphore.acquire(100);
done.countDown();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}).start();
}
});
done.await(6000, TimeUnit.MILLISECONDS);
System.out.println("Completed " + total.get() / 5 + " per second");
assertEquals(0, errors.get());
if (total.get() == 100) {
fail("Failed to complete any requests");
}
}
@Test
public void testPubSub() throws InterruptedException, ExecutionException {
final CountDownLatch done = new CountDownLatch(2);
final Promise<Void> wassubscribed = new Promise<>();
final AtomicReference<byte[]> gotmessage = new AtomicReference<>();
final AtomicLong listeners = new AtomicLong(0);
final AtomicBoolean failed = new AtomicBoolean();
RedisClient.connect("localhost", 6379).onSuccess(new Block<RedisClient>() {
@Override
public void apply(final RedisClient redisClient) {
redisClient.addListener(new ReplyListener() {
@Override
public void subscribed(byte[] name, int channels) {
wassubscribed.set(null);
}
@Override
public void psubscribed(byte[] name, int channels) {
failed.set(true);
}
@Override
public void unsubscribed(byte[] name, int channels) {
failed.set(true);
}
@Override
public void punsubscribed(byte[] name, int channels) {
failed.set(true);
}
@Override
public void message(byte[] channel, byte[] message) {
gotmessage.set(message);
redisClient.close();
done.countDown();
}
@Override
public void pmessage(byte[] pattern, byte[] channel, byte[] message) {
failed.set(true);
}
});
redisClient.subscribe("test").onSuccess(new Block<Void>() {
@Override
public void apply(Void aVoid) {
RedisClient.connect("localhost", 6379).onSuccess(new Block<RedisClient>() {
@Override
public void apply(final RedisClient redisClient) {
wassubscribed.onSuccess(new Block<Void>() {
@Override
public void apply(Void aVoid) {
redisClient.publish("test", "hello").onSuccess(new Block<IntegerReply>() {
@Override
public void apply(IntegerReply integerReply) {
listeners.set(integerReply.data());
redisClient.close();
done.countDown();
}
});
}
});
}
});
}
});
}
});
done.await(10000, TimeUnit.MILLISECONDS);
assertTrue(wassubscribed.get() == null);
assertEquals("hello", new String(gotmessage.get()));
assertEquals(1, listeners.get());
assertFalse(failed.get());
}
@Test
public void testPubSubPerformance() throws InterruptedException {
if (System.getenv().containsKey("CI") || System.getProperty("CI") != null) return;
final CountDownLatch done = new CountDownLatch(1);
final Semaphore semaphore = new Semaphore(100);
final AtomicInteger total = new AtomicInteger();
Promise<RedisClient> redisClient = RedisClient.connect("localhost", 6379).onSuccess(new Block<RedisClient>() {
@Override
public void apply(RedisClient redisClient) {
redisClient.addListener(new MessageListener() {
@Override
public void message(byte[] channel, byte[] message) {
semaphore.release();
total.incrementAndGet();
}
@Override
public void pmessage(byte[] pattern, byte[] channel, byte[] message) {
}
});
redisClient.subscribe("test").onSuccess(new Block<Void>() {
@Override
public void apply(Void aVoid) {
RedisClient.connect("localhost", 6379).onSuccess(new Block<RedisClient>() {
@Override
public void apply(final RedisClient redisClient) {
new Thread(new Runnable() {
@Override
public void run() {
long start = System.currentTimeMillis();
while (System.currentTimeMillis() - start < 5000) {
semaphore.acquireUninterruptibly();
redisClient.publish("test", "hello");
}
redisClient.close();
done.countDown();
}
}).start();
}
});
}
});
}
});
done.await(6000, TimeUnit.MILLISECONDS);
redisClient.onSuccess(new Block<RedisClient>() {
@Override
public void apply(RedisClient redisClient) {
redisClient.close();
}
});
System.out.println(total.get() / 5 + " per second");
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
thespian/system/admin/convention.py
|
import logging
from thespian.actors import *
from thespian.system.utilis import (thesplog, checkActorCapabilities,
foldl, join, fmap, AssocList,
actualActorClass)
from thespian.system.timing import ExpirationTimer, currentTime
from thespian.system.logdirector import LogAggregator
from thespian.system.admin.globalNames import GlobalNamesAdmin
from thespian.system.admin.adminCore import PendingSource
from thespian.system.transport import (TransmitIntent, ReceiveEnvelope,
Thespian__Run_Terminated)
from thespian.system.messages.admin import PendingActorResponse
from thespian.system.messages.convention import *
from thespian.system.sourceLoader import loadModuleFromHashSource
from thespian.system.transport.hysteresis import HysteresisDelaySender
from functools import partial
from datetime import (timedelta, datetime)
from thespian.system.transport.IPBase import (TCPv4ActorAddress)
import os
try:
registration_period = os.environ['CONVENTION_REREGISTRATION_PERIOD']
rgsrtn_prd_mnt = int(registration_period.split(':')[0])
rgsrtn_prd_sec = int(registration_period.split(':')[1])
CONVENTION_REREGISTRATION_PERIOD = timedelta(minutes=rgsrtn_prd_mnt, seconds=rgsrtn_prd_sec)
except Exception as ex:
thesplog('Cannot process CONVENTION_REREGISTRATION_PERIOD from environment variables. Assigning default (7 min 22 secs). Exception: %s %s', \
ex, type(ex), level=logging.WARNING)
CONVENTION_REREGISTRATION_PERIOD = timedelta(minutes=7, seconds=22)
try:
restart_period = os.environ['CONVENTION_RESTART_PERIOD']
restart_prd_mnt = int(restart_period.split(':')[0])
restart_prd_sec = int(restart_period.split(':')[1])
CONVENTION_RESTART_PERIOD = timedelta(minutes=restart_prd_mnt, seconds=restart_prd_sec)
except Exception as ex:
thesplog('Cannot process CONVENTION_RESTART_PERIOD from environment variables. Assigning default (3 min 22 secs). Exception: %s %s', \
ex, type(ex), level=logging.WARNING)
CONVENTION_RESTART_PERIOD = timedelta(minutes=3, seconds=22)
try:
convention_regstrn = os.environ['CONVENTION_REGISTRATION_MISS_MAX']
CONVENTION_REGISTRATION_MISS_MAX = int(convention_regstrn)
except Exception as ex:
thesplog('Cannot process CONVENTION_REGISTRATION_MISS_MAX from environment variables. Assigning default (3). Exception: %s %s', \
ex, type(ex), level=logging.WARNING)
CONVENTION_REGISTRATION_MISS_MAX = 3 # # of missing convention registrations before death declared
CONVENTION_REINVITE_ADJUSTMENT = 1.1 # multiplier applied to the remote's expected check-in time to get the new invite timeout period
CURR_CONV_ADDR_IPV4 = 'Convention Address.IPv4'
def convention_reinvite_adjustment(t):
try:
return t * CONVENTION_REINVITE_ADJUSTMENT
except TypeError:
# Python2 cannot multiply timedelta by a float, so take a longer route
return t + (t / int(1 / (CONVENTION_REINVITE_ADJUSTMENT % 1)))
class PreRegistration(object):
def __init__(self):
self.pingValid = ExpirationTimer(timedelta(seconds=0))
self.pingPending = False
def refresh(self):
self.pingValid = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
class ConventionMemberData(object):
def __init__(self, address, capabilities, preRegOnly=False):
self.remoteAddress = address
self.remoteCapabilities = capabilities
self.hasRemoteActors = [] # (localParent, remoteActor) addresses created remotely
# The preRegOnly field indicates that this information is only
# from a pre-registration.
self.preRegOnly = preRegOnly
# preRegistered is not None if the ConventionRegister has the
# preRegister flag set. This indicates a call from
# preRegisterRemoteSystem. The pingValid is only used for
# preRegistered systems and is used to determine how long an
# active check of the preRegistered remote is valid. If
# pingValid is expired, the local attempts to send a
# QueryExists message (which will retry) and a QueryAck will
# reset pingValid to another CONVENTION_REREGISTRATION_PERIOD.
# The pingPending is true while the QueryExists is pending and
# will suppress additional pingPending messages. A success or
# failure completion of a QueryExists message will reset
# pingPending to false. Note that pinging occurs continually
# for a preRegistered remote, regardless of whether or not its
# Convention membership is currently valid.
self.preRegistered = None # or PreRegistration object
self._reset_valid_timer()
@property
def permanentEntry(self):
return bool(self.preRegOnly or self.preRegistered)
def createdActor(self, localParentAddress, newActorAddress):
entry = localParentAddress, newActorAddress
if entry not in self.hasRemoteActors:
self.hasRemoteActors.append(entry)
def refresh(self, remoteCapabilities, preReg=False):
self.remoteCapabilities = remoteCapabilities
self._reset_valid_timer()
if self.preRegistered:
self.preRegistered.refresh()
def _reset_valid_timer(self):
# registryValid is a timer that is usually set to a multiple
# of the convention re-registration period. Each successful
# convention re-registration resets the timer to the maximum
# value (actually, it replaces this structure with a newly
# generated structure). If the timer expires, the remote is
# declared as dead and the registration is removed (or
# quiesced if it is a pre-registration).
self.registryValid = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD *
CONVENTION_REGISTRATION_MISS_MAX)
def __str__(self):
return 'ActorSystem @ %s%s, registry valid for %s with %s' % (
str(self.remoteAddress),
(' (prereg-only)' if self.preRegOnly else
(' (prereg)' if self.preRegistered else '')),
str(self.registryValid),
str(self.remoteCapabilities))
class HysteresisCancel(object):
def __init__(self, cancel_addr):
self.cancel_addr = cancel_addr
class HysteresisSend(TransmitIntent): pass
class LostRemote(object):
# tells transport to reset (close sockets, drop buffers, etc.)
def __init__(self, lost_addr):
self.lost_addr = lost_addr
class LocalConventionState(object):
def __init__(self, myAddress, capabilities, sCBStats, getAllConventionAddressesFunc):
self._myAddress = myAddress
self._capabilities = capabilities
self._sCBStats = sCBStats
self._conventionMembers = AssocList() # key=Remote Admin Addr, value=ConventionMemberData
self._conventionNotificationHandlers = []
self._convntn_ipv4_marker = 0
self._conventionAddresses = getAllConventionAddressesFunc(capabilities)
# This is mostly needed for backward compatibility at this time
self._conventionAddress = self._conventionAddresses[self._convntn_ipv4_marker]
self._conventionRegistration = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
self._has_been_activated = False
self._invited = False # entered convention as a result of an explicit invite
self._current_avlbl_leaders = list(map(LocalConventionState._populate_initial_leaders, self._conventionAddresses))
self._avlbl_leader_last_knwn_ts = int(datetime.utcnow().strftime('%Y%m%d%H%M%S%f')[:-3])
@property
def myAddress(self):
return self._myAddress
@property
def capabilities(self):
return self._capabilities
def updateStatusResponse(self, resp):
resp.setConventionLeaderAddress(self.conventionLeaderAddr)
resp.setConventionRegisterTime(self._conventionRegistration)
for each in self._conventionMembers.values():
resp.addConventioneer(each.remoteAddress, each.registryValid)
resp.setNotifyHandlers(self._conventionNotificationHandlers)
def active_in_convention(self):
# If this is the convention leader, it is automatically
# active, otherwise this convention member should have a
# convention leader and that leader should have an active
# entry in the _conventionMembers table (indicating it has
# updated this system with its information)
return bool(self.conventionLeaderAddr and
self._conventionMembers.find(self.conventionLeaderAddr))
@property
def conventionLeaderAddr(self):
return self._conventionAddresses[self._convntn_ipv4_marker]
def isConventionLeader(self):
# Might also be the leader if self.conventionLeaderAddr is None
return self.conventionLeaderAddr == self.myAddress
@staticmethod
def _populate_initial_leaders(leader_address):
thesplog(' _populate_initial_leaders: %s', leader_address, level=logging.DEBUG)
curr_leader = {}
curr_leader['actor_address'] = leader_address
curr_leader['address'] = str(leader_address)
# Status is UNKNOWN unless confirmed otherwise
curr_leader['status'] = 'UNKNOWN'
# Defaulted to current system up (UTC)
curr_leader['last_known_ts'] = int(datetime.utcnow().strftime('%Y%m%d%H%M%S%f')[:-3])
return curr_leader
def capabilities_have_changed(self, new_capabilities):
self._capabilities = new_capabilities
return self.setup_convention()
def setup_convention(self, activation=False):
self._has_been_activated |= activation
rmsgs = []
# If not specified in capabilities, don't override any invites
# that may have been received.
leader_is_gone = (self._conventionMembers.find(self.conventionLeaderAddr) is None) \
if self.conventionLeaderAddr else True
thesplog(' isConventionLeader:%s, conventionLeaderAddr: %s', self.isConventionLeader(), self.conventionLeaderAddr, level=logging.DEBUG)
if not self.isConventionLeader() and self.conventionLeaderAddr:
thesplog('Admin registering with Convention @ %s (%s)',
self.conventionLeaderAddr,
'first time' if leader_is_gone else 're-registering',
level=logging.INFO, primary=True)
rmsgs.append(
HysteresisSend(self.conventionLeaderAddr,
ConventionRegister(self.myAddress,
self.capabilities,
leader_is_gone),
onSuccess = self._setupConventionCBGood,
onError = self._setupConventionCBError))
rmsgs.append(LogAggregator(self.conventionLeaderAddr))
# Check if this is part of convention leaders
if self.myAddress in self._conventionAddresses:
thesplog(' New leader %s located', self.myAddress, level=logging.DEBUG)
thesplog(' My address: %s', self.myAddress, level=logging.DEBUG)
rmsgs.append(TransmitIntent(self.myAddress, NewLeaderAvailable(self.myAddress)))
self._conventionRegistration = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
return rmsgs
def _setupConventionCBGood(self, result, finishedIntent):
self._sCBStats.inc('Admin Convention Registered')
if hasattr(self, '_conventionLeaderMissCount'):
delattr(self, '_conventionLeaderMissCount')
def _setupConventionCBError(self, result, finishedIntent):
self._sCBStats.inc('Admin Convention Registration Failed')
if hasattr(self, '_conventionLeaderMissCount'):
self._conventionLeaderMissCount += 1
else:
self._conventionLeaderMissCount = 1
thesplog('Admin cannot register with convention @ %s (miss %d): %s',
finishedIntent.targetAddr,
self._conventionLeaderMissCount,
result, level=logging.WARNING, primary=True)
def got_convention_invite(self, sender):
thesplog('Got Convention invite from %s', sender, level=logging.DEBUG)
#TODO - Append sender to list?
self._conventionAddress = sender
self._invited = True
return self.setup_convention()
def got_convention_register(self, regmsg):
# Called when remote convention member has sent a ConventionRegister message
self._sCBStats.inc('Admin Handle Convention Registration')
if self._invited and not self.conventionLeaderAddr:
# Lost connection to an invitation-only convention.
# Cannot join again until another invitation is received.
return []
# Registrant may re-register if changing capabilities
rmsgs = []
registrant = regmsg.adminAddress
prereg = getattr(regmsg, 'preRegister', False) # getattr used; see definition
existing = self._conventionMembers.find(registrant)
thesplog('Got Convention %sregistration from %s (%s) (new? %s)',
'pre-' if prereg else '',
registrant,
'first time' if regmsg.firstTime else 're-registering',
not existing,
level=logging.DEBUG)
if registrant == self.myAddress:
# Either remote failed getting an external address and is
# using 127.0.0.1 or else this is a malicious attempt to
# make us talk to ourselves. Ignore it.
thesplog('Convention registration from %s is an invalid address, ignoring.',
registrant,
level=logging.WARNING)
return rmsgs
if self.myAddress in self._conventionAddresses:
thesplog(' Identified self(%s) as a leader', self.myAddress, level=logging.DEBUG)
rmsgs.append(TransmitIntent(self.myAddress, NewLeaderAvailable(self.myAddress)))
existingPreReg = (
# existing.preRegOnly
# or existing.preRegistered
existing.permanentEntry
) if existing else False
notify = (not existing or existing.preRegOnly) and not prereg
if regmsg.firstTime or not existing:
if existing:
existing = None
notify = not prereg
rmsgs.extend(self._remote_system_cleanup(registrant))
newmember = ConventionMemberData(registrant,
regmsg.capabilities,
prereg)
if prereg or existingPreReg:
newmember.preRegistered = PreRegistration()
self._conventionMembers.add(registrant, newmember)
else:
existing.refresh(regmsg.capabilities, prereg or existingPreReg)
if not prereg:
existing.preRegOnly = False
if not self.isConventionLeader():
self._conventionRegistration = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
# Convention Members normally periodically initiate a
# membership message, to which the leader confirms by
# responding; if this was a pre-registration, that identifies
# this system as the "leader" for that remote. Also, if the
# remote sent this because it was a pre-registration leader,
# it doesn't yet have all the member information so the member
# should respond.
#if self.isConventionLeader() or prereg or regmsg.firstTime:
if prereg:
rmsgs.append(HysteresisCancel(registrant))
rmsgs.append(TransmitIntent(registrant, ConventionInvite()))
elif (self.isConventionLeader() or prereg or regmsg.firstTime or \
(existing and existing.permanentEntry)):
# If we are the Convention Leader, this would be the point to
# inform all other registrants of the new registrant. At
# present, there is no reciprocity here, so just update the
# new registrant with the leader's info.
rmsgs.append(
TransmitIntent(registrant,
ConventionRegister(self.myAddress,
self.capabilities)))
if notify:
rmsgs.extend(self._notifications_of(
ActorSystemConventionUpdate(registrant,
regmsg.capabilities,
True)))
return rmsgs
def _notifications_of(self, msg):
return [TransmitIntent(H, msg) for H in self._conventionNotificationHandlers]
def add_notification_handler(self, addr):
if addr not in self._conventionNotificationHandlers:
self._conventionNotificationHandlers.append(addr)
# Now update the registrant on the current state of all convention members
return [TransmitIntent(addr,
ActorSystemConventionUpdate(M.remoteAddress,
M.remoteCapabilities,
True))
for M in self._conventionMembers.values()
if not M.preRegOnly]
return []
def remove_notification_handler(self, addr):
self._conventionNotificationHandlers = [
H for H in self._conventionNotificationHandlers
if H != addr]
def got_convention_deregister(self, deregmsg):
self._sCBStats.inc('Admin Handle Convention De-registration')
remoteAdmin = deregmsg.adminAddress
if remoteAdmin == self.myAddress:
# Either remote failed getting an external address and is
# using 127.0.0.1 or else this is a malicious attempt to
# make us talk to ourselves. Ignore it.
thesplog('Convention deregistration from %s is an invalid address; ignoring.',
remoteAdmin,
level=logging.WARNING)
rmsgs = []
if getattr(deregmsg, 'preRegistered', False): # see definition for getattr use
existing = self._conventionMembers.find(remoteAdmin)
if existing:
existing.preRegistered = None
rmsgs.append(TransmitIntent(remoteAdmin, ConventionDeRegister(self.myAddress)))
return rmsgs + self._remote_system_cleanup(remoteAdmin)
def got_system_shutdown(self):
return self.exit_convention()
def exit_convention(self):
self._invited = False
gen_ops = lambda addr: [HysteresisCancel(addr),
TransmitIntent(addr,
ConventionDeRegister(self.myAddress)),
]
terminate = lambda a: [ self._remote_system_cleanup(a), gen_ops(a) ][-1]
if self.conventionLeaderAddr and \
self.conventionLeaderAddr != self.myAddress:
thesplog('Admin de-registering with Convention @ %s',
str(self.conventionLeaderAddr),
level=logging.INFO, primary=True)
# Cache convention leader address because it might get reset by terminate()
claddr = self.conventionLeaderAddr
terminate(self.conventionLeaderAddr)
return gen_ops(claddr)
return join(fmap(terminate,
[M.remoteAddress
for M in self._conventionMembers.values()
if M.remoteAddress != self.myAddress]))
def check_convention(self):
ct = currentTime()
rmsgs = []
if self._has_been_activated:
rmsgs = foldl(lambda x, y: x + y,
[self._check_preregistered_ping(ct, member)
for member in self._conventionMembers.values()],
self._convention_leader_checks(ct)
if self.isConventionLeader() or
not self.conventionLeaderAddr else
self._convention_member_checks(ct))
if self._conventionRegistration.view(ct).expired():
self._conventionRegistration = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
return rmsgs
def _convention_leader_checks(self, ct):
return foldl(lambda x, y: x + y,
[self._missed_checkin_remote_cleanup(R)
for R in [ member
for member in self._conventionMembers.values()
if member.registryValid.view(ct).expired() ]],
[])
def _missed_checkin_remote_cleanup(self, remote_member):
thesplog('%s missed %d checkins (%s); assuming it has died',
str(remote_member),
CONVENTION_REGISTRATION_MISS_MAX,
str(remote_member.registryValid),
level=logging.WARNING, primary=True)
return self._remote_system_cleanup(remote_member.remoteAddress)
def _convention_member_checks(self, ct):
rmsgs = []
# Re-register with the Convention if it's time
if self.conventionLeaderAddr and \
self._conventionRegistration.view(ct).expired():
if getattr(self, '_conventionLeaderMissCount', 0) >= \
CONVENTION_REGISTRATION_MISS_MAX:
thesplog('Admin convention registration lost @ %s (miss %d)',
self.conventionLeaderAddr,
self._conventionLeaderMissCount,
level=logging.WARNING, primary=True)
rmsgs.extend(self._remote_system_cleanup(self.conventionLeaderAddr))
#Re-elect leader since convention registration seems to be lost
new_leader_actr_addr = self._re_elect_leader()
rmsgs.append(TransmitIntent(new_leader_actr_addr, ConventionRegister(self.myAddress, self.capabilities, True)))
self._conventionLeaderMissCount = 0
else:
rmsgs.extend(self.setup_convention())
return rmsgs
def _re_elect_leader(self):
# Update about the last known leader
old_ldr_located = False
last_knwn_ldr = self.conventionLeaderAddr
for each in self._current_avlbl_leaders:
if last_knwn_ldr == each['actor_address']:
each['status'] = 'DOWN'
each['last_known_ts'] = int(datetime.utcnow().strftime('%Y%m%d%H%M%S%f')[:-3])
old_ldr_located = True
thesplog(' Marked current leader %s as DOWN', last_knwn_ldr, level=logging.DEBUG)
break
if not old_ldr_located:
thesplog(' Unable to mark %s as DOWN in convention leaders list', str(self.conventionLeaderAddr), level=logging.WARNING)
# Print the status, just for convenience
self._current_leader_stats()
# Mark new leader
new_ldr_located = False
new_ldr_addr = None
for idx, curr_ldr in enumerate(self._current_avlbl_leaders):
if curr_ldr['status'] == 'UP':
new_ldr_addr = curr_ldr['actor_address']
self._convntn_ipv4_marker = idx
new_ldr_located = True
thesplog(' Selected %s as next new leader', new_ldr_addr, level=logging.DEBUG)
break
if not new_ldr_located:
thesplog(' No convention leader is UP at this time', level=logging.WARNING)
return new_ldr_addr
def _check_preregistered_ping(self, ct, member):
if member.preRegistered and \
member.preRegistered.pingValid.view(ct).expired() and \
not member.preRegistered.pingPending:
member.preRegistered.pingPending = True
# If remote misses a checkin, re-extend the
# invitation. This also helps re-initiate a socket
# connection if a TxOnly socket has been lost.
member.preRegistered.pingValid = ExpirationTimer(
convention_reinvite_adjustment(
CONVENTION_RESTART_PERIOD
if member.registryValid.view(ct).expired()
else CONVENTION_REREGISTRATION_PERIOD))
return [HysteresisSend(member.remoteAddress,
ConventionInvite(),
onSuccess = self._preRegQueryNotPending,
onError = self._preRegQueryNotPending)]
return []
def _preRegQueryNotPending(self, result, finishedIntent):
remoteAddr = finishedIntent.targetAddr
member = self._conventionMembers.find(remoteAddr)
if member and member.preRegistered:
member.preRegistered.pingPending = False
def _remote_system_cleanup(self, registrant):
"""Called when a RemoteActorSystem has exited and all associated
Actors should be marked as exited and the ActorSystem
removed from Convention membership. This is also called on
a First Time connection from the remote to discard any
previous connection information.
"""
thesplog('Convention cleanup or deregistration for %s (known? %s)',
registrant,
bool(self._conventionMembers.find(registrant)),
level=logging.INFO)
rmsgs = [LostRemote(registrant)]
cmr = self._conventionMembers.find(registrant)
if not cmr or cmr.preRegOnly:
return []
# Send exited notification to conventionNotificationHandler (if any)
for each in self._conventionNotificationHandlers:
rmsgs.append(
TransmitIntent(each,
ActorSystemConventionUpdate(cmr.remoteAddress,
cmr.remoteCapabilities,
False))) # errors ignored
# If the remote ActorSystem shutdown gracefully (i.e. sent
# a Convention Deregistration) then it should not be
# necessary to shutdown remote Actors (or notify of their
# shutdown) because the remote ActorSystem should already
# have caused this to occur. However, it won't hurt, and
# it's necessary if the remote ActorSystem did not exit
# gracefully.
for lpa, raa in cmr.hasRemoteActors:
# ignore errors:
rmsgs.append(TransmitIntent(lpa, ChildActorExited(raa)))
# n.b. at present, this means that the parent might
# get duplicate notifications of ChildActorExited; it
# is expected that Actors can handle this.
# Remove remote system from conventionMembers
if not cmr.preRegistered:
if registrant == self.conventionLeaderAddr and self._invited:
#TODO - What needs to be done here?
self._conventionAddress = None
# Don't clear invited: once invited, that
# perpetually indicates this should be only a
# member and never a leader.
self._conventionMembers.rmv(registrant)
else:
# This conventionMember needs to stay because the
# current system needs to continue issuing
# registration pings. By setting the registryValid
# expiration to forever, this member won't re-time-out
# and will therefore be otherwise ignored... until it
# registers again at which point the membership will
# be updated with new settings.
cmr.registryValid = ExpirationTimer(None)
cmr.preRegOnly = True
return rmsgs + [HysteresisCancel(registrant)]
def sentByRemoteAdmin(self, envelope):
for each in self._conventionMembers.values():
if envelope.sender == each.remoteAddress:
return True
return False
def convention_inattention_delay(self, current_time):
return (self._conventionRegistration or
ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD
if self.active_in_convention() or
self.isConventionLeader() else
CONVENTION_RESTART_PERIOD)).view(current_time)
def forward_pending_to_remote_system(self, childClass, envelope, sourceHash, acceptsCaps):
alreadyTried = getattr(envelope.message, 'alreadyTried', [])
ct = currentTime()
remoteCandidates = [
K
for K in self._conventionMembers.values()
if not K.registryValid.view(ct).expired()
and K.remoteAddress != envelope.sender # source Admin
and K.remoteAddress not in alreadyTried
and acceptsCaps(K.remoteCapabilities)]
if not remoteCandidates:
if self.isConventionLeader() or not self.conventionLeaderAddr:
raise NoCompatibleSystemForActor(
childClass,
'No known ActorSystems can handle a %s for %s',
childClass, envelope.message.forActor)
# Let the Convention Leader try to find an appropriate ActorSystem
bestC = self.conventionLeaderAddr
else:
# distribute equally amongst candidates
C = [(K.remoteAddress, len(K.hasRemoteActors))
for K in remoteCandidates]
bestC = foldl(lambda best,possible:
best if best[1] <= possible[1] else possible,
C)[0]
thesplog('Requesting creation of %s%s on remote admin %s',
envelope.message.actorClassName,
' (%s)'%sourceHash if sourceHash else '',
bestC)
if bestC not in alreadyTried:
# Don't send request to this remote again, it has already
# been tried. This would also be indicated by that system
# performing the add of self.myAddress as below, but if
# there is disagreement between the local and remote
# addresses, this addition will prevent continual
# bounceback.
alreadyTried.append(bestC)
if self.myAddress not in alreadyTried:
# Don't send request back to this actor system: it cannot
# handle it
alreadyTried.append(self.myAddress)
envelope.message.alreadyTried = alreadyTried
return [TransmitIntent(bestC, envelope.message)]
def send_to_all_members(self, message, exception_list=None):
thesplog(' Sending to %i members', len(self._conventionMembers),level=logging.DEBUG)
return [HysteresisSend(M.remoteAddress, message)
for M in self._conventionMembers.values()
if M.remoteAddress not in (exception_list or [])]
def send_to_leaders_only(self, message, exception_list=None):
exception_list = exception_list or []
thesplog(' Sending to %i leaders', len(self._conventionAddresses) - len(exception_list), level=logging.DEBUG)
return [HysteresisSend(L, message) for L in self._conventionAddresses if L not in exception_list]
def _current_leader_stats(self):
thesplog(' Current leader stats', level=logging.DEBUG)
for each in self._current_avlbl_leaders:
prettyfied_data = {k: each[k] for k in each.keys() if k != 'actor_address'}
thesplog(' %s', prettyfied_data, level=logging.DEBUG)
#Last known TS is only relevant for convention leaders
thesplog(' Last known TS: %s', str(self._avlbl_leader_last_knwn_ts), level=logging.DEBUG)
class ConventioneerAdmin(GlobalNamesAdmin):
"""Extends the AdminCore+GlobalNamesAdmin with ActorSystem Convention
functionality to support multi-host configurations.
"""
def __init__(self, *args, **kw):
super(ConventioneerAdmin, self).__init__(*args, **kw)
self._cstate = LocalConventionState(
self.myAddress,
self.capabilities,
self._sCBStats,
getattr(self.transport, 'getAllConventionAddresses', lambda c: None))
self._hysteresisSender = HysteresisDelaySender(self._send_intent)
def _updateStatusResponse(self, resp):
self._cstate.updateStatusResponse(resp)
super(ConventioneerAdmin, self)._updateStatusResponse(resp)
def _activate(self):
# Called internally when this ActorSystem has been initialized
# and should be activated for operations.
super(ConventioneerAdmin, self)._activate()
if self.isShuttingDown(): return
self._performIO(self._cstate.setup_convention(True))
def h_ConventionInvite(self, envelope):
if self.isShuttingDown(): return
self._performIO(self._cstate.got_convention_invite(envelope.sender))
return True
def h_ConventionRegister(self, envelope):
if self.isShuttingDown(): return
self._performIO(self._cstate.got_convention_register(envelope.message))
return True
def h_ConventionDeRegister(self, envelope):
self._performIO(self._cstate.got_convention_deregister(envelope.message))
return True
def h_NewLeaderAvailable(self, envelope):
thesplog(' New leader available, sender: %s, leader: %s, last known time stamp: %i', \
envelope.sender, str(envelope.message.adminAddress), envelope.message.lastKnownTS, \
level=logging.DEBUG)
# There are two possibilities when a leader would receive its own message
# 1. It issued an availability message and caught it in the handler.
# At that time it needs to consume the message in the same sequence, and (monotonically) increase the timestamp.
            # 2. After it disseminated the leader-available message across the system, the message bounced off other members and came back.
# At that time it needs to "stop the buck", i.e., prevent an infinite loop
if self.myAddress == envelope.message.adminAddress:
if envelope.message.lastKnownTS > self._cstate._avlbl_leader_last_knwn_ts:
thesplog(' Assigning new time stamp %s', str(envelope.message.lastKnownTS), level=logging.DEBUG)
self._cstate._avlbl_leader_last_knwn_ts = envelope.message.lastKnownTS
else:
thesplog(' This is a self-message (timestamp= %s), skipping', str(envelope.message.lastKnownTS), level=logging.DEBUG)
self._cstate._current_leader_stats()
return False
for idx, curr_leader in enumerate(self._cstate._current_avlbl_leaders):
if curr_leader['actor_address'] == envelope.message.adminAddress:
curr_leader['status'] = 'UP'
curr_leader['last_known_ts'] = envelope.message.lastKnownTS
thesplog(' Updated status and timestamp for %s', str(envelope.message.adminAddress), level=logging.DEBUG)
thesplog(' Current marker: %i', self._cstate._convntn_ipv4_marker, level=logging.DEBUG)
if idx < self._cstate._convntn_ipv4_marker:
last_leader_idx = self._cstate._convntn_ipv4_marker
self._cstate._convntn_ipv4_marker = idx
thesplog(' Removing last leader (%s)', self._cstate._current_avlbl_leaders[last_leader_idx]['address'], level=logging.DEBUG)
thesplog(' Selecting current leader (%s)', self._cstate._current_avlbl_leaders[idx]['address'], level=logging.DEBUG)
else:
#do nothing
thesplog(' Current leader (%s) gets to stay', self._cstate._current_avlbl_leaders[self._cstate._convntn_ipv4_marker]['address'], level=logging.DEBUG)
break
self._cstate._current_leader_stats()
if len(self._cstate._conventionMembers) == 0:
# This means current member is secluded and should at least reach known leaders
thesplog(' Trying to reach all (other) leaders', level=logging.DEBUG)
self._performIO(self._cstate.send_to_leaders_only(envelope.message, [envelope.sender]))
else:
# This is the regular scenario
self._performIO(self._cstate.send_to_all_members(envelope.message, [envelope.sender]))
return False
def h_SystemShutdown(self, envelope):
self._performIO(self._cstate.got_system_shutdown())
        return super(ConventioneerAdmin, self).h_SystemShutdown(envelope)
def _performIO(self, iolist):
for msg in iolist:
if isinstance(msg, HysteresisCancel):
self._hysteresisSender.cancelSends(msg.cancel_addr)
elif isinstance(msg, HysteresisSend):
#self._send_intent(msg)
self._hysteresisSender.sendWithHysteresis(msg)
elif isinstance(msg, LogAggregator):
if getattr(self, 'asLogger', None):
thesplog('Setting log aggregator of %s to %s', self.asLogger, msg.aggregatorAddress)
self._send_intent(TransmitIntent(self.asLogger, msg))
elif isinstance(msg, LostRemote):
if hasattr(self.transport, 'lostRemote'):
self.transport.lostRemote(msg.lost_addr)
else:
self._send_intent(msg)
def run(self):
# Main loop for convention management. Wraps the lower-level
# transport with a stop at the next needed convention
# registration period to re-register.
transport_continue = True
try:
while not getattr(self, 'shutdown_completed', False) and \
not isinstance(transport_continue, Thespian__Run_Terminated):
ct = currentTime()
delay = min(self._cstate.convention_inattention_delay(ct),
ExpirationTimer(None).view(ct) if self._hysteresisSender.delay.expired() else
self._hysteresisSender.delay
)
# n.b. delay does not account for soon-to-expire
# pingValids, but since delay will not be longer than
# a CONVENTION_REREGISTRATION_PERIOD, the worst case
# is a doubling of a pingValid period (which should be fine).
transport_continue = self.transport.run(self.handleIncoming,
delay.remaining())
# Check Convention status based on the elapsed time
self._performIO(self._cstate.check_convention())
self._hysteresisSender.checkSends()
self._remove_expired_sources()
except Exception as ex:
import traceback
thesplog('ActorAdmin uncaught exception: %s', traceback.format_exc(),
level=logging.ERROR, exc_info=True)
thesplog('Admin time to die', level=logging.DEBUG)
# ---- Source Hash Transfers --------------------------------------------------
def h_SourceHashTransferRequest(self, envelope):
sourceHash = envelope.message.sourceHash
src = self._sources.get(sourceHash, None)
if not src or not src.source_valid:
self._send_intent(
TransmitIntent(envelope.sender,
SourceHashTransferReply(sourceHash)))
else:
# Older requests did not have the prefer_original field;
# maintain backward compatibility
orig = getattr(envelope.message, 'prefer_original', False)
self._send_intent(
TransmitIntent(
envelope.sender,
SourceHashTransferReply(
sourceHash,
src.orig_data if orig else src.zipsrc,
src.srcInfo,
original_form = orig)))
return True
def h_SourceHashTransferReply(self, envelope):
sourceHash = envelope.message.sourceHash
if sourceHash not in self._sources:
return True
if envelope.message.isValid():
# nb.. original_form added; use getattr for backward compatibility
if getattr(envelope.message, 'original_form', False):
if self._sourceAuthority:
self._send_intent(
TransmitIntent(
self._sourceAuthority,
ValidateSource(sourceHash,
envelope.message.sourceData,
getattr(envelope.message,
'sourceInfo', None))))
return True
else:
self._loadValidatedActorSource(sourceHash,
envelope.message.sourceData,
# sourceInfo added; backward compat.
getattr(envelope.message,
'sourceInfo', None))
return True
self._cancel_pending_actors(self._sources[sourceHash].pending_actors)
del self._sources[sourceHash]
return True
def h_ValidateSource(self, envelope):
if not envelope.message.sourceData and \
envelope.sender != self._cstate.conventionLeaderAddr:
# Propagate source unload requests to all convention members
self._performIO(
self._cstate.send_to_all_members(
envelope.message,
# Do not propagate if this is where the
# notification came from; prevents indefinite
# bouncing of this message as long as the
# convention structure is a DAG.
[envelope.sender]))
super(ConventioneerAdmin, self).h_ValidateSource(envelope)
return False # might have sent with hysteresis, so break out to local _run
def _acceptsRemoteLoadedSourcesFrom(self, pendingActorEnvelope):
allowed = self.capabilities.get('AllowRemoteActorSources', 'yes')
return allowed.lower() == 'yes' or \
(allowed == 'LeaderOnly' and
pendingActorEnvelope.sender == self._cstate.conventionLeaderAddr)
# ---- Remote Actor interactions ----------------------------------------------
def _not_compatible(self, createActorEnvelope):
# Called when the current Actor System is not compatible with
# the Actor's actorSystemCapabilityCheck. Forward this
# createActor request to another system that it's compatible
# with.
sourceHash = createActorEnvelope.message.sourceHash
childRequirements = createActorEnvelope.message.targetActorReq
childCName = createActorEnvelope.message.actorClassName
childClass = actualActorClass(childCName,
partial(loadModuleFromHashSource,
sourceHash,
self._sources)
if sourceHash else None)
acceptsCaps = lambda caps: checkActorCapabilities(childClass, caps,
childRequirements)
if createActorEnvelope.message.forActor is None:
# Request from external; use sender address
createActorEnvelope.message.forActor = createActorEnvelope.sender
iolist = self._cstate.forward_pending_to_remote_system(
childClass, createActorEnvelope, sourceHash, acceptsCaps)
for each in iolist:
# Expected to be only one; if the transmit fails,
# route it back here so that the next possible
# remote can be tried.
each.addCallback(onFailure=self._pending_send_failed)
self._performIO(iolist)
return True
def _get_missing_source_for_hash(self, sourceHash, createActorEnvelope):
# If this request was forwarded by a remote Admin and the
# sourceHash is not known locally, request it from the sending
# remote Admin
if self._cstate.sentByRemoteAdmin(createActorEnvelope) and \
self._acceptsRemoteLoadedSourcesFrom(createActorEnvelope):
self._sources[sourceHash] = PendingSource(sourceHash, None)
self._sources[sourceHash].pending_actors.append(createActorEnvelope)
self._hysteresisSender.sendWithHysteresis(
TransmitIntent(
createActorEnvelope.sender,
SourceHashTransferRequest(sourceHash,
bool(self._sourceAuthority))))
# sent with hysteresis, so break out to local _run
return False
# No remote Admin to send the source, so fail as normal.
return super(ConventioneerAdmin, self)._get_missing_source_for_hash(
sourceHash,
createActorEnvelope)
def _pending_send_failed(self, result, intent):
self.h_PendingActor(ReceiveEnvelope(msg=intent.message, sender=self.myAddress))
def h_NotifyOnSystemRegistration(self, envelope):
if envelope.message.enableNotification:
self._performIO(
self._cstate.add_notification_handler(envelope.sender))
else:
self._cstate.remove_notification_handler(envelope.sender)
return True
def h_PoisonMessage(self, envelope):
self._cstate.remove_notification_handler(envelope.sender)
def _handleChildExited(self, childAddr):
self._cstate.remove_notification_handler(childAddr)
return super(ConventioneerAdmin, self)._handleChildExited(childAddr)
def h_CapabilityUpdate(self, envelope):
msg = envelope.message
updateLocals = self._updSystemCapabilities(msg.capabilityName,
msg.capabilityValue)
if not self.isShuttingDown():
self._performIO(
self._cstate.capabilities_have_changed(self.capabilities))
if updateLocals:
self._capUpdateLocalActors()
return False # might have sent with Hysteresis, so return to _run loop here
|
[] |
[] |
[
"CONVENTION_REGISTRATION_MISS_MAX",
"CONVENTION_RESTART_PERIOD",
"CONVENTION_REREGISTRATION_PERIOD"
] |
[]
|
["CONVENTION_REGISTRATION_MISS_MAX", "CONVENTION_RESTART_PERIOD", "CONVENTION_REREGISTRATION_PERIOD"]
|
python
| 3 | 0 | |
nodeup/pkg/model/secrets_test.go
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"testing"
"k8s.io/kops/upup/pkg/fi"
)
func TestSecretBuilder(t *testing.T) {
RunGoldenTest(t, "tests/golden/minimal", "secret", func(nodeupModelContext *NodeupModelContext, target *fi.ModelBuilderContext) error {
builder := SecretBuilder{NodeupModelContext: nodeupModelContext}
return builder.Build(target)
})
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
flyway-core/src/main/java/org/flywaydb/core/internal/scanner/cloud/gcs/GCSScanner.java
|
/*
* Copyright 2010-2020 Redgate Software Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flywaydb.core.internal.scanner.cloud.gcs;
import com.google.api.gax.paging.Page;
import com.google.cloud.storage.Blob;
import com.google.cloud.storage.Bucket;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import org.flywaydb.core.api.Location;
import org.flywaydb.core.api.logging.Log;
import org.flywaydb.core.api.logging.LogFactory;
import org.flywaydb.core.internal.resource.LoadableResource;
import org.flywaydb.core.internal.resource.gcs.GCSResource;
import org.flywaydb.core.internal.scanner.cloud.CloudScanner;
import software.amazon.awssdk.core.exception.SdkClientException;
import java.nio.charset.Charset;
import java.util.*;
public class GCSScanner extends CloudScanner {
private static final Log LOG = LogFactory.getLog(GCSScanner.class);
/**
* Creates a new GCS scanner.
*
* @param encoding The encoding to use.
*/
public GCSScanner(Charset encoding) {
super(encoding);
}
@Override
public Collection<LoadableResource> scanForResources(final Location location) {
if (System.getenv("GOOGLE_APPLICATION_CREDENTIALS") == null) {
LOG.error("Can't read location " + location + "; GOOGLE_APPLICATION_CREDENTIALS environment variable not set");
return Collections.emptyList();
}
String bucketName = getBucketName(location);
Storage storage = StorageOptions.getDefaultInstance().getService();
Bucket bucket = storage.get(bucketName);
return getLoadableResources(bucketName, bucket.list());
}
private Collection<LoadableResource> getLoadableResources(String bucketName, Page<Blob> listObjectResult) {
Set<LoadableResource> resources = new TreeSet<>();
for (Blob blob : listObjectResult.iterateAll()) {
LOG.debug("Found GCS resource: " + bucketName.concat("/").concat(blob.getName()));
resources.add(new GCSResource(blob, encoding));
}
return resources;
}
}
|
[
"\"GOOGLE_APPLICATION_CREDENTIALS\""
] |
[] |
[
"GOOGLE_APPLICATION_CREDENTIALS"
] |
[]
|
["GOOGLE_APPLICATION_CREDENTIALS"]
|
java
| 1 | 0 | |
Examples/AWSSCV-SampleContactFlows/Code/awsscv_samples/awsscv_samples.py
|
import sys
from pip._internal import main
main(['install', 'boto3', '--target', '/tmp/'])
sys.path.insert(0, '/tmp/')
import os, json, logging, boto3, urllib3, calendar, time, cfnresponse
logger = logging.getLogger()
logger.setLevel(logging.getLevelName(os.getenv('lambda_logging_level', 'DEBUG')))
http = urllib3.PoolManager()
sub_map = {}
contact_flow_map = {}
results = []
def lambda_handler(event, context):
logger.debug(event)
try:
if event['RequestType'] == 'Delete':
cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
return {
'result': 'success',
'event': 'Delete'
}
task_root = os.environ['LAMBDA_TASK_ROOT']
sub_map['%%CONNECT_BASIC_QUEUE_ARN%%'] = os.getenv('connect_basic_queue_arn')
sub_map['%%INVOKE_TELEPHONY_FUNCTION_ARN%%'] = os.getenv('invoke_telephony_function_arn')
sub_map['%%INVOKE_SALESFORCE_REST_API_FUNCTION_ARN%%'] = os.getenv('invoke_salesforce_rest_api_function_arn')
sub_map['%%KVS_CONSUMER_TRIGGER_ARN%%'] = os.getenv('kvs_consumer_trigger_arn')
ts = calendar.timegm(time.gmtime())
# Process AWS_Sample_SCV_Agent_Transfer
with open(os.path.join(task_root, 'AWS_Sample_SCV_Agent_Transfer.json')) as f:
json_object = json.load(f)
result = create_contact_flow(os.getenv('connect_instance_id'), json_object, ts)
time.sleep(2)
# Process AWSSCVAgentWhisper
with open(os.path.join(task_root, 'AWSSCVAgentWhisper.json')) as f:
json_object = json.load(f)
result = create_contact_flow(os.getenv('connect_instance_id'), json_object, ts)
time.sleep(2)
# Process AWSSCVAgentWhisperWithStreaming
with open(os.path.join(task_root, 'AWSSCVAgentWhisperWithStreaming.json')) as f:
json_object = json.load(f)
result = create_contact_flow(os.getenv('connect_instance_id'), json_object, ts)
time.sleep(2)
# Process AWS_Sample_SCV_Queue_Transfer
with open(os.path.join(task_root, 'AWS_Sample_SCV_Queue_Transfer.json')) as f:
json_object = json.load(f)
result = create_contact_flow(os.getenv('connect_instance_id'), json_object, ts)
time.sleep(2)
# Process AWS_Sample_SCV_REST_Example
with open(os.path.join(task_root, 'AWS_Sample_SCV_REST_Example.json')) as f:
json_object = json.load(f)
result = create_contact_flow(os.getenv('connect_instance_id'), json_object, ts)
time.sleep(2)
# Process AWS_Sample_SCV_Inbound
with open(os.path.join(task_root, 'AWS_Sample_SCV_Inbound.json')) as f:
json_object = json.load(f)
result = create_contact_flow(os.getenv('connect_instance_id'), json_object, ts)
time.sleep(2)
# Process AWS_Sample_SCV_Inbound_Flow_with_Transcription
with open(os.path.join(task_root, 'AWS_Sample_SCV_Inbound_Flow_with_Transcription.json')) as f:
json_object = json.load(f)
result = create_contact_flow(os.getenv('connect_instance_id'), json_object, ts)
time.sleep(2)
# Process AWS_Sample_SCV_Outbound_Flow_with_Transcription
with open(os.path.join(task_root, 'AWS_Sample_SCV_Outbound_Flow_with_Transcription.json')) as f:
json_object = json.load(f)
result = create_contact_flow(os.getenv('connect_instance_id'), json_object, ts)
time.sleep(2)
cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
return contact_flow_map
except Exception as e:
logger.error(e)
cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
def create_contact_flow(connect_instance_id, json_object, ts):
name = json_object['ContactFlow']['Name']
type = json_object['ContactFlow']['Type']
description = json_object['ContactFlow']['Description']
sub_content = json_object['ContactFlow']['Content']
for key, value in sub_map.items():
sub_content = sub_content.replace(key, value)
try:
logger.debug(sub_content)
result = boto3.client('connect').create_contact_flow(
InstanceId=connect_instance_id,
Name=name + '-' + str(ts),
Type=type,
Description=description,
Content=sub_content
)
sub_map['%%' + name + '%%'] = result['ContactFlowArn']
contact_flow_map[name] = { 'ContactFlowId': result['ContactFlowId'], 'ContactFlowArn': result['ContactFlowArn'] }
return result
except Exception as e:
logger.error(e)
logger.error(name)
logger.error(sub_content)
|
[] |
[] |
[
"kvs_consumer_trigger_arn",
"LAMBDA_TASK_ROOT",
"invoke_salesforce_rest_api_function_arn",
"lambda_logging_level",
"connect_basic_queue_arn",
"invoke_telephony_function_arn",
"connect_instance_id"
] |
[]
|
["kvs_consumer_trigger_arn", "LAMBDA_TASK_ROOT", "invoke_salesforce_rest_api_function_arn", "lambda_logging_level", "connect_basic_queue_arn", "invoke_telephony_function_arn", "connect_instance_id"]
|
python
| 7 | 0 | |
lib/airflow/tests/providers/amazon/aws/operators/test_batch.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# pylint: disable=missing-docstring
import unittest
from unittest import mock
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.batch_client import AwsBatchClientHook
from airflow.providers.amazon.aws.operators.batch import AwsBatchOperator
# Use dummy AWS credentials
AWS_REGION = "eu-west-1"
AWS_ACCESS_KEY_ID = "airflow_dummy_key"
AWS_SECRET_ACCESS_KEY = "airflow_dummy_secret"
JOB_NAME = "51455483-c62c-48ac-9b88-53a6a725baa3"
JOB_ID = "8ba9d676-4108-4474-9dca-8bbac1da9b19"
RESPONSE_WITHOUT_FAILURES = {
"jobName": JOB_NAME,
"jobId": JOB_ID,
}
class TestAwsBatchOperator(unittest.TestCase):
MAX_RETRIES = 2
STATUS_RETRIES = 3
@mock.patch.dict("os.environ", AWS_DEFAULT_REGION=AWS_REGION)
@mock.patch.dict("os.environ", AWS_ACCESS_KEY_ID=AWS_ACCESS_KEY_ID)
@mock.patch.dict("os.environ", AWS_SECRET_ACCESS_KEY=AWS_SECRET_ACCESS_KEY)
@mock.patch("airflow.providers.amazon.aws.hooks.batch_client.AwsBaseHook.get_client_type")
def setUp(self, get_client_type_mock):
self.get_client_type_mock = get_client_type_mock
self.batch = AwsBatchOperator(
task_id="task",
job_name=JOB_NAME,
job_queue="queue",
job_definition="hello-world",
max_retries=self.MAX_RETRIES,
status_retries=self.STATUS_RETRIES,
parameters=None,
overrides={},
array_properties=None,
aws_conn_id='airflow_test',
region_name="eu-west-1",
)
self.client_mock = self.get_client_type_mock.return_value
self.assertEqual(self.batch.hook.client, self.client_mock) # setup client property
# don't pause in unit tests
self.mock_delay = mock.Mock(return_value=None)
self.batch.delay = self.mock_delay
self.mock_exponential_delay = mock.Mock(return_value=0)
self.batch.exponential_delay = self.mock_exponential_delay
# Assign a job ID for most tests, so they don't depend on a job submission.
self.assertIsNone(self.batch.job_id)
self.batch.job_id = JOB_ID
def test_init(self):
self.assertEqual(self.batch.job_id, JOB_ID)
self.assertEqual(self.batch.job_name, JOB_NAME)
self.assertEqual(self.batch.job_queue, "queue")
self.assertEqual(self.batch.job_definition, "hello-world")
self.assertEqual(self.batch.waiters, None)
self.assertEqual(self.batch.hook.max_retries, self.MAX_RETRIES)
self.assertEqual(self.batch.hook.status_retries, self.STATUS_RETRIES)
self.assertEqual(self.batch.parameters, {})
self.assertEqual(self.batch.overrides, {})
self.assertEqual(self.batch.array_properties, {})
self.assertEqual(self.batch.hook.region_name, "eu-west-1")
self.assertEqual(self.batch.hook.aws_conn_id, "airflow_test")
self.assertEqual(self.batch.hook.client, self.client_mock)
self.get_client_type_mock.assert_called_once_with("batch", region_name="eu-west-1")
def test_template_fields_overrides(self):
self.assertEqual(
self.batch.template_fields,
(
"job_name",
"overrides",
"parameters",
),
)
@mock.patch.object(AwsBatchClientHook, "wait_for_job")
@mock.patch.object(AwsBatchClientHook, "check_job_success")
def test_execute_without_failures(self, check_mock, wait_mock):
# JOB_ID is in RESPONSE_WITHOUT_FAILURES
self.client_mock.submit_job.return_value = RESPONSE_WITHOUT_FAILURES
self.batch.job_id = None
self.batch.waiters = None # use default wait
self.batch.execute(None)
self.client_mock.submit_job.assert_called_once_with(
jobQueue="queue",
jobName=JOB_NAME,
containerOverrides={},
jobDefinition="hello-world",
arrayProperties={},
parameters={},
)
self.assertEqual(self.batch.job_id, JOB_ID)
wait_mock.assert_called_once_with(JOB_ID)
check_mock.assert_called_once_with(JOB_ID)
def test_execute_with_failures(self):
self.client_mock.submit_job.return_value = ""
with self.assertRaises(AirflowException):
self.batch.execute(None)
self.client_mock.submit_job.assert_called_once_with(
jobQueue="queue",
jobName=JOB_NAME,
containerOverrides={},
jobDefinition="hello-world",
arrayProperties={},
parameters={},
)
@mock.patch.object(AwsBatchClientHook, "check_job_success")
def test_wait_job_complete_using_waiters(self, check_mock):
mock_waiters = mock.Mock()
self.batch.waiters = mock_waiters
self.client_mock.submit_job.return_value = RESPONSE_WITHOUT_FAILURES
self.client_mock.describe_jobs.return_value = {"jobs": [{"jobId": JOB_ID, "status": "SUCCEEDED"}]}
self.batch.execute(None)
mock_waiters.wait_for_job.assert_called_once_with(JOB_ID)
check_mock.assert_called_once_with(JOB_ID)
def test_kill_job(self):
self.client_mock.terminate_job.return_value = {}
self.batch.on_kill()
self.client_mock.terminate_job.assert_called_once_with(jobId=JOB_ID, reason="Task killed by the user")
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.viewcode"]
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
autodoc_mock_imports = ["adafruit_bus_device", "micropython", "adafruit_register"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.4", None),
"BusDevice": (
"https://circuitpython.readthedocs.io/projects/busdevice/en/latest/",
None,
),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit BNO055 Library"
copyright = "2017 Radomir Dopieralski"
author = "Radomir Dopieralski"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
except:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "AdafruitBNO055Librarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"AdafruitBNO055Library.tex",
"Adafruit BNO055 Library Documentation",
"Radomir Dopieralski",
"manual",
)
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"adafruitBNO055library",
"Adafruit BNO055 Library Documentation",
[author],
1,
)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AdafruitBNO055Library",
"Adafruit BNO055 Library Documentation",
author,
"AdafruitBNO055Library",
"One line description of project.",
"Miscellaneous",
)
]
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
pkg/config/config.go
|
package config
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
)
// Config defines the structure of the config files
type Config struct {
General map[string]interface{} `json:"general"`
Stages map[string]map[string]interface{} `json:"stages"`
Steps map[string]map[string]interface{} `json:"steps"`
}
// StepConfig defines the structure for merged step configuration
type StepConfig struct {
Config map[string]interface{}
}
// ReadConfig loads config and returns its content
func (c *Config) ReadConfig(configuration io.ReadCloser) error {
defer configuration.Close()
content, err := ioutil.ReadAll(configuration)
if err != nil {
return errors.Wrapf(err, "error reading %v", configuration)
}
err = yaml.Unmarshal(content, &c)
if err != nil {
return NewParseError(fmt.Sprintf("error unmarshalling %q: %v", content, err))
}
return nil
}
// ApplyAliasConfig adds configuration values available on aliases to primary configuration parameters
func (c *Config) ApplyAliasConfig(parameters []StepParameters, filters StepFilters, stageName, stepName string) {
for _, p := range parameters {
c.General = setParamValueFromAlias(c.General, filters.General, p)
if c.Stages[stageName] != nil {
c.Stages[stageName] = setParamValueFromAlias(c.Stages[stageName], filters.Stages, p)
}
if c.Steps[stepName] != nil {
c.Steps[stepName] = setParamValueFromAlias(c.Steps[stepName], filters.Steps, p)
}
}
}
func setParamValueFromAlias(configMap map[string]interface{}, filter []string, p StepParameters) map[string]interface{} {
if configMap[p.Name] == nil && sliceContains(filter, p.Name) {
for _, a := range p.Aliases {
configMap[p.Name] = getDeepAliasValue(configMap, a.Name)
if configMap[p.Name] != nil {
return configMap
}
}
}
return configMap
}
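// getDeepAliasValue resolves an alias value that may refer to a nested
// configuration entry: a "/" in the key is treated as a path separator into
// nested maps.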
func getDeepAliasValue(configMap map[string]interface{}, key string) interface{} {
parts := strings.Split(key, "/")
if len(parts) > 1 {
if configMap[parts[0]] == nil {
return nil
}
return getDeepAliasValue(configMap[parts[0]].(map[string]interface{}), strings.Join(parts[1:], "/"))
}
return configMap[key]
}
// GetStepConfig provides merged step configuration using defaults, config, if available
func (c *Config) GetStepConfig(flagValues map[string]interface{}, paramJSON string, configuration io.ReadCloser, defaults []io.ReadCloser, filters StepFilters, parameters []StepParameters, stageName, stepName string) (StepConfig, error) {
var stepConfig StepConfig
var d PipelineDefaults
if err := c.ReadConfig(configuration); err != nil {
switch err.(type) {
case *ParseError:
return StepConfig{}, errors.Wrap(err, "failed to parse custom pipeline configuration")
default:
//ignoring unavailability of config file since considered optional
}
}
c.ApplyAliasConfig(parameters, filters, stageName, stepName)
if err := d.ReadPipelineDefaults(defaults); err != nil {
switch err.(type) {
case *ParseError:
return StepConfig{}, errors.Wrap(err, "failed to parse pipeline default configuration")
default:
//ignoring unavailability of defaults since considered optional
}
}
// first: read defaults & merge general -> steps (-> general -> steps ...)
for _, def := range d.Defaults {
def.ApplyAliasConfig(parameters, filters, stageName, stepName)
stepConfig.mixIn(def.General, filters.General)
stepConfig.mixIn(def.Steps[stepName], filters.Steps)
}
// second: read config & merge - general -> steps -> stages
stepConfig.mixIn(c.General, filters.General)
stepConfig.mixIn(c.Steps[stepName], filters.Steps)
stepConfig.mixIn(c.Stages[stageName], filters.Stages)
// third: merge parameters provided via env vars
stepConfig.mixIn(envValues(filters.All), filters.All)
// fourth: if parameters are provided in JSON format merge them
if len(paramJSON) != 0 {
var params map[string]interface{}
		json.Unmarshal([]byte(paramJSON), &params)
//apply aliases
for _, p := range parameters {
params = setParamValueFromAlias(params, filters.Parameters, p)
}
stepConfig.mixIn(params, filters.Parameters)
}
// fifth: merge command line flags
if flagValues != nil {
stepConfig.mixIn(flagValues, filters.Parameters)
}
return stepConfig, nil
}
// GetStepConfigWithJSON provides merged step configuration using a provided stepConfigJSON with additional flags provided
func GetStepConfigWithJSON(flagValues map[string]interface{}, stepConfigJSON string, filters StepFilters) StepConfig {
var stepConfig StepConfig
stepConfigMap := map[string]interface{}{}
json.Unmarshal([]byte(stepConfigJSON), &stepConfigMap)
stepConfig.mixIn(stepConfigMap, filters.All)
// ToDo: mix in parametersJSON
if flagValues != nil {
stepConfig.mixIn(flagValues, filters.Parameters)
}
return stepConfig
}
// GetJSON returns JSON representation of an object
func GetJSON(data interface{}) (string, error) {
result, err := json.Marshal(data)
if err != nil {
return "", errors.Wrapf(err, "error marshalling json: %v", err)
}
return string(result), nil
}
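// envValues collects parameter values from environment variables named
// "PIPER_<parameter>" for every parameter in the filter list.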
func envValues(filter []string) map[string]interface{} {
vals := map[string]interface{}{}
for _, param := range filter {
if envVal := os.Getenv("PIPER_" + param); len(envVal) != 0 {
vals[param] = os.Getenv("PIPER_" + param)
}
}
return vals
}
func (s *StepConfig) mixIn(mergeData map[string]interface{}, filter []string) {
if s.Config == nil {
s.Config = map[string]interface{}{}
}
s.Config = filterMap(merge(s.Config, mergeData), filter)
}
func filterMap(data map[string]interface{}, filter []string) map[string]interface{} {
result := map[string]interface{}{}
if data == nil {
data = map[string]interface{}{}
}
for key, value := range data {
if len(filter) == 0 || sliceContains(filter, key) {
result[key] = value
}
}
return result
}
func merge(base, overlay map[string]interface{}) map[string]interface{} {
result := map[string]interface{}{}
if base == nil {
base = map[string]interface{}{}
}
for key, value := range base {
result[key] = value
}
for key, value := range overlay {
if val, ok := value.(map[string]interface{}); ok {
if valBaseKey, ok := base[key].(map[string]interface{}); !ok {
result[key] = merge(map[string]interface{}{}, val)
} else {
result[key] = merge(valBaseKey, val)
}
} else {
result[key] = value
}
}
return result
}
func sliceContains(slice []string, find string) bool {
for _, elem := range slice {
if elem == find {
return true
}
}
return false
}
|
[
"\"PIPER_\" + param",
"\"PIPER_\" + param"
] |
[] |
[
"PIPER_\" + para"
] |
[]
|
["PIPER_\" + para"]
|
go
| 1 | 0 | |
src/examplePlugins/tasks_approved.py
|
# Copyright 2018 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk license agreement
# provided at the time of installation or download, or which otherwise accompanies
# this software in either electronic or hard copy form.
#
# See docs folder for detailed usage info.
import os
import shotgun_api3
def registerCallbacks(reg):
"""
Register our callbacks.
:param reg: A Registrar instance provided by the event loop handler.
"""
# Grab authentication env vars for this plugin. Install these into the env
# if they don't already exist.
server = os.environ["SG_SERVER"]
script_name = os.environ["SGDAEMON_TASKSAPPROVED_NAME"]
script_key = os.environ["SGDAEMON_TASKSAPPROVED_KEY"]
args = {
"task_status_field": "sg_status_list",
"task_status": ["fin"],
"upstream_tasks_field": "upstream_tasks",
"downstream_tasks_field": "downstream_tasks",
"downstream_task_status_activate": ["wtg"],
"downstream_task_status_active": "ip",
"downstream_task_status_recurse": ["na"],
"note_status_field": "sg_status_list",
"close_notes": True,
"closed_note_status": "clsd",
}
# Grab an sg connection for the validator.
sg = shotgun_api3.Shotgun(server, script_name=script_name, api_key=script_key)
# Bail if our validator fails.
if not is_valid(sg, reg.logger, args):
reg.logger.warning("Plugin is not valid, will not register callback.")
return
# Register our callback with the Shotgun_%s_Change event and tell the logger
# about it.
reg.registerCallback(
script_name,
script_key,
tasks_approved,
{"Shotgun_Task_Change": args["task_status_field"]},
args,
)
reg.logger.debug("Registered callback.")
def is_valid(sg, logger, args):
"""
Validate our args.
:param sg: Shotgun API handle.
:param logger: Logger instance.
:param args: Any additional misc arguments passed through this plugin.
:returns: True if plugin is valid, None if not.
"""
# Make sure we have a valid sg connection.
try:
sg.find_one("Project", [])
except Exception as e:
logger.warning(e)
return
return True
def tasks_approved(sg, logger, event, args):
"""
Handles the logic to approve a Task and update associated entities.
:param sg: Shotgun API object handle.
:param logger: Logging object.
:param event: Event object.
:param args: Any args that have been passed in from the callback.
"""
# Return if we don't have all the field values we need.
if not event.get("meta", {}).get("entity_id"):
return
# Make some vars for convenience.
entity_id = event["meta"]["entity_id"]
# Re-query the Task to gather extra field values.
task = sg.find_one(
"Task",
[["id", "is", entity_id]],
[args["task_status_field"], args["downstream_tasks_field"]],
)
# Return if no Task is found (this can happen when the Task is deleted).
if not task:
logger.debug("Could not find Task ID %s" % entity_id)
return
# Return if our Task isn't set to a valid task_status.
elif task[args["task_status_field"]] not in args["task_status"]:
logger.debug(
"Task with ID %s not set to one of %s, skipping."
% (task["id"], args["task_status"])
)
return
# Init our SG batch update list variable.
batch_updates = []
# Get downstream tasks that need to be updated.
build_updates_for_downstream_tasks(sg, logger, task, batch_updates, args)
# Find any Notes linked to the current Task and close them.
if (
args.get("close_notes")
and args.get("note_status_field")
and args.get("closed_note_status")
):
notes = sg.find(
"Note",
[["tasks.Task.id", "is", task["id"]]],
["tasks", args["note_status_field"]],
)
for note in notes:
if all_note_tasks_approved(sg, note, args):
batch_updates.append(
{
"request_type": "update",
"entity_type": "Note",
"entity_id": note["id"],
"data": {args["note_status_field"]: args["closed_note_status"]},
}
)
# If we have something to do, do it!
if batch_updates:
sg.batch(batch_updates)
logger.info(
'All Notes attached to Task with ID %s will be set to "%s".'
% (task["id"], args["closed_note_status"])
)
else:
logger.info("Task with ID %s: nothing to do, skipping." % task["id"])
def build_updates_for_downstream_tasks(sg, logger, task, batch_updates, args):
"""
Loop through our downstream tasks and append any necessary updates to the
batch_updates list.
:param sg: A Shotgun API handle object.
:param task: A Shotgun Task dictionary.
:param batch_updates: A list sent to a Shotgun API batch command.
:param args: A Dict of user args.
"""
# Return if there are no downstream tasks.
if not task.get(args["downstream_tasks_field"]):
return
# Re-query all the downstream Tasks to gather their status and downstream
# tasks values.
downstream_tasks = sg.find(
"Task",
[["id", "in", [t["id"] for t in task[args["downstream_tasks_field"]]]]],
[
args["task_status_field"],
args["upstream_tasks_field"],
args["downstream_tasks_field"],
],
)
# Loop through our downstream tasks and append any necessary Task status
# updates to the batch_updates list.
for downstream_task in downstream_tasks:
# Make sure all upstream Tasks are also set to a valid status.
upstream_check = True
if len(downstream_task[args["upstream_tasks_field"]]) > 1:
for upstream_task in downstream_task[args["upstream_tasks_field"]]:
upstream_task = sg.find_one(
"Task",
[["id", "is", upstream_task["id"]]],
["sg_status_list"],
)
if (
upstream_task["sg_status_list"] not in args["task_status"]
and upstream_task["sg_status_list"]
not in args["downstream_task_status_recurse"]
):
upstream_check = False
break
if not upstream_check:
continue
if (
downstream_task.get(args["task_status_field"])
in args["downstream_task_status_activate"]
):
batch_updates.append(
{
"request_type": "update",
"entity_type": "Task",
"entity_id": downstream_task["id"],
"data": {
args["task_status_field"]: args["downstream_task_status_active"]
},
}
)
elif (
args.get("downstream_task_status_recurse")
and downstream_task.get(args["task_status_field"])
in args["downstream_task_status_recurse"]
):
build_updates_for_downstream_tasks(
sg, logger, downstream_task, batch_updates, args
)
def all_note_tasks_approved(sg, note, args):
"""
Determine if all Notes on the relevent Task have been approved.
:param sg: A Shotgun API handle object.
:param note: A Shotgun Note dictionary.
:param args: A dict of plugin args.
:returns: True if all Notes on the relevant Task have been approved, False
otherwise.
"""
# Re-query all Tasks attached to the Note to gather note_status_field values.
note_tasks = sg.find(
"Task",
[["id", "in", [t["id"] for t in note.get("tasks")]]],
[args["note_status_field"]],
)
# Loop through all Tasks attached to the Note and return False if any
# note_status_field values are not equal to our task_status.
for note_task in note_tasks:
        if note_task.get(args["note_status_field"]) not in args["task_status"]:
return False
return True
|
[] |
[] |
[
"SGDAEMON_TASKSAPPROVED_NAME",
"SG_SERVER",
"SGDAEMON_TASKSAPPROVED_KEY"
] |
[]
|
["SGDAEMON_TASKSAPPROVED_NAME", "SG_SERVER", "SGDAEMON_TASKSAPPROVED_KEY"]
|
python
| 3 | 0 | |
main.py
|
#!/usr/bin/env python
import argparse
import logging
import os
import re
import sys
import click
def valid_date(date_string):
DATE_INPUT_FORMAT = "%d-%m-%Y"
from datetime import datetime
try:
return datetime.strptime(date_string, DATE_INPUT_FORMAT)
except ValueError:
msg = "Not a valid date: '{0}'.".format(date_string)
raise argparse.ArgumentTypeError(msg)
@click.group()
def cli():
pass
@cli.command()
@click.option(
"--open",
"open_server",
is_flag=True,
help="Open the server for communication from outside",
default=False,
)
@click.option("--debug-js", is_flag=True, help="Don't minify the JavaScript files")
def testserver(open_server, debug_js):
from anyway import app
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.DEBUG,
datefmt='%Y-%m-%d %H:%M:%S')
if debug_js:
app.config["ASSETS_DEBUG"] = True
default_host = "0.0.0.0" if open_server else "127.0.0.1"
app.run(debug=True, host=os.getenv("IP", default_host), port=int(os.getenv("PORT", 5000)))
@cli.group()
def update_news_flash():
pass
@update_news_flash.command()
@click.option("--source", default="", type=str)
@click.option("--news_flash_id", default="", type=str)
def update(source, news_flash_id):
from anyway.parsers import news_flash
if not source:
source = None
if not news_flash_id:
news_flash_id = None
return news_flash.update_all_in_db(source, news_flash_id)
@update_news_flash.command()
def remove_duplicate_news_flash_rows():
from anyway.parsers import news_flash_db_adapter
news_flash_db_adapter.init_db().remove_duplicate_rows()
@cli.group()
def process():
pass
@process.command()
@click.option("--specific_folder", is_flag=True, default=False)
@click.option("--delete_all", is_flag=True)
@click.option("--path", type=str, default="static/data/cbs")
@click.option("--batch_size", type=int, default=5000)
@click.option("--delete_start_date", type=str, default=None)
@click.option("--load_start_year", type=str, default="2005")
@click.option("--from_email", is_flag=True, default=False)
@click.option("--username", default="")
@click.option("--password", default="")
@click.option("--email_search_start_date", type=str, default="") # format - DD.MM.YYYY
def cbs(
specific_folder,
delete_all,
path,
batch_size,
delete_start_date,
load_start_year,
from_email,
username,
password,
email_search_start_date,
):
from anyway.parsers.cbs import main
return main(
specific_folder=specific_folder,
delete_all=delete_all,
path=path,
batch_size=batch_size,
delete_start_date=delete_start_date,
load_start_year=load_start_year,
from_email=from_email,
username=username,
password=password,
email_search_start_date=email_search_start_date,
)
@process.command()
def news_flash():
from anyway.parsers.news_flash import scrape_all
return scrape_all()
@process.command()
@click.option("--specific_folder", is_flag=True, default=False)
@click.option("--delete_all", is_flag=True)
@click.option("--path", type=str, default="static/data/cbs_vehicles_registered")
def registered_vehicles(specific_folder, delete_all, path):
from anyway.parsers.registered import main
return main(specific_folder=specific_folder, delete_all=delete_all, path=path)
@process.command()
@click.option("--path", type=str, default="static/data/traffic_volume")
def traffic_volume(path):
from anyway.parsers.traffic_volume import main
return main(path)
@process.command()
@click.argument("filename")
def rsa(filename):
from anyway.parsers.rsa import parse
return parse(filename)
@process.command()
@click.argument("filename", type=str, default="static/data/segments/road_segments.xlsx")
def road_segments(filename):
from anyway.parsers.road_segments import parse
return parse(filename)
@process.command()
@click.argument("filepath", type=str, default="static/data/schools/schools.csv")
@click.option("--batch_size", type=int, default=5000)
def schools(filepath, batch_size):
from anyway.parsers.schools import parse
return parse(filepath=filepath, batch_size=batch_size)
@process.command()
@click.argument(
"schools_description_filepath", type=str, default="static/data/schools/schools_description.xlsx"
)
@click.argument(
"schools_coordinates_filepath", type=str, default="static/data/schools/schools_coordinates.xlsx"
)
@click.option("--batch_size", type=int, default=5000)
def schools_with_description(
schools_description_filepath, schools_coordinates_filepath, batch_size
):
from anyway.parsers.schools_with_description import parse
return parse(
schools_description_filepath=schools_description_filepath,
schools_coordinates_filepath=schools_coordinates_filepath,
batch_size=batch_size,
)
@process.command()
@click.option(
"--start_date", default="01-01-2014", type=valid_date, help="The Start Date - format DD-MM-YYYY"
)
@click.option(
"--end_date", default="31-12-2018", type=valid_date, help="The End Date - format DD-MM-YYYY"
)
@click.option("--distance", default=0.5, help="float In KM. Default is 0.5 (500m)", type=float)
@click.option("--batch_size", type=int, default=5000)
def injured_around_schools(start_date, end_date, distance, batch_size):
from anyway.parsers.injured_around_schools import parse
return parse(start_date=start_date, end_date=end_date, distance=distance, batch_size=batch_size)
@process.command()
@click.option(
"--start_date", default="01-01-2019", type=valid_date, help="The Start Date - format DD-MM-YYYY"
)
@click.option(
"--end_date", default="01-01-2020", type=valid_date, help="The End Date - format DD-MM-YYYY"
)
def waze_data(start_date, end_date):
from anyway.parsers.waze.waze_data_parser import waze_parser
return waze_parser(
bucket_name="anyway-hasadna.appspot.com", start_date=start_date, end_date=end_date
)
@process.command()
@click.argument("filename", type=str, default="static/data/embedded_reports/embedded_reports.csv")
def embedded_reports(filename):
from anyway.parsers.embedded_reports import parse
return parse(filename)
@process.command()
@click.option('--update', 'update', is_flag=True,
help='Recalculates the cache (default is False)', default=False)
@click.option('--no_info', 'info', is_flag=True,
help='Prints info on cache (default is True)', default=True)
def infographics_data_cache(info, update):
"""Will refresh the infographics data cache"""
from anyway.parsers.infographics_data_cache_updater import main
return main(update=update, info=info)
@cli.group()
def preprocess():
pass
@preprocess.command()
@click.option("--path", type=str)
def preprocess_cbs(path):
from anyway.parsers.preprocessing_cbs_files import update_cbs_files_names
return update_cbs_files_names(path)
@cli.group()
def create_views():
pass
@create_views.command()
def cbs_views():
from anyway.parsers.cbs import create_views
return create_views()
@cli.group()
def update_dictionary_tables():
pass
@update_dictionary_tables.command()
@click.option("--path", type=str, default="static/data/cbs")
def update_cbs(path):
from anyway.parsers.cbs import update_dictionary_tables
return update_dictionary_tables(path)
@cli.group()
def truncate_dictionary_tables():
pass
@truncate_dictionary_tables.command()
@click.option("--path", type=str)
def truncate_cbs(path):
from anyway.parsers.cbs import truncate_dictionary_tables
return truncate_dictionary_tables(path)
@cli.command()
@click.argument("identifiers", nargs=-1)
def load_discussions(identifiers):
from anyway.models import DiscussionMarker
from flask_sqlalchemy import SQLAlchemy
from anyway.utilities import init_flask
app = init_flask()
db = SQLAlchemy(app)
identifiers = identifiers or sys.stdin
for identifier in identifiers:
identifier = identifier.strip()
m = re.match(r"\((\d+\.\d+),\s*(\d+\.\d+)\)", identifier)
if not m:
logging.error("Failed processing: " + identifier)
continue
(latitude, longitude) = m.group(1, 2)
marker = DiscussionMarker.parse(
{
"latitude": latitude,
"longitude": longitude,
"title": identifier,
"identifier": identifier,
}
)
try:
db.session.add(marker)
db.session.commit()
logging.info("Added: " + identifier)
except Exception as e:
db.session.rollback()
            logging.warning("Failed: " + identifier + ": " + str(e))
@cli.group()
def scripts():
pass
@scripts.command()
@click.option(
"--start_date", default="01-01-2013", type=valid_date, help="The Start Date - format DD-MM-YYYY"
)
@click.option(
"--end_date", default="31-12-2017", type=valid_date, help="The End Date - format DD-MM-YYYY"
)
@click.option("--distance", default=0.5, help="float In KM. Default is 0.5 (500m)", type=float)
@click.option(
"--output_path", default="output", help="output file of the results. Default is output.csv"
)
def accidents_around_schools(start_date, end_date, distance, output_path):
from anyway.accidents_around_schools import main
return main(
start_date=start_date, end_date=end_date, distance=distance, output_path=output_path
)
if __name__ == "__main__":
cli(sys.argv[1:]) # pylint: disable=too-many-function-args
|
[] |
[] |
[
"PORT",
"IP"
] |
[]
|
["PORT", "IP"]
|
python
| 2 | 0 | |
main.go
|
package main
import (
"fmt"
"flag"
"os"
"log"
"strconv"
"io/ioutil"
"bufio"
"strings"
"time"
"net/http"
"net/url"
)
func checkErr(err error) {
if err != nil {
log.Fatal("something went wrong: ", err)
}
}
func checkReqErr(err error) {
if err != nil {
urlErr := err.(*url.Error)
if urlErr.Timeout() {
log.Fatal("request timed out")
} else {
log.Fatal("something went wrong: ", err)
}
}
}
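// checkOk aborts if the Telegram API response body reports "ok":false.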
func checkOk(data []byte) {
if dataStr := string(data); strings.Contains(dataStr, "\"ok\":false") {
log.Fatal("something went wrong: ", dataStr)
}
}
func main() {
botToken := os.Getenv("TG_BOT_TOKEN"); if len(botToken) == 0 {
log.Fatal("missing TG_BOT_TOKEN environment variable")
}
chatId, err := strconv.Atoi(os.Getenv("TG_CHAT_ID")); if err != nil {
log.Fatal("missing or invalid TG_CHAT_ID environment variable")
}
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
flag.PrintDefaults()
}
var msg string
flag.StringVar(&msg, "m", "", "the message to send")
var file string
flag.StringVar(&file, "f", "", "file containing the message to send")
flag.Parse()
client := &http.Client {
Timeout: 10 * time.Second,
}
// check if command is run from pipe
fi, _ := os.Stdin.Stat()
if (fi.Mode() & os.ModeCharDevice) == 0 {
scanner := bufio.NewScanner(os.Stdin)
var sb strings.Builder
scanner.Scan()
sb.WriteString(scanner.Text())
for scanner.Scan() {
sb.WriteString("\n")
sb.WriteString(scanner.Text())
}
msg = sb.String()
} else {
if len(msg) == 0 && len(file) == 0 {
flag.Usage()
log.Fatal("no message specified")
} else if len(msg) > 0 && len(file) > 0 {
flag.Usage()
log.Fatal("cannot specify both -m and -f flags at the same time")
} else if len(file) > 0 {
data, err := ioutil.ReadFile(file)
checkErr(err)
msg = string(data)
}
// no need to check for -m flag because that's the default
}
baseUrl := fmt.Sprintf("https://api.telegram.org/bot%s/", botToken)
getMeUrl := baseUrl + "getMe"
res, err := client.Get(getMeUrl)
checkReqErr(err)
data, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
checkOk(data)
sendMessageUrl := baseUrl + "sendMessage"
	reqBody := fmt.Sprintf("chat_id=%d&text=%s&parse_mode=MarkdownV2", chatId, url.QueryEscape(msg))
res, err = client.Post(sendMessageUrl, "application/x-www-form-urlencoded", strings.NewReader(reqBody))
checkReqErr(err)
data, _ = ioutil.ReadAll(res.Body)
res.Body.Close()
checkOk(data)
}
|
[
"\"TG_BOT_TOKEN\"",
"\"TG_CHAT_ID\""
] |
[] |
[
"TG_BOT_TOKEN",
"TG_CHAT_ID"
] |
[]
|
["TG_BOT_TOKEN", "TG_CHAT_ID"]
|
go
| 2 | 0 | |
scripts/generate_results_report.py
|
# Collect the results of the various model test runs
import json
import os
SUMMARY_FILE = os.environ["SUMMARY_FILE"]
CONFIG = os.environ["CONFIG"]
DATASET = os.environ["DATASET_NAME"]
task_mapping = {
"story_report.json": "story_prediction",
"intent_report.json": "intent_classification",
"CRFEntityExtractor_report.json": "entity_prediction",
"DIETClassifier_report.json": "entity_prediction",
"response_selection_report.json": "response_selection",
}
def generate_json(file, task, data):
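    """Merge the metrics read from one report file into the nested
    {dataset: {config: {task: metrics}}} summary structure."""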
if not DATASET in data:
data = {DATASET: {CONFIG: {}}, **data}
elif not CONFIG in data[DATASET]:
data[DATASET] = {CONFIG: {}, **data[DATASET]}
data[DATASET][CONFIG] = {
**data[DATASET][CONFIG],
}
data[DATASET][CONFIG][task] = {**read_results(file)}
return data
def read_results(file):
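    """Read an evaluation report and return its accuracy and averaged metrics
    under normalized key names."""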
with open(file) as json_file:
data = json.load(json_file)
keys = ["accuracy", "weighted avg", "macro avg", "micro avg"]
key_mapping = {
"weighted avg": "weighted_avg",
"macro avg": "macro_avg",
"micro avg": "micro_avg",
"accuracy": "accuracy"
}
result = {key_mapping[key]: data[key] for key in keys if key in data}
return result
if __name__ == "__main__":
data = {}
if os.path.exists(SUMMARY_FILE):
with open(SUMMARY_FILE) as json_file:
data = json.load(json_file)
for dirpath, dirnames, files in os.walk(os.environ["RESULT_DIR"]):
for f in files:
if f not in task_mapping.keys():
continue
data = generate_json(os.path.join(dirpath, f), task_mapping[f], data)
with open(SUMMARY_FILE, "w") as f:
json.dump(data, f, sort_keys=True, indent=2)
|
[] |
[] |
[
"SUMMARY_FILE",
"RESULT_DIR",
"DATASET_NAME",
"CONFIG"
] |
[]
|
["SUMMARY_FILE", "RESULT_DIR", "DATASET_NAME", "CONFIG"]
|
python
| 4 | 0 | |
src_archived/model_nn.py
|
from keras.callbacks import EarlyStopping
from keras.layers.advanced_activations import PReLU
from keras.layers.core import Activation, Dense, Dropout
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential, load_model
from keras.utils import np_utils
from sklearn.preprocessing import StandardScaler
from .model import AbsModel
from .util import Util
import os
import tensorflow as tf
# Suppress TensorFlow warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
class ModelNN(AbsModel):
"""NNのモデルクラス
Attributes:
run_fold_name(str): 実行の名前とfoldの番号を組み合わせた名前
params(dict): ハイパーパラメータ
model(Model): 初期値はNoneで、train後に学習済みモデルを保持するのに使う
scaler(Model): 初期値はNoneで、train後に学習済みscalerを保持するのに使う
"""
def __init__(self, run_fold_name, params):
super().__init__(run_fold_name, params)
self.scaler = None
def train(self, tr_x, tr_y, va_x=None, va_y=None):
        # Set up and scale the data
validation = va_x is not None
scaler = StandardScaler()
scaler.fit(tr_x)
tr_x = scaler.transform(tr_x)
tr_y = np_utils.to_categorical(tr_y, num_classes=9)
if validation:
va_x = scaler.transform(va_x)
va_y = np_utils.to_categorical(va_y, num_classes=9)
        # Hyperparameters
nb_classes = 9
layers = self.params['layers']
dropout = self.params['dropout']
units = self.params['units']
nb_epoch = self.params['nb_epoch']
patience = self.params['patience']
        # Build the model
model = Sequential()
model.add(Dense(units, input_shape=(tr_x.shape[1],)))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(dropout))
for l in range(layers - 1):
model.add(Dense(units))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(dropout))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
if validation:
early_stopping = EarlyStopping(monitor='val_loss',
patience=patience,
verbose=1,
restore_best_weights=True
)
model.fit(tr_x,
tr_y,
epochs=nb_epoch,
batch_size=128,
verbose=2,
validation_data=(va_x, va_y),
callbacks=[early_stopping]
)
else:
model.fit(tr_x,
tr_y,
                      epochs=nb_epoch,
batch_size=128,
verbose=2
)
        # Keep the trained model and scaler
self.model = model
self.scaler = scaler
def predict(self, te_x):
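        # Scale the input with the fitted scaler and return class probabilities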
te_x = self.scaler.transform(te_x)
pred = self.model.predict_proba(te_x)
return pred
def save_model(self):
model_path = os.path.join('../model_archived/model', f'{self.run_fold_name}.h5')
scaler_path = os.path.join('../model_archived/model', f'{self.run_fold_name}-scaler.pkl')
os.makedirs(os.path.dirname(model_path), exist_ok=True)
self.model.save(model_path)
Util.dump(self.scaler, scaler_path)
def load_model(self):
model_path = os.path.join('../model_archived/model', f'{self.run_fold_name}.h5')
scaler_path = os.path.join('../model_archived/model', f'{self.run_fold_name}-scaler.pkl')
self.model = load_model(model_path)
self.scaler = Util.load(scaler_path)
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
rcon/cache_utils.py
|
import redis
import simplejson
import pickle
import logging
import os
import functools
from cachetools.func import ttl_cache as cachetools_ttl_cache
from contextlib import contextmanager
logger = logging.getLogger(__name__)
_REDIS_POOL = None
class RedisCached:
PREFIX = 'cached_'
def __init__(self, pool, ttl_seconds, function, is_method=False, cache_falsy=True, serializer=simplejson.dumps, deserializer=simplejson.loads):
self.red = redis.Redis(connection_pool=pool)
self.function = function
self.serializer = serializer
self.deserializer = deserializer
self.ttl_seconds = ttl_seconds
self.is_method = is_method
self.cache_falsy = cache_falsy
@staticmethod
def clear_all_caches(pool):
red = redis.Redis(connection_pool=pool)
keys = list(red.scan_iter(match=f"{RedisCached.PREFIX}*"))
logger.warning("Wiping cached values %s", keys)
if not keys:
return
return red.delete(*keys)
@property
def key_prefix(self):
return f'{self.PREFIX}{self.function.__qualname__}'
def key(self, *args, **kwargs):
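        # Build the cache key from the serialized call arguments, skipping `self` for bound methods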
if self.is_method:
args = args[1:]
params = self.serializer({'args': args, "kwargs": kwargs})
if isinstance(params, bytes):
return self.key_prefix.encode() + b'__' + params
return f"{self.key_prefix}__{params}"
@property
def __name__(self):
return self.function.__name__
@property
def __wrapped__(self):
return self.function
def __call__(self, *args, **kwargs):
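        # Return the cached value when present; otherwise compute it and store it with the configured TTL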
val = None
key = self.key(*args, **kwargs)
try:
val = self.red.get(key)
except redis.exceptions.RedisError:
logger.exception("Unable to use cache")
if val is not None:
#logger.debug("Cache HIT for %s", self.key(*args, **kwargs))
return self.deserializer(val)
#logger.debug("Cache MISS for %s", self.key(*args, **kwargs))
val = self.function(*args, **kwargs)
if not val and not self.cache_falsy:
logger.debug("Caching falsy result is disabled for %s", self.__name__)
return val
try:
self.red.setex(key, self.ttl_seconds, self.serializer(val))
#logger.debug("Cache SET for %s", self.key(*args, **kwargs))
except redis.exceptions.RedisError:
logger.exception("Unable to set cache")
return val
def clear_for(self, *args, **kwargs):
key = self.key(*args, **kwargs)
if key:
self.red.delete(key)
def clear_all(self):
try:
keys = list(self.red.scan_iter(match=f"{self.key_prefix}*"))
if keys:
self.red.delete(*keys)
except redis.exceptions.RedisError:
logger.exception("Unable to clear cache")
else:
logger.debug("Cache CLEARED for %s", keys)
def get_redis_pool(decode_responses=True):
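    # Lazily initialise a single module-level connection pool from REDIS_URL; returns None when the URL is unset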
global _REDIS_POOL
redis_url = os.getenv('REDIS_URL')
if not redis_url:
return None
if _REDIS_POOL is None:
logger.warning("Redis pool initializing")
_REDIS_POOL = redis.ConnectionPool.from_url(
redis_url, max_connections=10, socket_connect_timeout=5,
socket_timeout=5, decode_responses=decode_responses
)
return _REDIS_POOL
def ttl_cache(ttl, *args, is_method=True, cache_falsy=True, **kwargs):
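    # Redis-backed TTL cache decorator; falls back to an in-memory cachetools cache when REDIS_URL is not set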
pool = get_redis_pool(decode_responses=False)
if not pool:
logger.debug("REDIS_URL is not set falling back to memory cache")
return cachetools_ttl_cache(*args, ttl=ttl, **kwargs)
def decorator(func):
cached_func = RedisCached(
pool, ttl, function=func, is_method=is_method, cache_falsy=cache_falsy, serializer=pickle.dumps, deserializer=pickle.loads)
def wrapper(*args, **kwargs):
# Re-wrapping to preserve function signature
return cached_func(*args, **kwargs)
functools.update_wrapper(wrapper, func)
wrapper.cache_clear = cached_func.clear_all
return wrapper
return decorator
@contextmanager
def invalidates(*cached_funcs):
for f in cached_funcs:
f.cache_clear()
yield None
for f in cached_funcs:
f.cache_clear()
|
[] |
[] |
[
"REDIS_URL"
] |
[]
|
["REDIS_URL"]
|
python
| 1 | 0 | |
examples/tci/v20190318/SubmitOneByOneClassTask.go
|
package main
import (
"fmt"
"github.com/Hyzhou/tencentcloud-sdk-go/tencentcloud/common"
"github.com/Hyzhou/tencentcloud-sdk-go/tencentcloud/common/errors"
"github.com/Hyzhou/tencentcloud-sdk-go/tencentcloud/common/profile"
tci "github.com/Hyzhou/tencentcloud-sdk-go/tencentcloud/tci/v20190318"
)
func main() {
credential := common.NewCredential(
// os.Getenv("TENCENTCLOUD_SECRET_ID"),
// os.Getenv("TENCENTCLOUD_SECRET_KEY"),
"", "",
)
cpf := profile.NewClientProfile()
cpf.HttpProfile.ReqMethod = "POST"
cpf.HttpProfile.ReqTimeout = 30
cpf.HttpProfile.Endpoint = "tci.tencentcloudapi.com"
client, _ := tci.NewClient(credential, "ap-guangzhou", cpf)
req := tci.NewSubmitOneByOneClassTaskRequest()
req.FileContent = common.StringPtr("https://edu-test-1253131631.cos.ap-guangzhou.myqcloud.com/aieduautotest/autotest_vedio.mp4")
req.FileType = common.StringPtr("vod_url")
req.Lang = common.Int64Ptr(0)
req.LibrarySet = common.StringPtrs([]string{"library_15603955264181591716"})
req.VocabLibNameList = common.StringPtrs([]string{"testlib2"})
req.VoiceEncodeType = common.Int64Ptr(1)
req.VoiceFileType = common.Int64Ptr(10)
	// Call the target API through the client object; a request object must be passed in
response, err := client.SubmitOneByOneClassTask(req)
	// Handle errors
fmt.Println(err)
if _, ok := err.(*errors.TencentCloudSDKError); ok {
fmt.Printf("An API error has returned: %s", err)
return
}
	// A non-SDK error: fail directly. Real code could add further handling here.
if err != nil {
panic(err)
}
	// Print the returned JSON string
fmt.Printf("%s", response.ToJsonString())
}
|
[
"\"TENCENTCLOUD_SECRET_ID\"",
"\"TENCENTCLOUD_SECRET_KEY\""
] |
[] |
[
"TENCENTCLOUD_SECRET_ID",
"TENCENTCLOUD_SECRET_KEY"
] |
[]
|
["TENCENTCLOUD_SECRET_ID", "TENCENTCLOUD_SECRET_KEY"]
|
go
| 2 | 0 | |
sdk/go/gcp/healthcare/pulumiTypes.go
|
// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
// nolint: lll
package healthcare
import (
"context"
"reflect"
"github.com/pulumi/pulumi/sdk/go/pulumi"
)
type DatasetIamBindingCondition struct {
Description *string `pulumi:"description"`
Expression string `pulumi:"expression"`
Title string `pulumi:"title"`
}
type DatasetIamBindingConditionInput interface {
pulumi.Input
ToDatasetIamBindingConditionOutput() DatasetIamBindingConditionOutput
ToDatasetIamBindingConditionOutputWithContext(context.Context) DatasetIamBindingConditionOutput
}
type DatasetIamBindingConditionArgs struct {
Description pulumi.StringPtrInput `pulumi:"description"`
Expression pulumi.StringInput `pulumi:"expression"`
Title pulumi.StringInput `pulumi:"title"`
}
func (DatasetIamBindingConditionArgs) ElementType() reflect.Type {
return reflect.TypeOf((*DatasetIamBindingCondition)(nil)).Elem()
}
func (i DatasetIamBindingConditionArgs) ToDatasetIamBindingConditionOutput() DatasetIamBindingConditionOutput {
return i.ToDatasetIamBindingConditionOutputWithContext(context.Background())
}
func (i DatasetIamBindingConditionArgs) ToDatasetIamBindingConditionOutputWithContext(ctx context.Context) DatasetIamBindingConditionOutput {
return pulumi.ToOutputWithContext(ctx, i).(DatasetIamBindingConditionOutput)
}
func (i DatasetIamBindingConditionArgs) ToDatasetIamBindingConditionPtrOutput() DatasetIamBindingConditionPtrOutput {
return i.ToDatasetIamBindingConditionPtrOutputWithContext(context.Background())
}
func (i DatasetIamBindingConditionArgs) ToDatasetIamBindingConditionPtrOutputWithContext(ctx context.Context) DatasetIamBindingConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(DatasetIamBindingConditionOutput).ToDatasetIamBindingConditionPtrOutputWithContext(ctx)
}
type DatasetIamBindingConditionPtrInput interface {
pulumi.Input
ToDatasetIamBindingConditionPtrOutput() DatasetIamBindingConditionPtrOutput
ToDatasetIamBindingConditionPtrOutputWithContext(context.Context) DatasetIamBindingConditionPtrOutput
}
type datasetIamBindingConditionPtrType DatasetIamBindingConditionArgs
func DatasetIamBindingConditionPtr(v *DatasetIamBindingConditionArgs) DatasetIamBindingConditionPtrInput { return (*datasetIamBindingConditionPtrType)(v)
}
func (*datasetIamBindingConditionPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**DatasetIamBindingCondition)(nil)).Elem()
}
func (i *datasetIamBindingConditionPtrType) ToDatasetIamBindingConditionPtrOutput() DatasetIamBindingConditionPtrOutput {
return i.ToDatasetIamBindingConditionPtrOutputWithContext(context.Background())
}
func (i *datasetIamBindingConditionPtrType) ToDatasetIamBindingConditionPtrOutputWithContext(ctx context.Context) DatasetIamBindingConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(DatasetIamBindingConditionPtrOutput)
}
type DatasetIamBindingConditionOutput struct { *pulumi.OutputState }
func (DatasetIamBindingConditionOutput) ElementType() reflect.Type {
return reflect.TypeOf((*DatasetIamBindingCondition)(nil)).Elem()
}
func (o DatasetIamBindingConditionOutput) ToDatasetIamBindingConditionOutput() DatasetIamBindingConditionOutput {
return o
}
func (o DatasetIamBindingConditionOutput) ToDatasetIamBindingConditionOutputWithContext(ctx context.Context) DatasetIamBindingConditionOutput {
return o
}
func (o DatasetIamBindingConditionOutput) ToDatasetIamBindingConditionPtrOutput() DatasetIamBindingConditionPtrOutput {
return o.ToDatasetIamBindingConditionPtrOutputWithContext(context.Background())
}
func (o DatasetIamBindingConditionOutput) ToDatasetIamBindingConditionPtrOutputWithContext(ctx context.Context) DatasetIamBindingConditionPtrOutput {
return o.ApplyT(func(v DatasetIamBindingCondition) *DatasetIamBindingCondition {
return &v
}).(DatasetIamBindingConditionPtrOutput)
}
func (o DatasetIamBindingConditionOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v DatasetIamBindingCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o DatasetIamBindingConditionOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v DatasetIamBindingCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o DatasetIamBindingConditionOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v DatasetIamBindingCondition) string { return v.Title }).(pulumi.StringOutput)
}
type DatasetIamBindingConditionPtrOutput struct { *pulumi.OutputState}
func (DatasetIamBindingConditionPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**DatasetIamBindingCondition)(nil)).Elem()
}
func (o DatasetIamBindingConditionPtrOutput) ToDatasetIamBindingConditionPtrOutput() DatasetIamBindingConditionPtrOutput {
return o
}
func (o DatasetIamBindingConditionPtrOutput) ToDatasetIamBindingConditionPtrOutputWithContext(ctx context.Context) DatasetIamBindingConditionPtrOutput {
return o
}
func (o DatasetIamBindingConditionPtrOutput) Elem() DatasetIamBindingConditionOutput {
return o.ApplyT(func (v *DatasetIamBindingCondition) DatasetIamBindingCondition { return *v }).(DatasetIamBindingConditionOutput)
}
func (o DatasetIamBindingConditionPtrOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v DatasetIamBindingCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o DatasetIamBindingConditionPtrOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v DatasetIamBindingCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o DatasetIamBindingConditionPtrOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v DatasetIamBindingCondition) string { return v.Title }).(pulumi.StringOutput)
}
type DatasetIamMemberCondition struct {
Description *string `pulumi:"description"`
Expression string `pulumi:"expression"`
Title string `pulumi:"title"`
}
type DatasetIamMemberConditionInput interface {
pulumi.Input
ToDatasetIamMemberConditionOutput() DatasetIamMemberConditionOutput
ToDatasetIamMemberConditionOutputWithContext(context.Context) DatasetIamMemberConditionOutput
}
type DatasetIamMemberConditionArgs struct {
Description pulumi.StringPtrInput `pulumi:"description"`
Expression pulumi.StringInput `pulumi:"expression"`
Title pulumi.StringInput `pulumi:"title"`
}
func (DatasetIamMemberConditionArgs) ElementType() reflect.Type {
return reflect.TypeOf((*DatasetIamMemberCondition)(nil)).Elem()
}
func (i DatasetIamMemberConditionArgs) ToDatasetIamMemberConditionOutput() DatasetIamMemberConditionOutput {
return i.ToDatasetIamMemberConditionOutputWithContext(context.Background())
}
func (i DatasetIamMemberConditionArgs) ToDatasetIamMemberConditionOutputWithContext(ctx context.Context) DatasetIamMemberConditionOutput {
return pulumi.ToOutputWithContext(ctx, i).(DatasetIamMemberConditionOutput)
}
func (i DatasetIamMemberConditionArgs) ToDatasetIamMemberConditionPtrOutput() DatasetIamMemberConditionPtrOutput {
return i.ToDatasetIamMemberConditionPtrOutputWithContext(context.Background())
}
func (i DatasetIamMemberConditionArgs) ToDatasetIamMemberConditionPtrOutputWithContext(ctx context.Context) DatasetIamMemberConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(DatasetIamMemberConditionOutput).ToDatasetIamMemberConditionPtrOutputWithContext(ctx)
}
type DatasetIamMemberConditionPtrInput interface {
pulumi.Input
ToDatasetIamMemberConditionPtrOutput() DatasetIamMemberConditionPtrOutput
ToDatasetIamMemberConditionPtrOutputWithContext(context.Context) DatasetIamMemberConditionPtrOutput
}
type datasetIamMemberConditionPtrType DatasetIamMemberConditionArgs
func DatasetIamMemberConditionPtr(v *DatasetIamMemberConditionArgs) DatasetIamMemberConditionPtrInput { return (*datasetIamMemberConditionPtrType)(v)
}
func (*datasetIamMemberConditionPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**DatasetIamMemberCondition)(nil)).Elem()
}
func (i *datasetIamMemberConditionPtrType) ToDatasetIamMemberConditionPtrOutput() DatasetIamMemberConditionPtrOutput {
return i.ToDatasetIamMemberConditionPtrOutputWithContext(context.Background())
}
func (i *datasetIamMemberConditionPtrType) ToDatasetIamMemberConditionPtrOutputWithContext(ctx context.Context) DatasetIamMemberConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(DatasetIamMemberConditionPtrOutput)
}
type DatasetIamMemberConditionOutput struct { *pulumi.OutputState }
func (DatasetIamMemberConditionOutput) ElementType() reflect.Type {
return reflect.TypeOf((*DatasetIamMemberCondition)(nil)).Elem()
}
func (o DatasetIamMemberConditionOutput) ToDatasetIamMemberConditionOutput() DatasetIamMemberConditionOutput {
return o
}
func (o DatasetIamMemberConditionOutput) ToDatasetIamMemberConditionOutputWithContext(ctx context.Context) DatasetIamMemberConditionOutput {
return o
}
func (o DatasetIamMemberConditionOutput) ToDatasetIamMemberConditionPtrOutput() DatasetIamMemberConditionPtrOutput {
return o.ToDatasetIamMemberConditionPtrOutputWithContext(context.Background())
}
func (o DatasetIamMemberConditionOutput) ToDatasetIamMemberConditionPtrOutputWithContext(ctx context.Context) DatasetIamMemberConditionPtrOutput {
return o.ApplyT(func(v DatasetIamMemberCondition) *DatasetIamMemberCondition {
return &v
}).(DatasetIamMemberConditionPtrOutput)
}
func (o DatasetIamMemberConditionOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v DatasetIamMemberCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o DatasetIamMemberConditionOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v DatasetIamMemberCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o DatasetIamMemberConditionOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v DatasetIamMemberCondition) string { return v.Title }).(pulumi.StringOutput)
}
type DatasetIamMemberConditionPtrOutput struct { *pulumi.OutputState}
func (DatasetIamMemberConditionPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**DatasetIamMemberCondition)(nil)).Elem()
}
func (o DatasetIamMemberConditionPtrOutput) ToDatasetIamMemberConditionPtrOutput() DatasetIamMemberConditionPtrOutput {
return o
}
func (o DatasetIamMemberConditionPtrOutput) ToDatasetIamMemberConditionPtrOutputWithContext(ctx context.Context) DatasetIamMemberConditionPtrOutput {
return o
}
func (o DatasetIamMemberConditionPtrOutput) Elem() DatasetIamMemberConditionOutput {
return o.ApplyT(func (v *DatasetIamMemberCondition) DatasetIamMemberCondition { return *v }).(DatasetIamMemberConditionOutput)
}
func (o DatasetIamMemberConditionPtrOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v DatasetIamMemberCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o DatasetIamMemberConditionPtrOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v DatasetIamMemberCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o DatasetIamMemberConditionPtrOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v DatasetIamMemberCondition) string { return v.Title }).(pulumi.StringOutput)
}
type DicomStoreIamBindingCondition struct {
Description *string `pulumi:"description"`
Expression string `pulumi:"expression"`
Title string `pulumi:"title"`
}
type DicomStoreIamBindingConditionInput interface {
pulumi.Input
ToDicomStoreIamBindingConditionOutput() DicomStoreIamBindingConditionOutput
ToDicomStoreIamBindingConditionOutputWithContext(context.Context) DicomStoreIamBindingConditionOutput
}
type DicomStoreIamBindingConditionArgs struct {
Description pulumi.StringPtrInput `pulumi:"description"`
Expression pulumi.StringInput `pulumi:"expression"`
Title pulumi.StringInput `pulumi:"title"`
}
func (DicomStoreIamBindingConditionArgs) ElementType() reflect.Type {
return reflect.TypeOf((*DicomStoreIamBindingCondition)(nil)).Elem()
}
func (i DicomStoreIamBindingConditionArgs) ToDicomStoreIamBindingConditionOutput() DicomStoreIamBindingConditionOutput {
return i.ToDicomStoreIamBindingConditionOutputWithContext(context.Background())
}
func (i DicomStoreIamBindingConditionArgs) ToDicomStoreIamBindingConditionOutputWithContext(ctx context.Context) DicomStoreIamBindingConditionOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreIamBindingConditionOutput)
}
func (i DicomStoreIamBindingConditionArgs) ToDicomStoreIamBindingConditionPtrOutput() DicomStoreIamBindingConditionPtrOutput {
return i.ToDicomStoreIamBindingConditionPtrOutputWithContext(context.Background())
}
func (i DicomStoreIamBindingConditionArgs) ToDicomStoreIamBindingConditionPtrOutputWithContext(ctx context.Context) DicomStoreIamBindingConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreIamBindingConditionOutput).ToDicomStoreIamBindingConditionPtrOutputWithContext(ctx)
}
type DicomStoreIamBindingConditionPtrInput interface {
pulumi.Input
ToDicomStoreIamBindingConditionPtrOutput() DicomStoreIamBindingConditionPtrOutput
ToDicomStoreIamBindingConditionPtrOutputWithContext(context.Context) DicomStoreIamBindingConditionPtrOutput
}
type dicomStoreIamBindingConditionPtrType DicomStoreIamBindingConditionArgs
func DicomStoreIamBindingConditionPtr(v *DicomStoreIamBindingConditionArgs) DicomStoreIamBindingConditionPtrInput { return (*dicomStoreIamBindingConditionPtrType)(v)
}
func (*dicomStoreIamBindingConditionPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**DicomStoreIamBindingCondition)(nil)).Elem()
}
func (i *dicomStoreIamBindingConditionPtrType) ToDicomStoreIamBindingConditionPtrOutput() DicomStoreIamBindingConditionPtrOutput {
return i.ToDicomStoreIamBindingConditionPtrOutputWithContext(context.Background())
}
func (i *dicomStoreIamBindingConditionPtrType) ToDicomStoreIamBindingConditionPtrOutputWithContext(ctx context.Context) DicomStoreIamBindingConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreIamBindingConditionPtrOutput)
}
type DicomStoreIamBindingConditionOutput struct { *pulumi.OutputState }
func (DicomStoreIamBindingConditionOutput) ElementType() reflect.Type {
return reflect.TypeOf((*DicomStoreIamBindingCondition)(nil)).Elem()
}
func (o DicomStoreIamBindingConditionOutput) ToDicomStoreIamBindingConditionOutput() DicomStoreIamBindingConditionOutput {
return o
}
func (o DicomStoreIamBindingConditionOutput) ToDicomStoreIamBindingConditionOutputWithContext(ctx context.Context) DicomStoreIamBindingConditionOutput {
return o
}
func (o DicomStoreIamBindingConditionOutput) ToDicomStoreIamBindingConditionPtrOutput() DicomStoreIamBindingConditionPtrOutput {
return o.ToDicomStoreIamBindingConditionPtrOutputWithContext(context.Background())
}
func (o DicomStoreIamBindingConditionOutput) ToDicomStoreIamBindingConditionPtrOutputWithContext(ctx context.Context) DicomStoreIamBindingConditionPtrOutput {
return o.ApplyT(func(v DicomStoreIamBindingCondition) *DicomStoreIamBindingCondition {
return &v
}).(DicomStoreIamBindingConditionPtrOutput)
}
func (o DicomStoreIamBindingConditionOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v DicomStoreIamBindingCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o DicomStoreIamBindingConditionOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v DicomStoreIamBindingCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o DicomStoreIamBindingConditionOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v DicomStoreIamBindingCondition) string { return v.Title }).(pulumi.StringOutput)
}
type DicomStoreIamBindingConditionPtrOutput struct { *pulumi.OutputState}
func (DicomStoreIamBindingConditionPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**DicomStoreIamBindingCondition)(nil)).Elem()
}
func (o DicomStoreIamBindingConditionPtrOutput) ToDicomStoreIamBindingConditionPtrOutput() DicomStoreIamBindingConditionPtrOutput {
return o
}
func (o DicomStoreIamBindingConditionPtrOutput) ToDicomStoreIamBindingConditionPtrOutputWithContext(ctx context.Context) DicomStoreIamBindingConditionPtrOutput {
return o
}
func (o DicomStoreIamBindingConditionPtrOutput) Elem() DicomStoreIamBindingConditionOutput {
return o.ApplyT(func (v *DicomStoreIamBindingCondition) DicomStoreIamBindingCondition { return *v }).(DicomStoreIamBindingConditionOutput)
}
func (o DicomStoreIamBindingConditionPtrOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v DicomStoreIamBindingCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o DicomStoreIamBindingConditionPtrOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v DicomStoreIamBindingCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o DicomStoreIamBindingConditionPtrOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v DicomStoreIamBindingCondition) string { return v.Title }).(pulumi.StringOutput)
}
type DicomStoreIamMemberCondition struct {
Description *string `pulumi:"description"`
Expression string `pulumi:"expression"`
Title string `pulumi:"title"`
}
type DicomStoreIamMemberConditionInput interface {
pulumi.Input
ToDicomStoreIamMemberConditionOutput() DicomStoreIamMemberConditionOutput
ToDicomStoreIamMemberConditionOutputWithContext(context.Context) DicomStoreIamMemberConditionOutput
}
type DicomStoreIamMemberConditionArgs struct {
Description pulumi.StringPtrInput `pulumi:"description"`
Expression pulumi.StringInput `pulumi:"expression"`
Title pulumi.StringInput `pulumi:"title"`
}
func (DicomStoreIamMemberConditionArgs) ElementType() reflect.Type {
return reflect.TypeOf((*DicomStoreIamMemberCondition)(nil)).Elem()
}
func (i DicomStoreIamMemberConditionArgs) ToDicomStoreIamMemberConditionOutput() DicomStoreIamMemberConditionOutput {
return i.ToDicomStoreIamMemberConditionOutputWithContext(context.Background())
}
func (i DicomStoreIamMemberConditionArgs) ToDicomStoreIamMemberConditionOutputWithContext(ctx context.Context) DicomStoreIamMemberConditionOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreIamMemberConditionOutput)
}
func (i DicomStoreIamMemberConditionArgs) ToDicomStoreIamMemberConditionPtrOutput() DicomStoreIamMemberConditionPtrOutput {
return i.ToDicomStoreIamMemberConditionPtrOutputWithContext(context.Background())
}
func (i DicomStoreIamMemberConditionArgs) ToDicomStoreIamMemberConditionPtrOutputWithContext(ctx context.Context) DicomStoreIamMemberConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreIamMemberConditionOutput).ToDicomStoreIamMemberConditionPtrOutputWithContext(ctx)
}
type DicomStoreIamMemberConditionPtrInput interface {
pulumi.Input
ToDicomStoreIamMemberConditionPtrOutput() DicomStoreIamMemberConditionPtrOutput
ToDicomStoreIamMemberConditionPtrOutputWithContext(context.Context) DicomStoreIamMemberConditionPtrOutput
}
type dicomStoreIamMemberConditionPtrType DicomStoreIamMemberConditionArgs
func DicomStoreIamMemberConditionPtr(v *DicomStoreIamMemberConditionArgs) DicomStoreIamMemberConditionPtrInput { return (*dicomStoreIamMemberConditionPtrType)(v)
}
func (*dicomStoreIamMemberConditionPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**DicomStoreIamMemberCondition)(nil)).Elem()
}
func (i *dicomStoreIamMemberConditionPtrType) ToDicomStoreIamMemberConditionPtrOutput() DicomStoreIamMemberConditionPtrOutput {
return i.ToDicomStoreIamMemberConditionPtrOutputWithContext(context.Background())
}
func (i *dicomStoreIamMemberConditionPtrType) ToDicomStoreIamMemberConditionPtrOutputWithContext(ctx context.Context) DicomStoreIamMemberConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreIamMemberConditionPtrOutput)
}
type DicomStoreIamMemberConditionOutput struct { *pulumi.OutputState }
func (DicomStoreIamMemberConditionOutput) ElementType() reflect.Type {
return reflect.TypeOf((*DicomStoreIamMemberCondition)(nil)).Elem()
}
func (o DicomStoreIamMemberConditionOutput) ToDicomStoreIamMemberConditionOutput() DicomStoreIamMemberConditionOutput {
return o
}
func (o DicomStoreIamMemberConditionOutput) ToDicomStoreIamMemberConditionOutputWithContext(ctx context.Context) DicomStoreIamMemberConditionOutput {
return o
}
func (o DicomStoreIamMemberConditionOutput) ToDicomStoreIamMemberConditionPtrOutput() DicomStoreIamMemberConditionPtrOutput {
return o.ToDicomStoreIamMemberConditionPtrOutputWithContext(context.Background())
}
func (o DicomStoreIamMemberConditionOutput) ToDicomStoreIamMemberConditionPtrOutputWithContext(ctx context.Context) DicomStoreIamMemberConditionPtrOutput {
return o.ApplyT(func(v DicomStoreIamMemberCondition) *DicomStoreIamMemberCondition {
return &v
}).(DicomStoreIamMemberConditionPtrOutput)
}
func (o DicomStoreIamMemberConditionOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v DicomStoreIamMemberCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o DicomStoreIamMemberConditionOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v DicomStoreIamMemberCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o DicomStoreIamMemberConditionOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v DicomStoreIamMemberCondition) string { return v.Title }).(pulumi.StringOutput)
}
type DicomStoreIamMemberConditionPtrOutput struct { *pulumi.OutputState}
func (DicomStoreIamMemberConditionPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**DicomStoreIamMemberCondition)(nil)).Elem()
}
func (o DicomStoreIamMemberConditionPtrOutput) ToDicomStoreIamMemberConditionPtrOutput() DicomStoreIamMemberConditionPtrOutput {
return o
}
func (o DicomStoreIamMemberConditionPtrOutput) ToDicomStoreIamMemberConditionPtrOutputWithContext(ctx context.Context) DicomStoreIamMemberConditionPtrOutput {
return o
}
func (o DicomStoreIamMemberConditionPtrOutput) Elem() DicomStoreIamMemberConditionOutput {
return o.ApplyT(func (v *DicomStoreIamMemberCondition) DicomStoreIamMemberCondition { return *v }).(DicomStoreIamMemberConditionOutput)
}
func (o DicomStoreIamMemberConditionPtrOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v DicomStoreIamMemberCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o DicomStoreIamMemberConditionPtrOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v DicomStoreIamMemberCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o DicomStoreIamMemberConditionPtrOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v DicomStoreIamMemberCondition) string { return v.Title }).(pulumi.StringOutput)
}
type DicomStoreNotificationConfig struct {
PubsubTopic string `pulumi:"pubsubTopic"`
}
type DicomStoreNotificationConfigInput interface {
pulumi.Input
ToDicomStoreNotificationConfigOutput() DicomStoreNotificationConfigOutput
ToDicomStoreNotificationConfigOutputWithContext(context.Context) DicomStoreNotificationConfigOutput
}
type DicomStoreNotificationConfigArgs struct {
PubsubTopic pulumi.StringInput `pulumi:"pubsubTopic"`
}
func (DicomStoreNotificationConfigArgs) ElementType() reflect.Type {
return reflect.TypeOf((*DicomStoreNotificationConfig)(nil)).Elem()
}
func (i DicomStoreNotificationConfigArgs) ToDicomStoreNotificationConfigOutput() DicomStoreNotificationConfigOutput {
return i.ToDicomStoreNotificationConfigOutputWithContext(context.Background())
}
func (i DicomStoreNotificationConfigArgs) ToDicomStoreNotificationConfigOutputWithContext(ctx context.Context) DicomStoreNotificationConfigOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreNotificationConfigOutput)
}
func (i DicomStoreNotificationConfigArgs) ToDicomStoreNotificationConfigPtrOutput() DicomStoreNotificationConfigPtrOutput {
return i.ToDicomStoreNotificationConfigPtrOutputWithContext(context.Background())
}
func (i DicomStoreNotificationConfigArgs) ToDicomStoreNotificationConfigPtrOutputWithContext(ctx context.Context) DicomStoreNotificationConfigPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreNotificationConfigOutput).ToDicomStoreNotificationConfigPtrOutputWithContext(ctx)
}
type DicomStoreNotificationConfigPtrInput interface {
pulumi.Input
ToDicomStoreNotificationConfigPtrOutput() DicomStoreNotificationConfigPtrOutput
ToDicomStoreNotificationConfigPtrOutputWithContext(context.Context) DicomStoreNotificationConfigPtrOutput
}
type dicomStoreNotificationConfigPtrType DicomStoreNotificationConfigArgs
func DicomStoreNotificationConfigPtr(v *DicomStoreNotificationConfigArgs) DicomStoreNotificationConfigPtrInput { return (*dicomStoreNotificationConfigPtrType)(v)
}
func (*dicomStoreNotificationConfigPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**DicomStoreNotificationConfig)(nil)).Elem()
}
func (i *dicomStoreNotificationConfigPtrType) ToDicomStoreNotificationConfigPtrOutput() DicomStoreNotificationConfigPtrOutput {
return i.ToDicomStoreNotificationConfigPtrOutputWithContext(context.Background())
}
func (i *dicomStoreNotificationConfigPtrType) ToDicomStoreNotificationConfigPtrOutputWithContext(ctx context.Context) DicomStoreNotificationConfigPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreNotificationConfigPtrOutput)
}
type DicomStoreNotificationConfigOutput struct { *pulumi.OutputState }
func (DicomStoreNotificationConfigOutput) ElementType() reflect.Type {
return reflect.TypeOf((*DicomStoreNotificationConfig)(nil)).Elem()
}
func (o DicomStoreNotificationConfigOutput) ToDicomStoreNotificationConfigOutput() DicomStoreNotificationConfigOutput {
return o
}
func (o DicomStoreNotificationConfigOutput) ToDicomStoreNotificationConfigOutputWithContext(ctx context.Context) DicomStoreNotificationConfigOutput {
return o
}
func (o DicomStoreNotificationConfigOutput) ToDicomStoreNotificationConfigPtrOutput() DicomStoreNotificationConfigPtrOutput {
return o.ToDicomStoreNotificationConfigPtrOutputWithContext(context.Background())
}
func (o DicomStoreNotificationConfigOutput) ToDicomStoreNotificationConfigPtrOutputWithContext(ctx context.Context) DicomStoreNotificationConfigPtrOutput {
return o.ApplyT(func(v DicomStoreNotificationConfig) *DicomStoreNotificationConfig {
return &v
}).(DicomStoreNotificationConfigPtrOutput)
}
func (o DicomStoreNotificationConfigOutput) PubsubTopic() pulumi.StringOutput {
return o.ApplyT(func (v DicomStoreNotificationConfig) string { return v.PubsubTopic }).(pulumi.StringOutput)
}
type DicomStoreNotificationConfigPtrOutput struct { *pulumi.OutputState}
func (DicomStoreNotificationConfigPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**DicomStoreNotificationConfig)(nil)).Elem()
}
func (o DicomStoreNotificationConfigPtrOutput) ToDicomStoreNotificationConfigPtrOutput() DicomStoreNotificationConfigPtrOutput {
return o
}
func (o DicomStoreNotificationConfigPtrOutput) ToDicomStoreNotificationConfigPtrOutputWithContext(ctx context.Context) DicomStoreNotificationConfigPtrOutput {
return o
}
func (o DicomStoreNotificationConfigPtrOutput) Elem() DicomStoreNotificationConfigOutput {
return o.ApplyT(func (v *DicomStoreNotificationConfig) DicomStoreNotificationConfig { return *v }).(DicomStoreNotificationConfigOutput)
}
func (o DicomStoreNotificationConfigPtrOutput) PubsubTopic() pulumi.StringOutput {
return o.ApplyT(func (v DicomStoreNotificationConfig) string { return v.PubsubTopic }).(pulumi.StringOutput)
}
type FhirStoreIamBindingCondition struct {
Description *string `pulumi:"description"`
Expression string `pulumi:"expression"`
Title string `pulumi:"title"`
}
type FhirStoreIamBindingConditionInput interface {
pulumi.Input
ToFhirStoreIamBindingConditionOutput() FhirStoreIamBindingConditionOutput
ToFhirStoreIamBindingConditionOutputWithContext(context.Context) FhirStoreIamBindingConditionOutput
}
type FhirStoreIamBindingConditionArgs struct {
Description pulumi.StringPtrInput `pulumi:"description"`
Expression pulumi.StringInput `pulumi:"expression"`
Title pulumi.StringInput `pulumi:"title"`
}
func (FhirStoreIamBindingConditionArgs) ElementType() reflect.Type {
return reflect.TypeOf((*FhirStoreIamBindingCondition)(nil)).Elem()
}
func (i FhirStoreIamBindingConditionArgs) ToFhirStoreIamBindingConditionOutput() FhirStoreIamBindingConditionOutput {
return i.ToFhirStoreIamBindingConditionOutputWithContext(context.Background())
}
func (i FhirStoreIamBindingConditionArgs) ToFhirStoreIamBindingConditionOutputWithContext(ctx context.Context) FhirStoreIamBindingConditionOutput {
return pulumi.ToOutputWithContext(ctx, i).(FhirStoreIamBindingConditionOutput)
}
func (i FhirStoreIamBindingConditionArgs) ToFhirStoreIamBindingConditionPtrOutput() FhirStoreIamBindingConditionPtrOutput {
return i.ToFhirStoreIamBindingConditionPtrOutputWithContext(context.Background())
}
func (i FhirStoreIamBindingConditionArgs) ToFhirStoreIamBindingConditionPtrOutputWithContext(ctx context.Context) FhirStoreIamBindingConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(FhirStoreIamBindingConditionOutput).ToFhirStoreIamBindingConditionPtrOutputWithContext(ctx)
}
type FhirStoreIamBindingConditionPtrInput interface {
pulumi.Input
ToFhirStoreIamBindingConditionPtrOutput() FhirStoreIamBindingConditionPtrOutput
ToFhirStoreIamBindingConditionPtrOutputWithContext(context.Context) FhirStoreIamBindingConditionPtrOutput
}
type fhirStoreIamBindingConditionPtrType FhirStoreIamBindingConditionArgs
func FhirStoreIamBindingConditionPtr(v *FhirStoreIamBindingConditionArgs) FhirStoreIamBindingConditionPtrInput { return (*fhirStoreIamBindingConditionPtrType)(v)
}
func (*fhirStoreIamBindingConditionPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**FhirStoreIamBindingCondition)(nil)).Elem()
}
func (i *fhirStoreIamBindingConditionPtrType) ToFhirStoreIamBindingConditionPtrOutput() FhirStoreIamBindingConditionPtrOutput {
return i.ToFhirStoreIamBindingConditionPtrOutputWithContext(context.Background())
}
func (i *fhirStoreIamBindingConditionPtrType) ToFhirStoreIamBindingConditionPtrOutputWithContext(ctx context.Context) FhirStoreIamBindingConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(FhirStoreIamBindingConditionPtrOutput)
}
type FhirStoreIamBindingConditionOutput struct { *pulumi.OutputState }
func (FhirStoreIamBindingConditionOutput) ElementType() reflect.Type {
return reflect.TypeOf((*FhirStoreIamBindingCondition)(nil)).Elem()
}
func (o FhirStoreIamBindingConditionOutput) ToFhirStoreIamBindingConditionOutput() FhirStoreIamBindingConditionOutput {
return o
}
func (o FhirStoreIamBindingConditionOutput) ToFhirStoreIamBindingConditionOutputWithContext(ctx context.Context) FhirStoreIamBindingConditionOutput {
return o
}
func (o FhirStoreIamBindingConditionOutput) ToFhirStoreIamBindingConditionPtrOutput() FhirStoreIamBindingConditionPtrOutput {
return o.ToFhirStoreIamBindingConditionPtrOutputWithContext(context.Background())
}
func (o FhirStoreIamBindingConditionOutput) ToFhirStoreIamBindingConditionPtrOutputWithContext(ctx context.Context) FhirStoreIamBindingConditionPtrOutput {
return o.ApplyT(func(v FhirStoreIamBindingCondition) *FhirStoreIamBindingCondition {
return &v
}).(FhirStoreIamBindingConditionPtrOutput)
}
func (o FhirStoreIamBindingConditionOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v FhirStoreIamBindingCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o FhirStoreIamBindingConditionOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v FhirStoreIamBindingCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o FhirStoreIamBindingConditionOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v FhirStoreIamBindingCondition) string { return v.Title }).(pulumi.StringOutput)
}
type FhirStoreIamBindingConditionPtrOutput struct { *pulumi.OutputState}
func (FhirStoreIamBindingConditionPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**FhirStoreIamBindingCondition)(nil)).Elem()
}
func (o FhirStoreIamBindingConditionPtrOutput) ToFhirStoreIamBindingConditionPtrOutput() FhirStoreIamBindingConditionPtrOutput {
return o
}
func (o FhirStoreIamBindingConditionPtrOutput) ToFhirStoreIamBindingConditionPtrOutputWithContext(ctx context.Context) FhirStoreIamBindingConditionPtrOutput {
return o
}
func (o FhirStoreIamBindingConditionPtrOutput) Elem() FhirStoreIamBindingConditionOutput {
return o.ApplyT(func (v *FhirStoreIamBindingCondition) FhirStoreIamBindingCondition { return *v }).(FhirStoreIamBindingConditionOutput)
}
func (o FhirStoreIamBindingConditionPtrOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v FhirStoreIamBindingCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o FhirStoreIamBindingConditionPtrOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v FhirStoreIamBindingCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o FhirStoreIamBindingConditionPtrOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v FhirStoreIamBindingCondition) string { return v.Title }).(pulumi.StringOutput)
}
type FhirStoreIamMemberCondition struct {
Description *string `pulumi:"description"`
Expression string `pulumi:"expression"`
Title string `pulumi:"title"`
}
type FhirStoreIamMemberConditionInput interface {
pulumi.Input
ToFhirStoreIamMemberConditionOutput() FhirStoreIamMemberConditionOutput
ToFhirStoreIamMemberConditionOutputWithContext(context.Context) FhirStoreIamMemberConditionOutput
}
type FhirStoreIamMemberConditionArgs struct {
Description pulumi.StringPtrInput `pulumi:"description"`
Expression pulumi.StringInput `pulumi:"expression"`
Title pulumi.StringInput `pulumi:"title"`
}
func (FhirStoreIamMemberConditionArgs) ElementType() reflect.Type {
return reflect.TypeOf((*FhirStoreIamMemberCondition)(nil)).Elem()
}
func (i FhirStoreIamMemberConditionArgs) ToFhirStoreIamMemberConditionOutput() FhirStoreIamMemberConditionOutput {
return i.ToFhirStoreIamMemberConditionOutputWithContext(context.Background())
}
func (i FhirStoreIamMemberConditionArgs) ToFhirStoreIamMemberConditionOutputWithContext(ctx context.Context) FhirStoreIamMemberConditionOutput {
return pulumi.ToOutputWithContext(ctx, i).(FhirStoreIamMemberConditionOutput)
}
func (i FhirStoreIamMemberConditionArgs) ToFhirStoreIamMemberConditionPtrOutput() FhirStoreIamMemberConditionPtrOutput {
return i.ToFhirStoreIamMemberConditionPtrOutputWithContext(context.Background())
}
func (i FhirStoreIamMemberConditionArgs) ToFhirStoreIamMemberConditionPtrOutputWithContext(ctx context.Context) FhirStoreIamMemberConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(FhirStoreIamMemberConditionOutput).ToFhirStoreIamMemberConditionPtrOutputWithContext(ctx)
}
type FhirStoreIamMemberConditionPtrInput interface {
pulumi.Input
ToFhirStoreIamMemberConditionPtrOutput() FhirStoreIamMemberConditionPtrOutput
ToFhirStoreIamMemberConditionPtrOutputWithContext(context.Context) FhirStoreIamMemberConditionPtrOutput
}
type fhirStoreIamMemberConditionPtrType FhirStoreIamMemberConditionArgs
func FhirStoreIamMemberConditionPtr(v *FhirStoreIamMemberConditionArgs) FhirStoreIamMemberConditionPtrInput { return (*fhirStoreIamMemberConditionPtrType)(v)
}
func (*fhirStoreIamMemberConditionPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**FhirStoreIamMemberCondition)(nil)).Elem()
}
func (i *fhirStoreIamMemberConditionPtrType) ToFhirStoreIamMemberConditionPtrOutput() FhirStoreIamMemberConditionPtrOutput {
return i.ToFhirStoreIamMemberConditionPtrOutputWithContext(context.Background())
}
func (i *fhirStoreIamMemberConditionPtrType) ToFhirStoreIamMemberConditionPtrOutputWithContext(ctx context.Context) FhirStoreIamMemberConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(FhirStoreIamMemberConditionPtrOutput)
}
type FhirStoreIamMemberConditionOutput struct { *pulumi.OutputState }
func (FhirStoreIamMemberConditionOutput) ElementType() reflect.Type {
return reflect.TypeOf((*FhirStoreIamMemberCondition)(nil)).Elem()
}
func (o FhirStoreIamMemberConditionOutput) ToFhirStoreIamMemberConditionOutput() FhirStoreIamMemberConditionOutput {
return o
}
func (o FhirStoreIamMemberConditionOutput) ToFhirStoreIamMemberConditionOutputWithContext(ctx context.Context) FhirStoreIamMemberConditionOutput {
return o
}
func (o FhirStoreIamMemberConditionOutput) ToFhirStoreIamMemberConditionPtrOutput() FhirStoreIamMemberConditionPtrOutput {
return o.ToFhirStoreIamMemberConditionPtrOutputWithContext(context.Background())
}
func (o FhirStoreIamMemberConditionOutput) ToFhirStoreIamMemberConditionPtrOutputWithContext(ctx context.Context) FhirStoreIamMemberConditionPtrOutput {
return o.ApplyT(func(v FhirStoreIamMemberCondition) *FhirStoreIamMemberCondition {
return &v
}).(FhirStoreIamMemberConditionPtrOutput)
}
func (o FhirStoreIamMemberConditionOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v FhirStoreIamMemberCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o FhirStoreIamMemberConditionOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v FhirStoreIamMemberCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o FhirStoreIamMemberConditionOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v FhirStoreIamMemberCondition) string { return v.Title }).(pulumi.StringOutput)
}
type FhirStoreIamMemberConditionPtrOutput struct { *pulumi.OutputState}
func (FhirStoreIamMemberConditionPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**FhirStoreIamMemberCondition)(nil)).Elem()
}
func (o FhirStoreIamMemberConditionPtrOutput) ToFhirStoreIamMemberConditionPtrOutput() FhirStoreIamMemberConditionPtrOutput {
return o
}
func (o FhirStoreIamMemberConditionPtrOutput) ToFhirStoreIamMemberConditionPtrOutputWithContext(ctx context.Context) FhirStoreIamMemberConditionPtrOutput {
return o
}
func (o FhirStoreIamMemberConditionPtrOutput) Elem() FhirStoreIamMemberConditionOutput {
return o.ApplyT(func (v *FhirStoreIamMemberCondition) FhirStoreIamMemberCondition { return *v }).(FhirStoreIamMemberConditionOutput)
}
func (o FhirStoreIamMemberConditionPtrOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v FhirStoreIamMemberCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o FhirStoreIamMemberConditionPtrOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v FhirStoreIamMemberCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o FhirStoreIamMemberConditionPtrOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v FhirStoreIamMemberCondition) string { return v.Title }).(pulumi.StringOutput)
}
type FhirStoreNotificationConfig struct {
PubsubTopic string `pulumi:"pubsubTopic"`
}
type FhirStoreNotificationConfigInput interface {
pulumi.Input
ToFhirStoreNotificationConfigOutput() FhirStoreNotificationConfigOutput
ToFhirStoreNotificationConfigOutputWithContext(context.Context) FhirStoreNotificationConfigOutput
}
type FhirStoreNotificationConfigArgs struct {
PubsubTopic pulumi.StringInput `pulumi:"pubsubTopic"`
}
func (FhirStoreNotificationConfigArgs) ElementType() reflect.Type {
return reflect.TypeOf((*FhirStoreNotificationConfig)(nil)).Elem()
}
func (i FhirStoreNotificationConfigArgs) ToFhirStoreNotificationConfigOutput() FhirStoreNotificationConfigOutput {
return i.ToFhirStoreNotificationConfigOutputWithContext(context.Background())
}
func (i FhirStoreNotificationConfigArgs) ToFhirStoreNotificationConfigOutputWithContext(ctx context.Context) FhirStoreNotificationConfigOutput {
return pulumi.ToOutputWithContext(ctx, i).(FhirStoreNotificationConfigOutput)
}
func (i FhirStoreNotificationConfigArgs) ToFhirStoreNotificationConfigPtrOutput() FhirStoreNotificationConfigPtrOutput {
return i.ToFhirStoreNotificationConfigPtrOutputWithContext(context.Background())
}
func (i FhirStoreNotificationConfigArgs) ToFhirStoreNotificationConfigPtrOutputWithContext(ctx context.Context) FhirStoreNotificationConfigPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(FhirStoreNotificationConfigOutput).ToFhirStoreNotificationConfigPtrOutputWithContext(ctx)
}
type FhirStoreNotificationConfigPtrInput interface {
pulumi.Input
ToFhirStoreNotificationConfigPtrOutput() FhirStoreNotificationConfigPtrOutput
ToFhirStoreNotificationConfigPtrOutputWithContext(context.Context) FhirStoreNotificationConfigPtrOutput
}
type fhirStoreNotificationConfigPtrType FhirStoreNotificationConfigArgs
func FhirStoreNotificationConfigPtr(v *FhirStoreNotificationConfigArgs) FhirStoreNotificationConfigPtrInput { return (*fhirStoreNotificationConfigPtrType)(v)
}
func (*fhirStoreNotificationConfigPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**FhirStoreNotificationConfig)(nil)).Elem()
}
func (i *fhirStoreNotificationConfigPtrType) ToFhirStoreNotificationConfigPtrOutput() FhirStoreNotificationConfigPtrOutput {
return i.ToFhirStoreNotificationConfigPtrOutputWithContext(context.Background())
}
func (i *fhirStoreNotificationConfigPtrType) ToFhirStoreNotificationConfigPtrOutputWithContext(ctx context.Context) FhirStoreNotificationConfigPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(FhirStoreNotificationConfigPtrOutput)
}
type FhirStoreNotificationConfigOutput struct { *pulumi.OutputState }
func (FhirStoreNotificationConfigOutput) ElementType() reflect.Type {
return reflect.TypeOf((*FhirStoreNotificationConfig)(nil)).Elem()
}
func (o FhirStoreNotificationConfigOutput) ToFhirStoreNotificationConfigOutput() FhirStoreNotificationConfigOutput {
return o
}
func (o FhirStoreNotificationConfigOutput) ToFhirStoreNotificationConfigOutputWithContext(ctx context.Context) FhirStoreNotificationConfigOutput {
return o
}
func (o FhirStoreNotificationConfigOutput) ToFhirStoreNotificationConfigPtrOutput() FhirStoreNotificationConfigPtrOutput {
return o.ToFhirStoreNotificationConfigPtrOutputWithContext(context.Background())
}
func (o FhirStoreNotificationConfigOutput) ToFhirStoreNotificationConfigPtrOutputWithContext(ctx context.Context) FhirStoreNotificationConfigPtrOutput {
return o.ApplyT(func(v FhirStoreNotificationConfig) *FhirStoreNotificationConfig {
return &v
}).(FhirStoreNotificationConfigPtrOutput)
}
func (o FhirStoreNotificationConfigOutput) PubsubTopic() pulumi.StringOutput {
return o.ApplyT(func (v FhirStoreNotificationConfig) string { return v.PubsubTopic }).(pulumi.StringOutput)
}
type FhirStoreNotificationConfigPtrOutput struct { *pulumi.OutputState}
func (FhirStoreNotificationConfigPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**FhirStoreNotificationConfig)(nil)).Elem()
}
func (o FhirStoreNotificationConfigPtrOutput) ToFhirStoreNotificationConfigPtrOutput() FhirStoreNotificationConfigPtrOutput {
return o
}
func (o FhirStoreNotificationConfigPtrOutput) ToFhirStoreNotificationConfigPtrOutputWithContext(ctx context.Context) FhirStoreNotificationConfigPtrOutput {
return o
}
func (o FhirStoreNotificationConfigPtrOutput) Elem() FhirStoreNotificationConfigOutput {
return o.ApplyT(func (v *FhirStoreNotificationConfig) FhirStoreNotificationConfig { return *v }).(FhirStoreNotificationConfigOutput)
}
func (o FhirStoreNotificationConfigPtrOutput) PubsubTopic() pulumi.StringOutput {
return o.ApplyT(func (v FhirStoreNotificationConfig) string { return v.PubsubTopic }).(pulumi.StringOutput)
}
type Hl7StoreIamBindingCondition struct {
Description *string `pulumi:"description"`
Expression string `pulumi:"expression"`
Title string `pulumi:"title"`
}
type Hl7StoreIamBindingConditionInput interface {
pulumi.Input
ToHl7StoreIamBindingConditionOutput() Hl7StoreIamBindingConditionOutput
ToHl7StoreIamBindingConditionOutputWithContext(context.Context) Hl7StoreIamBindingConditionOutput
}
type Hl7StoreIamBindingConditionArgs struct {
Description pulumi.StringPtrInput `pulumi:"description"`
Expression pulumi.StringInput `pulumi:"expression"`
Title pulumi.StringInput `pulumi:"title"`
}
func (Hl7StoreIamBindingConditionArgs) ElementType() reflect.Type {
return reflect.TypeOf((*Hl7StoreIamBindingCondition)(nil)).Elem()
}
func (i Hl7StoreIamBindingConditionArgs) ToHl7StoreIamBindingConditionOutput() Hl7StoreIamBindingConditionOutput {
return i.ToHl7StoreIamBindingConditionOutputWithContext(context.Background())
}
func (i Hl7StoreIamBindingConditionArgs) ToHl7StoreIamBindingConditionOutputWithContext(ctx context.Context) Hl7StoreIamBindingConditionOutput {
return pulumi.ToOutputWithContext(ctx, i).(Hl7StoreIamBindingConditionOutput)
}
func (i Hl7StoreIamBindingConditionArgs) ToHl7StoreIamBindingConditionPtrOutput() Hl7StoreIamBindingConditionPtrOutput {
return i.ToHl7StoreIamBindingConditionPtrOutputWithContext(context.Background())
}
func (i Hl7StoreIamBindingConditionArgs) ToHl7StoreIamBindingConditionPtrOutputWithContext(ctx context.Context) Hl7StoreIamBindingConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(Hl7StoreIamBindingConditionOutput).ToHl7StoreIamBindingConditionPtrOutputWithContext(ctx)
}
type Hl7StoreIamBindingConditionPtrInput interface {
pulumi.Input
ToHl7StoreIamBindingConditionPtrOutput() Hl7StoreIamBindingConditionPtrOutput
ToHl7StoreIamBindingConditionPtrOutputWithContext(context.Context) Hl7StoreIamBindingConditionPtrOutput
}
type hl7StoreIamBindingConditionPtrType Hl7StoreIamBindingConditionArgs
func Hl7StoreIamBindingConditionPtr(v *Hl7StoreIamBindingConditionArgs) Hl7StoreIamBindingConditionPtrInput { return (*hl7StoreIamBindingConditionPtrType)(v)
}
func (*hl7StoreIamBindingConditionPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**Hl7StoreIamBindingCondition)(nil)).Elem()
}
func (i *hl7StoreIamBindingConditionPtrType) ToHl7StoreIamBindingConditionPtrOutput() Hl7StoreIamBindingConditionPtrOutput {
return i.ToHl7StoreIamBindingConditionPtrOutputWithContext(context.Background())
}
func (i *hl7StoreIamBindingConditionPtrType) ToHl7StoreIamBindingConditionPtrOutputWithContext(ctx context.Context) Hl7StoreIamBindingConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(Hl7StoreIamBindingConditionPtrOutput)
}
type Hl7StoreIamBindingConditionOutput struct { *pulumi.OutputState }
func (Hl7StoreIamBindingConditionOutput) ElementType() reflect.Type {
return reflect.TypeOf((*Hl7StoreIamBindingCondition)(nil)).Elem()
}
func (o Hl7StoreIamBindingConditionOutput) ToHl7StoreIamBindingConditionOutput() Hl7StoreIamBindingConditionOutput {
return o
}
func (o Hl7StoreIamBindingConditionOutput) ToHl7StoreIamBindingConditionOutputWithContext(ctx context.Context) Hl7StoreIamBindingConditionOutput {
return o
}
func (o Hl7StoreIamBindingConditionOutput) ToHl7StoreIamBindingConditionPtrOutput() Hl7StoreIamBindingConditionPtrOutput {
return o.ToHl7StoreIamBindingConditionPtrOutputWithContext(context.Background())
}
func (o Hl7StoreIamBindingConditionOutput) ToHl7StoreIamBindingConditionPtrOutputWithContext(ctx context.Context) Hl7StoreIamBindingConditionPtrOutput {
return o.ApplyT(func(v Hl7StoreIamBindingCondition) *Hl7StoreIamBindingCondition {
return &v
}).(Hl7StoreIamBindingConditionPtrOutput)
}
func (o Hl7StoreIamBindingConditionOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v Hl7StoreIamBindingCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o Hl7StoreIamBindingConditionOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v Hl7StoreIamBindingCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o Hl7StoreIamBindingConditionOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v Hl7StoreIamBindingCondition) string { return v.Title }).(pulumi.StringOutput)
}
type Hl7StoreIamBindingConditionPtrOutput struct { *pulumi.OutputState}
func (Hl7StoreIamBindingConditionPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**Hl7StoreIamBindingCondition)(nil)).Elem()
}
func (o Hl7StoreIamBindingConditionPtrOutput) ToHl7StoreIamBindingConditionPtrOutput() Hl7StoreIamBindingConditionPtrOutput {
return o
}
func (o Hl7StoreIamBindingConditionPtrOutput) ToHl7StoreIamBindingConditionPtrOutputWithContext(ctx context.Context) Hl7StoreIamBindingConditionPtrOutput {
return o
}
func (o Hl7StoreIamBindingConditionPtrOutput) Elem() Hl7StoreIamBindingConditionOutput {
return o.ApplyT(func (v *Hl7StoreIamBindingCondition) Hl7StoreIamBindingCondition { return *v }).(Hl7StoreIamBindingConditionOutput)
}
func (o Hl7StoreIamBindingConditionPtrOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v Hl7StoreIamBindingCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o Hl7StoreIamBindingConditionPtrOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v Hl7StoreIamBindingCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o Hl7StoreIamBindingConditionPtrOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v Hl7StoreIamBindingCondition) string { return v.Title }).(pulumi.StringOutput)
}
type Hl7StoreIamMemberCondition struct {
Description *string `pulumi:"description"`
Expression string `pulumi:"expression"`
Title string `pulumi:"title"`
}
type Hl7StoreIamMemberConditionInput interface {
pulumi.Input
ToHl7StoreIamMemberConditionOutput() Hl7StoreIamMemberConditionOutput
ToHl7StoreIamMemberConditionOutputWithContext(context.Context) Hl7StoreIamMemberConditionOutput
}
type Hl7StoreIamMemberConditionArgs struct {
Description pulumi.StringPtrInput `pulumi:"description"`
Expression pulumi.StringInput `pulumi:"expression"`
Title pulumi.StringInput `pulumi:"title"`
}
func (Hl7StoreIamMemberConditionArgs) ElementType() reflect.Type {
return reflect.TypeOf((*Hl7StoreIamMemberCondition)(nil)).Elem()
}
func (i Hl7StoreIamMemberConditionArgs) ToHl7StoreIamMemberConditionOutput() Hl7StoreIamMemberConditionOutput {
return i.ToHl7StoreIamMemberConditionOutputWithContext(context.Background())
}
func (i Hl7StoreIamMemberConditionArgs) ToHl7StoreIamMemberConditionOutputWithContext(ctx context.Context) Hl7StoreIamMemberConditionOutput {
return pulumi.ToOutputWithContext(ctx, i).(Hl7StoreIamMemberConditionOutput)
}
func (i Hl7StoreIamMemberConditionArgs) ToHl7StoreIamMemberConditionPtrOutput() Hl7StoreIamMemberConditionPtrOutput {
return i.ToHl7StoreIamMemberConditionPtrOutputWithContext(context.Background())
}
func (i Hl7StoreIamMemberConditionArgs) ToHl7StoreIamMemberConditionPtrOutputWithContext(ctx context.Context) Hl7StoreIamMemberConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(Hl7StoreIamMemberConditionOutput).ToHl7StoreIamMemberConditionPtrOutputWithContext(ctx)
}
type Hl7StoreIamMemberConditionPtrInput interface {
pulumi.Input
ToHl7StoreIamMemberConditionPtrOutput() Hl7StoreIamMemberConditionPtrOutput
ToHl7StoreIamMemberConditionPtrOutputWithContext(context.Context) Hl7StoreIamMemberConditionPtrOutput
}
type hl7StoreIamMemberConditionPtrType Hl7StoreIamMemberConditionArgs
func Hl7StoreIamMemberConditionPtr(v *Hl7StoreIamMemberConditionArgs) Hl7StoreIamMemberConditionPtrInput { return (*hl7StoreIamMemberConditionPtrType)(v)
}
func (*hl7StoreIamMemberConditionPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**Hl7StoreIamMemberCondition)(nil)).Elem()
}
func (i *hl7StoreIamMemberConditionPtrType) ToHl7StoreIamMemberConditionPtrOutput() Hl7StoreIamMemberConditionPtrOutput {
return i.ToHl7StoreIamMemberConditionPtrOutputWithContext(context.Background())
}
func (i *hl7StoreIamMemberConditionPtrType) ToHl7StoreIamMemberConditionPtrOutputWithContext(ctx context.Context) Hl7StoreIamMemberConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(Hl7StoreIamMemberConditionPtrOutput)
}
type Hl7StoreIamMemberConditionOutput struct { *pulumi.OutputState }
func (Hl7StoreIamMemberConditionOutput) ElementType() reflect.Type {
return reflect.TypeOf((*Hl7StoreIamMemberCondition)(nil)).Elem()
}
func (o Hl7StoreIamMemberConditionOutput) ToHl7StoreIamMemberConditionOutput() Hl7StoreIamMemberConditionOutput {
return o
}
func (o Hl7StoreIamMemberConditionOutput) ToHl7StoreIamMemberConditionOutputWithContext(ctx context.Context) Hl7StoreIamMemberConditionOutput {
return o
}
func (o Hl7StoreIamMemberConditionOutput) ToHl7StoreIamMemberConditionPtrOutput() Hl7StoreIamMemberConditionPtrOutput {
return o.ToHl7StoreIamMemberConditionPtrOutputWithContext(context.Background())
}
func (o Hl7StoreIamMemberConditionOutput) ToHl7StoreIamMemberConditionPtrOutputWithContext(ctx context.Context) Hl7StoreIamMemberConditionPtrOutput {
return o.ApplyT(func(v Hl7StoreIamMemberCondition) *Hl7StoreIamMemberCondition {
return &v
}).(Hl7StoreIamMemberConditionPtrOutput)
}
func (o Hl7StoreIamMemberConditionOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v Hl7StoreIamMemberCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o Hl7StoreIamMemberConditionOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v Hl7StoreIamMemberCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o Hl7StoreIamMemberConditionOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v Hl7StoreIamMemberCondition) string { return v.Title }).(pulumi.StringOutput)
}
type Hl7StoreIamMemberConditionPtrOutput struct { *pulumi.OutputState}
func (Hl7StoreIamMemberConditionPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**Hl7StoreIamMemberCondition)(nil)).Elem()
}
func (o Hl7StoreIamMemberConditionPtrOutput) ToHl7StoreIamMemberConditionPtrOutput() Hl7StoreIamMemberConditionPtrOutput {
return o
}
func (o Hl7StoreIamMemberConditionPtrOutput) ToHl7StoreIamMemberConditionPtrOutputWithContext(ctx context.Context) Hl7StoreIamMemberConditionPtrOutput {
return o
}
func (o Hl7StoreIamMemberConditionPtrOutput) Elem() Hl7StoreIamMemberConditionOutput {
return o.ApplyT(func (v *Hl7StoreIamMemberCondition) Hl7StoreIamMemberCondition { return *v }).(Hl7StoreIamMemberConditionOutput)
}
func (o Hl7StoreIamMemberConditionPtrOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func (v Hl7StoreIamMemberCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o Hl7StoreIamMemberConditionPtrOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func (v Hl7StoreIamMemberCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o Hl7StoreIamMemberConditionPtrOutput) Title() pulumi.StringOutput {
return o.ApplyT(func (v Hl7StoreIamMemberCondition) string { return v.Title }).(pulumi.StringOutput)
}
type Hl7StoreNotificationConfig struct {
PubsubTopic string `pulumi:"pubsubTopic"`
}
type Hl7StoreNotificationConfigInput interface {
pulumi.Input
ToHl7StoreNotificationConfigOutput() Hl7StoreNotificationConfigOutput
ToHl7StoreNotificationConfigOutputWithContext(context.Context) Hl7StoreNotificationConfigOutput
}
type Hl7StoreNotificationConfigArgs struct {
PubsubTopic pulumi.StringInput `pulumi:"pubsubTopic"`
}
func (Hl7StoreNotificationConfigArgs) ElementType() reflect.Type {
return reflect.TypeOf((*Hl7StoreNotificationConfig)(nil)).Elem()
}
func (i Hl7StoreNotificationConfigArgs) ToHl7StoreNotificationConfigOutput() Hl7StoreNotificationConfigOutput {
return i.ToHl7StoreNotificationConfigOutputWithContext(context.Background())
}
func (i Hl7StoreNotificationConfigArgs) ToHl7StoreNotificationConfigOutputWithContext(ctx context.Context) Hl7StoreNotificationConfigOutput {
return pulumi.ToOutputWithContext(ctx, i).(Hl7StoreNotificationConfigOutput)
}
func (i Hl7StoreNotificationConfigArgs) ToHl7StoreNotificationConfigPtrOutput() Hl7StoreNotificationConfigPtrOutput {
return i.ToHl7StoreNotificationConfigPtrOutputWithContext(context.Background())
}
func (i Hl7StoreNotificationConfigArgs) ToHl7StoreNotificationConfigPtrOutputWithContext(ctx context.Context) Hl7StoreNotificationConfigPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(Hl7StoreNotificationConfigOutput).ToHl7StoreNotificationConfigPtrOutputWithContext(ctx)
}
type Hl7StoreNotificationConfigPtrInput interface {
pulumi.Input
ToHl7StoreNotificationConfigPtrOutput() Hl7StoreNotificationConfigPtrOutput
ToHl7StoreNotificationConfigPtrOutputWithContext(context.Context) Hl7StoreNotificationConfigPtrOutput
}
type hl7StoreNotificationConfigPtrType Hl7StoreNotificationConfigArgs
func Hl7StoreNotificationConfigPtr(v *Hl7StoreNotificationConfigArgs) Hl7StoreNotificationConfigPtrInput { return (*hl7StoreNotificationConfigPtrType)(v)
}
func (*hl7StoreNotificationConfigPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**Hl7StoreNotificationConfig)(nil)).Elem()
}
func (i *hl7StoreNotificationConfigPtrType) ToHl7StoreNotificationConfigPtrOutput() Hl7StoreNotificationConfigPtrOutput {
return i.ToHl7StoreNotificationConfigPtrOutputWithContext(context.Background())
}
func (i *hl7StoreNotificationConfigPtrType) ToHl7StoreNotificationConfigPtrOutputWithContext(ctx context.Context) Hl7StoreNotificationConfigPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(Hl7StoreNotificationConfigPtrOutput)
}
type Hl7StoreNotificationConfigOutput struct { *pulumi.OutputState }
func (Hl7StoreNotificationConfigOutput) ElementType() reflect.Type {
return reflect.TypeOf((*Hl7StoreNotificationConfig)(nil)).Elem()
}
func (o Hl7StoreNotificationConfigOutput) ToHl7StoreNotificationConfigOutput() Hl7StoreNotificationConfigOutput {
return o
}
func (o Hl7StoreNotificationConfigOutput) ToHl7StoreNotificationConfigOutputWithContext(ctx context.Context) Hl7StoreNotificationConfigOutput {
return o
}
func (o Hl7StoreNotificationConfigOutput) ToHl7StoreNotificationConfigPtrOutput() Hl7StoreNotificationConfigPtrOutput {
return o.ToHl7StoreNotificationConfigPtrOutputWithContext(context.Background())
}
func (o Hl7StoreNotificationConfigOutput) ToHl7StoreNotificationConfigPtrOutputWithContext(ctx context.Context) Hl7StoreNotificationConfigPtrOutput {
return o.ApplyT(func(v Hl7StoreNotificationConfig) *Hl7StoreNotificationConfig {
return &v
}).(Hl7StoreNotificationConfigPtrOutput)
}
func (o Hl7StoreNotificationConfigOutput) PubsubTopic() pulumi.StringOutput {
return o.ApplyT(func (v Hl7StoreNotificationConfig) string { return v.PubsubTopic }).(pulumi.StringOutput)
}
type Hl7StoreNotificationConfigPtrOutput struct { *pulumi.OutputState}
func (Hl7StoreNotificationConfigPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**Hl7StoreNotificationConfig)(nil)).Elem()
}
func (o Hl7StoreNotificationConfigPtrOutput) ToHl7StoreNotificationConfigPtrOutput() Hl7StoreNotificationConfigPtrOutput {
return o
}
func (o Hl7StoreNotificationConfigPtrOutput) ToHl7StoreNotificationConfigPtrOutputWithContext(ctx context.Context) Hl7StoreNotificationConfigPtrOutput {
return o
}
func (o Hl7StoreNotificationConfigPtrOutput) Elem() Hl7StoreNotificationConfigOutput {
return o.ApplyT(func (v *Hl7StoreNotificationConfig) Hl7StoreNotificationConfig { return *v }).(Hl7StoreNotificationConfigOutput)
}
func (o Hl7StoreNotificationConfigPtrOutput) PubsubTopic() pulumi.StringOutput {
return o.ApplyT(func (v Hl7StoreNotificationConfig) string { return v.PubsubTopic }).(pulumi.StringOutput)
}
type Hl7StoreParserConfig struct {
AllowNullHeader *bool `pulumi:"allowNullHeader"`
SegmentTerminator *string `pulumi:"segmentTerminator"`
}
type Hl7StoreParserConfigInput interface {
pulumi.Input
ToHl7StoreParserConfigOutput() Hl7StoreParserConfigOutput
ToHl7StoreParserConfigOutputWithContext(context.Context) Hl7StoreParserConfigOutput
}
type Hl7StoreParserConfigArgs struct {
AllowNullHeader pulumi.BoolPtrInput `pulumi:"allowNullHeader"`
SegmentTerminator pulumi.StringPtrInput `pulumi:"segmentTerminator"`
}
func (Hl7StoreParserConfigArgs) ElementType() reflect.Type {
return reflect.TypeOf((*Hl7StoreParserConfig)(nil)).Elem()
}
func (i Hl7StoreParserConfigArgs) ToHl7StoreParserConfigOutput() Hl7StoreParserConfigOutput {
return i.ToHl7StoreParserConfigOutputWithContext(context.Background())
}
func (i Hl7StoreParserConfigArgs) ToHl7StoreParserConfigOutputWithContext(ctx context.Context) Hl7StoreParserConfigOutput {
return pulumi.ToOutputWithContext(ctx, i).(Hl7StoreParserConfigOutput)
}
func (i Hl7StoreParserConfigArgs) ToHl7StoreParserConfigPtrOutput() Hl7StoreParserConfigPtrOutput {
return i.ToHl7StoreParserConfigPtrOutputWithContext(context.Background())
}
func (i Hl7StoreParserConfigArgs) ToHl7StoreParserConfigPtrOutputWithContext(ctx context.Context) Hl7StoreParserConfigPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(Hl7StoreParserConfigOutput).ToHl7StoreParserConfigPtrOutputWithContext(ctx)
}
type Hl7StoreParserConfigPtrInput interface {
pulumi.Input
ToHl7StoreParserConfigPtrOutput() Hl7StoreParserConfigPtrOutput
ToHl7StoreParserConfigPtrOutputWithContext(context.Context) Hl7StoreParserConfigPtrOutput
}
type hl7StoreParserConfigPtrType Hl7StoreParserConfigArgs
func Hl7StoreParserConfigPtr(v *Hl7StoreParserConfigArgs) Hl7StoreParserConfigPtrInput { return (*hl7StoreParserConfigPtrType)(v)
}
func (*hl7StoreParserConfigPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**Hl7StoreParserConfig)(nil)).Elem()
}
func (i *hl7StoreParserConfigPtrType) ToHl7StoreParserConfigPtrOutput() Hl7StoreParserConfigPtrOutput {
return i.ToHl7StoreParserConfigPtrOutputWithContext(context.Background())
}
func (i *hl7StoreParserConfigPtrType) ToHl7StoreParserConfigPtrOutputWithContext(ctx context.Context) Hl7StoreParserConfigPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(Hl7StoreParserConfigPtrOutput)
}
type Hl7StoreParserConfigOutput struct { *pulumi.OutputState }
func (Hl7StoreParserConfigOutput) ElementType() reflect.Type {
return reflect.TypeOf((*Hl7StoreParserConfig)(nil)).Elem()
}
func (o Hl7StoreParserConfigOutput) ToHl7StoreParserConfigOutput() Hl7StoreParserConfigOutput {
return o
}
func (o Hl7StoreParserConfigOutput) ToHl7StoreParserConfigOutputWithContext(ctx context.Context) Hl7StoreParserConfigOutput {
return o
}
func (o Hl7StoreParserConfigOutput) ToHl7StoreParserConfigPtrOutput() Hl7StoreParserConfigPtrOutput {
return o.ToHl7StoreParserConfigPtrOutputWithContext(context.Background())
}
func (o Hl7StoreParserConfigOutput) ToHl7StoreParserConfigPtrOutputWithContext(ctx context.Context) Hl7StoreParserConfigPtrOutput {
return o.ApplyT(func(v Hl7StoreParserConfig) *Hl7StoreParserConfig {
return &v
}).(Hl7StoreParserConfigPtrOutput)
}
func (o Hl7StoreParserConfigOutput) AllowNullHeader() pulumi.BoolPtrOutput {
return o.ApplyT(func (v Hl7StoreParserConfig) *bool { return v.AllowNullHeader }).(pulumi.BoolPtrOutput)
}
func (o Hl7StoreParserConfigOutput) SegmentTerminator() pulumi.StringPtrOutput {
return o.ApplyT(func (v Hl7StoreParserConfig) *string { return v.SegmentTerminator }).(pulumi.StringPtrOutput)
}
type Hl7StoreParserConfigPtrOutput struct { *pulumi.OutputState}
func (Hl7StoreParserConfigPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**Hl7StoreParserConfig)(nil)).Elem()
}
func (o Hl7StoreParserConfigPtrOutput) ToHl7StoreParserConfigPtrOutput() Hl7StoreParserConfigPtrOutput {
return o
}
func (o Hl7StoreParserConfigPtrOutput) ToHl7StoreParserConfigPtrOutputWithContext(ctx context.Context) Hl7StoreParserConfigPtrOutput {
return o
}
func (o Hl7StoreParserConfigPtrOutput) Elem() Hl7StoreParserConfigOutput {
return o.ApplyT(func (v *Hl7StoreParserConfig) Hl7StoreParserConfig { return *v }).(Hl7StoreParserConfigOutput)
}
func (o Hl7StoreParserConfigPtrOutput) AllowNullHeader() pulumi.BoolPtrOutput {
return o.ApplyT(func (v Hl7StoreParserConfig) *bool { return v.AllowNullHeader }).(pulumi.BoolPtrOutput)
}
func (o Hl7StoreParserConfigPtrOutput) SegmentTerminator() pulumi.StringPtrOutput {
return o.ApplyT(func (v Hl7StoreParserConfig) *string { return v.SegmentTerminator }).(pulumi.StringPtrOutput)
}
func init() {
pulumi.RegisterOutputType(DatasetIamBindingConditionOutput{})
pulumi.RegisterOutputType(DatasetIamBindingConditionPtrOutput{})
pulumi.RegisterOutputType(DatasetIamMemberConditionOutput{})
pulumi.RegisterOutputType(DatasetIamMemberConditionPtrOutput{})
pulumi.RegisterOutputType(DicomStoreIamBindingConditionOutput{})
pulumi.RegisterOutputType(DicomStoreIamBindingConditionPtrOutput{})
pulumi.RegisterOutputType(DicomStoreIamMemberConditionOutput{})
pulumi.RegisterOutputType(DicomStoreIamMemberConditionPtrOutput{})
pulumi.RegisterOutputType(DicomStoreNotificationConfigOutput{})
pulumi.RegisterOutputType(DicomStoreNotificationConfigPtrOutput{})
pulumi.RegisterOutputType(FhirStoreIamBindingConditionOutput{})
pulumi.RegisterOutputType(FhirStoreIamBindingConditionPtrOutput{})
pulumi.RegisterOutputType(FhirStoreIamMemberConditionOutput{})
pulumi.RegisterOutputType(FhirStoreIamMemberConditionPtrOutput{})
pulumi.RegisterOutputType(FhirStoreNotificationConfigOutput{})
pulumi.RegisterOutputType(FhirStoreNotificationConfigPtrOutput{})
pulumi.RegisterOutputType(Hl7StoreIamBindingConditionOutput{})
pulumi.RegisterOutputType(Hl7StoreIamBindingConditionPtrOutput{})
pulumi.RegisterOutputType(Hl7StoreIamMemberConditionOutput{})
pulumi.RegisterOutputType(Hl7StoreIamMemberConditionPtrOutput{})
pulumi.RegisterOutputType(Hl7StoreNotificationConfigOutput{})
pulumi.RegisterOutputType(Hl7StoreNotificationConfigPtrOutput{})
pulumi.RegisterOutputType(Hl7StoreParserConfigOutput{})
pulumi.RegisterOutputType(Hl7StoreParserConfigPtrOutput{})
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
cmd/kubernetesDeploy_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type kubernetesDeployOptions struct {
AdditionalParameters []string `json:"additionalParameters,omitempty"`
APIServer string `json:"apiServer,omitempty"`
AppTemplate string `json:"appTemplate,omitempty"`
ChartPath string `json:"chartPath,omitempty"`
ContainerRegistryPassword string `json:"containerRegistryPassword,omitempty"`
ContainerRegistryURL string `json:"containerRegistryUrl,omitempty"`
ContainerRegistryUser string `json:"containerRegistryUser,omitempty"`
ContainerRegistrySecret string `json:"containerRegistrySecret,omitempty"`
CreateDockerRegistrySecret bool `json:"createDockerRegistrySecret,omitempty"`
DeploymentName string `json:"deploymentName,omitempty"`
DeployTool string `json:"deployTool,omitempty"`
HelmDeployWaitSeconds int `json:"helmDeployWaitSeconds,omitempty"`
HelmValues []string `json:"helmValues,omitempty"`
Image string `json:"image,omitempty"`
IngressHosts []string `json:"ingressHosts,omitempty"`
KubeConfig string `json:"kubeConfig,omitempty"`
KubeContext string `json:"kubeContext,omitempty"`
KubeToken string `json:"kubeToken,omitempty"`
Namespace string `json:"namespace,omitempty"`
TillerNamespace string `json:"tillerNamespace,omitempty"`
}
// KubernetesDeployCommand Deployment to Kubernetes test or production namespace within the specified Kubernetes cluster.
func KubernetesDeployCommand() *cobra.Command {
const STEP_NAME = "kubernetesDeploy"
metadata := kubernetesDeployMetadata()
var stepConfig kubernetesDeployOptions
var startTime time.Time
var createKubernetesDeployCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Deployment to Kubernetes test or production namespace within the specified Kubernetes cluster.",
Long: `Deployment to Kubernetes test or production namespace within the specified Kubernetes cluster.
!!! note "Deployment supports multiple deployment tools"
Currently the following are supported:
* [Helm](https://helm.sh/) command line tool and [Helm Charts](https://docs.helm.sh/developing_charts/#charts).
* [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and ` + "`" + `kubectl apply` + "`" + ` command.
## Helm
Following helm command will be executed by default:
` + "`" + `` + "`" + `` + "`" + `
helm upgrade <deploymentName> <chartPath> --install --force --namespace <namespace> --wait --timeout <helmDeployWaitSeconds> --set "image.repository=<yourRegistry>/<yourImageName>,image.tag=<yourImageTag>,secret.dockerconfigjson=<dockerSecret>,ingress.hosts[0]=<ingressHosts[0]>,ingress.hosts[1]=<ingressHosts[1]>,...
` + "`" + `` + "`" + `` + "`" + `
* ` + "`" + `yourRegistry` + "`" + ` will be retrieved from ` + "`" + `containerRegistryUrl` + "`" + `
* ` + "`" + `yourImageName` + "`" + `, ` + "`" + `yourImageTag` + "`" + ` will be retrieved from ` + "`" + `image` + "`" + `
* ` + "`" + `dockerSecret` + "`" + ` will be calculated with a call to ` + "`" + `kubectl create secret docker-registry regsecret --docker-server=<yourRegistry> --docker-username=<containerRegistryUser> --docker-password=<containerRegistryPassword> --dry-run=true --output=json'` + "`" + ``,
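// Illustrative example (hypothetical values): with deploymentName=myApp, chartPath=helm/myApp,
// namespace=prod, helmDeployWaitSeconds=300, containerRegistryUrl=https://my.registry.example,
// image=my-image:1.2.3 and ingressHosts=[app.example.com], the step would roughly execute:
//   helm upgrade myApp helm/myApp --install --force --namespace prod --wait --timeout 300 \
//     --set "image.repository=my.registry.example/my-image,image.tag=1.2.3,secret.dockerconfigjson=<dockerSecret>,ingress.hosts[0]=app.example.com"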
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.ContainerRegistryPassword)
log.RegisterSecret(stepConfig.ContainerRegistryUser)
log.RegisterSecret(stepConfig.KubeConfig)
log.RegisterSecret(stepConfig.KubeToken)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetry.Send(&telemetryData)
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
kubernetesDeploy(stepConfig, &telemetryData)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addKubernetesDeployFlags(createKubernetesDeployCmd, &stepConfig)
return createKubernetesDeployCmd
}
func addKubernetesDeployFlags(cmd *cobra.Command, stepConfig *kubernetesDeployOptions) {
cmd.Flags().StringSliceVar(&stepConfig.AdditionalParameters, "additionalParameters", []string{}, "Defines additional parameters for \"helm install\" or \"kubectl apply\" command.")
cmd.Flags().StringVar(&stepConfig.APIServer, "apiServer", os.Getenv("PIPER_apiServer"), "Defines the Url of the API Server of the Kubernetes cluster.")
cmd.Flags().StringVar(&stepConfig.AppTemplate, "appTemplate", os.Getenv("PIPER_appTemplate"), "Defines the filename for the kubernetes app template (e.g. k8s_apptemplate.yaml)")
cmd.Flags().StringVar(&stepConfig.ChartPath, "chartPath", os.Getenv("PIPER_chartPath"), "Defines the chart path for deployments using helm.")
cmd.Flags().StringVar(&stepConfig.ContainerRegistryPassword, "containerRegistryPassword", os.Getenv("PIPER_containerRegistryPassword"), "Password for container registry access - typically provided by the CI/CD environment.")
cmd.Flags().StringVar(&stepConfig.ContainerRegistryURL, "containerRegistryUrl", os.Getenv("PIPER_containerRegistryUrl"), "http(s) url of the Container registry where the image to deploy is located.")
cmd.Flags().StringVar(&stepConfig.ContainerRegistryUser, "containerRegistryUser", os.Getenv("PIPER_containerRegistryUser"), "Username for container registry access - typically provided by the CI/CD environment.")
cmd.Flags().StringVar(&stepConfig.ContainerRegistrySecret, "containerRegistrySecret", `regsecret`, "Name of the container registry secret used for pulling containers from the registry.")
cmd.Flags().BoolVar(&stepConfig.CreateDockerRegistrySecret, "createDockerRegistrySecret", false, "Only for `deployTool:kubectl`: Toggle to turn on `containerRegistrySecret` creation.")
cmd.Flags().StringVar(&stepConfig.DeploymentName, "deploymentName", os.Getenv("PIPER_deploymentName"), "Defines the name of the deployment.")
cmd.Flags().StringVar(&stepConfig.DeployTool, "deployTool", `kubectl`, "Defines the tool which should be used for deployment.")
cmd.Flags().IntVar(&stepConfig.HelmDeployWaitSeconds, "helmDeployWaitSeconds", 300, "Number of seconds before helm deploy returns.")
cmd.Flags().StringSliceVar(&stepConfig.HelmValues, "helmValues", []string{}, "List of helm values as YAML file reference or URL (as per helm parameter description for `-f` / `--values`)")
cmd.Flags().StringVar(&stepConfig.Image, "image", os.Getenv("PIPER_image"), "Full name of the image to be deployed.")
cmd.Flags().StringSliceVar(&stepConfig.IngressHosts, "ingressHosts", []string{}, "(Deprecated) List of ingress hosts to be exposed via helm deployment.")
cmd.Flags().StringVar(&stepConfig.KubeConfig, "kubeConfig", os.Getenv("PIPER_kubeConfig"), "Defines the path to the \"kubeconfig\" file.")
cmd.Flags().StringVar(&stepConfig.KubeContext, "kubeContext", os.Getenv("PIPER_kubeContext"), "Defines the context to use from the \"kubeconfig\" file.")
cmd.Flags().StringVar(&stepConfig.KubeToken, "kubeToken", os.Getenv("PIPER_kubeToken"), "Contains the id_token used by kubectl for authentication. Consider using kubeConfig parameter instead.")
cmd.Flags().StringVar(&stepConfig.Namespace, "namespace", `default`, "Defines the target Kubernetes namespace for the deployment.")
cmd.Flags().StringVar(&stepConfig.TillerNamespace, "tillerNamespace", os.Getenv("PIPER_tillerNamespace"), "Defines optional tiller namespace for deployments using helm.")
cmd.MarkFlagRequired("chartPath")
cmd.MarkFlagRequired("containerRegistryUrl")
cmd.MarkFlagRequired("deploymentName")
cmd.MarkFlagRequired("deployTool")
cmd.MarkFlagRequired("image")
}
// retrieve step metadata
func kubernetesDeployMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "kubernetesDeploy",
Aliases: []config.Alias{{Name: "deployToKubernetes", Deprecated: true}},
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "additionalParameters",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{{Name: "helmDeploymentParameters"}},
},
{
Name: "apiServer",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "k8sAPIServer"}},
},
{
Name: "appTemplate",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "k8sAppTemplate"}},
},
{
Name: "chartPath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "helmChartPath"}},
},
{
Name: "containerRegistryPassword",
ResourceRef: []config.ResourceReference{
{
Name: "dockerCredentialsId",
Param: "password",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "containerRegistryUrl",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "container/registryUrl",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "dockerRegistryUrl"}},
},
{
Name: "containerRegistryUser",
ResourceRef: []config.ResourceReference{
{
Name: "dockerCredentialsId",
Param: "username",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "containerRegistrySecret",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "createDockerRegistrySecret",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "deploymentName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "helmDeploymentName"}},
},
{
Name: "deployTool",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "helmDeployWaitSeconds",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "int",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "helmValues",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "image",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "container/imageNameTag",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "deployImage"}},
},
{
Name: "ingressHosts",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "kubeConfig",
ResourceRef: []config.ResourceReference{
{
Name: "kubeConfigFileCredentialsId",
Type: "secret",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "kubeContext",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "kubeToken",
ResourceRef: []config.ResourceReference{
{
Name: "kubeTokenCredentialsId",
Type: "secret",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "namespace",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "helmDeploymentNamespace"}, {Name: "k8sDeploymentNamespace"}},
},
{
Name: "tillerNamespace",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "helmTillerNamespace"}},
},
},
},
},
}
return theMetaData
}
|
[
"\"PIPER_apiServer\"",
"\"PIPER_appTemplate\"",
"\"PIPER_chartPath\"",
"\"PIPER_containerRegistryPassword\"",
"\"PIPER_containerRegistryUrl\"",
"\"PIPER_containerRegistryUser\"",
"\"PIPER_deploymentName\"",
"\"PIPER_image\"",
"\"PIPER_kubeConfig\"",
"\"PIPER_kubeContext\"",
"\"PIPER_kubeToken\"",
"\"PIPER_tillerNamespace\""
] |
[] |
[
"PIPER_deploymentName",
"PIPER_tillerNamespace",
"PIPER_appTemplate",
"PIPER_containerRegistryPassword",
"PIPER_chartPath",
"PIPER_image",
"PIPER_kubeConfig",
"PIPER_containerRegistryUser",
"PIPER_containerRegistryUrl",
"PIPER_apiServer",
"PIPER_kubeToken",
"PIPER_kubeContext"
] |
[]
|
["PIPER_deploymentName", "PIPER_tillerNamespace", "PIPER_appTemplate", "PIPER_containerRegistryPassword", "PIPER_chartPath", "PIPER_image", "PIPER_kubeConfig", "PIPER_containerRegistryUser", "PIPER_containerRegistryUrl", "PIPER_apiServer", "PIPER_kubeToken", "PIPER_kubeContext"]
|
go
| 12 | 0 | |
tools/train_voc.py
|
# -*- coding: utf-8 -*-
# @Time : 2018/9/26 15:48
# @Author : HLin
# @Email : [email protected]
# @File : train_voc.py
# @Software: PyCharm
import os
import pprint
import logging
import argparse
import torch
import torch.nn as nn
from tqdm import tqdm
import numpy as np
from math import ceil
from distutils.version import LooseVersion
from tensorboardX import SummaryWriter
import sys
sys.path.append(os.path.abspath('..'))
from graphs.models.sync_batchnorm.replicate import patch_replication_callback
from utils.data_utils import calculate_weigths_labels
from utils import Eval
from graphs.models.decoder import DeepLab
from datasets.NYU_Dataset import Nyud2_DataLoader
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
class Trainer():
def __init__(self, args, cuda=None):
self.args = args
os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu
self.cuda = cuda and torch.cuda.is_available()
self.device = torch.device('cuda' if self.cuda else 'cpu')
self.current_MIoU = 0
self.best_MIou = 0
self.current_epoch = 0
self.current_iter = 0
# set TensorboardX
self.writer = SummaryWriter()
# Metric definition
self.Eval = Eval(self.args.num_classes)
# loss definition
if self.args.loss_weight_file is not None:
classes_weights_path = os.path.join(self.args.loss_weights_dir, self.args.loss_weight_file)
print(classes_weights_path)
if not os.path.isfile(classes_weights_path):
logger.info('calculating class weights...')
calculate_weigths_labels(self.args)
class_weights = np.load(classes_weights_path)
pprint.pprint(class_weights)
weight = torch.from_numpy(class_weights.astype(np.float32))
logger.info('loading class weights successfully!')
else:
weight = None
self.loss = nn.CrossEntropyLoss(weight=weight, ignore_index=255)
self.loss.to(self.device)
# model
self.model = DeepLab(output_stride=self.args.output_stride,
class_num=self.args.num_classes,
pretrained=self.args.imagenet_pretrained and self.args.pretrained_ckpt_file is None,
bn_momentum=self.args.bn_momentum,
freeze_bn=self.args.freeze_bn)
self.model = nn.DataParallel(self.model, device_ids=range(ceil(len(self.args.gpu)/2)))
patch_replication_callback(self.model)
self.model.to(self.device)
self.optimizer = torch.optim.SGD(
params=[
{
"params": self.get_params(self.model.module, key="1x"),
"lr": self.args.lr,
},
{
"params": self.get_params(self.model.module, key="10x"),
"lr": 10 * self.args.lr,
},
],
momentum=self.args.momentum,
# dampening=self.args.dampening,
weight_decay=self.args.weight_decay,
# nesterov=self.args.nesterov
)
# dataloader
self.dataloader = Nyud2_DataLoader(self.args)
self.epoch_num = ceil(self.args.iter_max / self.dataloader.train_iterations)
def main(self):
# display args details
logger.info("Global configuration as follows:")
for key, val in vars(self.args).items():
logger.info("{:16} {}".format(key, val))
# choose cuda
if self.cuda:
# torch.cuda.set_device(4)
current_device = torch.cuda.current_device()
logger.info("This model will run on {}".format(torch.cuda.get_device_name(current_device)))
else:
logger.info("This model will run on CPU")
# load pretrained checkpoint
if self.args.pretrained_ckpt_file is not None:
self.load_checkpoint(self.args.pretrained_ckpt_file)
# train
self.train()
self.writer.close()
def train(self):
for epoch in tqdm(range(self.current_epoch, self.epoch_num),
desc="Total {} epochs".format(self.epoch_num)):
self.current_epoch = epoch
# self.scheduler.step(epoch)
self.train_one_epoch()
# validate
if self.args.validation:
PA, MPA, MIoU, FWIoU = self.validate()
self.writer.add_scalar('PA', PA, self.current_epoch)
self.writer.add_scalar('MPA', MPA, self.current_epoch)
self.writer.add_scalar('MIoU', MIoU, self.current_epoch)
self.writer.add_scalar('FWIoU', FWIoU, self.current_epoch)
self.current_MIoU = MIoU
is_best = MIoU > self.best_MIou
if is_best:
self.best_MIou = MIoU
self.save_checkpoint(is_best, train_id+'best.pth')
state = {
'epoch': self.current_epoch + 1,
'iteration': self.current_iter,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_MIou': self.current_MIoU
}
logger.info("=>saving the final checkpoint...")
torch.save(state, train_id + 'final.pth')
def train_one_epoch(self):
tqdm_epoch = tqdm(self.dataloader.train_loader, total=self.dataloader.train_iterations,
desc="Train Epoch-{}-".format(self.current_epoch+1))
logger.info("Training one epoch...")
self.Eval.reset()
# Set the model to be in training mode (for batchnorm and dropout)
train_loss = []
self.model.train()
# Initialize your average meters
batch_idx = 0
for x, y, depth in tqdm_epoch:
self.poly_lr_scheduler(
optimizer=self.optimizer,
init_lr=self.args.lr,
iter=self.current_iter,
max_iter=self.args.iter_max,
power=self.args.poly_power,
)
if self.current_iter >= self.args.iter_max:
logger.info("iteration reached {}!".format(self.args.iter_max))
break
self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]["lr"], self.current_iter)
self.writer.add_scalar('learning_rate_10x', self.optimizer.param_groups[1]["lr"], self.current_iter)
# y.to(torch.long)
if self.cuda:
x, y, depth = x.to(self.device), y.to(device=self.device, dtype=torch.long), depth.to(self.device)
self.optimizer.zero_grad()
# model
pred = self.model(x,depth)
# logger.info("pre:{}".format(pred.data.cpu().numpy()))
y = torch.squeeze(y, 1)
# logger.info("y:{}".format(y.cpu().numpy()))
# pred_s = F.softmax(pred, dim=1)
# loss
cur_loss = self.loss(pred, y)
# optimizer
cur_loss.backward()
self.optimizer.step()
train_loss.append(cur_loss.item())
if batch_idx % 50 == 0:
logger.info("The train loss of epoch{}-batch-{}:{}".format(self.current_epoch,
batch_idx, cur_loss.item()))
batch_idx += 1
self.current_iter += 1
# print(cur_loss)
if np.isnan(float(cur_loss.item())):
raise ValueError('Loss is nan during training...')
pred = pred.data.cpu().numpy()
label = y.cpu().numpy()
argpred = np.argmax(pred, axis=1)
self.Eval.add_batch(label, argpred)
PA = self.Eval.Pixel_Accuracy()
MPA = self.Eval.Mean_Pixel_Accuracy()
MIoU = self.Eval.Mean_Intersection_over_Union()
FWIoU = self.Eval.Frequency_Weighted_Intersection_over_Union()
logger.info('Epoch:{}, train PA1:{}, MPA1:{}, MIoU1:{}, FWIoU1:{}'.format(self.current_epoch, PA, MPA,
MIoU, FWIoU))
tr_loss = sum(train_loss)/len(train_loss)
self.writer.add_scalar('train_loss', tr_loss, self.current_epoch)
tqdm.write("The average loss of train epoch-{}-:{}".format(self.current_epoch, tr_loss))
tqdm_epoch.close()
def validate(self):
logger.info('validating one epoch...')
self.Eval.reset()
with torch.no_grad():
tqdm_batch = tqdm(self.dataloader.valid_loader, total=self.dataloader.valid_iterations,
desc="Val Epoch-{}-".format(self.current_epoch + 1))
val_loss = []
preds = []
lab = []
self.model.eval()
for x, y, id in tqdm_batch:
# y.to(torch.long)
if self.cuda:
x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)
# model
pred = self.model(x)
y = torch.squeeze(y, 1)
cur_loss = self.loss(pred, y)
if np.isnan(float(cur_loss.item())):
raise ValueError('Loss is nan during validating...')
val_loss.append(cur_loss.item())
# if self.args.store_result == True and self.current_epoch == 20:
# for i in range(len(id)):
# result = Image.fromarray(np.asarray(argpred, dtype=np.uint8)[i], mode='P')
# # logger.info("before:{}".format(result.mode))
# result = result.convert("RGB")
# # logger.info("after:{}".format(result.mode))
# # logger.info("shape:{}".format(result.getpixel((1,1))))
# result.save(self.args.result_filepath + id[i] + '.png')
pred = pred.data.cpu().numpy()
label = y.cpu().numpy()
argpred = np.argmax(pred, axis=1)
self.Eval.add_batch(label, argpred)
PA = self.Eval.Pixel_Accuracy()
MPA = self.Eval.Mean_Pixel_Accuracy()
MIoU = self.Eval.Mean_Intersection_over_Union()
FWIoU = self.Eval.Frequency_Weighted_Intersection_over_Union()
logger.info('Epoch:{}, validation PA1:{}, MPA1:{}, MIoU1:{}, FWIoU1:{}'.format(self.current_epoch, PA, MPA,
MIoU, FWIoU))
v_loss = sum(val_loss) / len(val_loss)
logger.info("The average loss of val loss:{}".format(v_loss))
self.writer.add_scalar('val_loss', v_loss, self.current_epoch)
# logger.info(score)
tqdm_batch.close()
return PA, MPA, MIoU, FWIoU
def save_checkpoint(self, is_best, filename=None):
"""
Save a checkpoint if a new best MIoU is achieved
:param is_best: whether the current MIoU is the best observed so far
:param filename: name of the checkpoint file (stored under checkpoint_dir)
:return:
"""
filename = os.path.join(self.args.checkpoint_dir, filename)
state = {
'epoch': self.current_epoch + 1,
'iteration': self.current_iter,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_MIou':self.best_MIou
}
if is_best:
logger.info("=>saving a new best checkpoint...")
torch.save(state, filename)
else:
logger.info("=> The MIoU of val doesn't improve.")
def load_checkpoint(self, filename):
filename = os.path.join(self.args.checkpoint_dir, filename)
try:
logger.info("Loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
# self.current_epoch = checkpoint['epoch']
# self.current_iter = checkpoint['iteration']
self.model.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.best_MIou = checkpoint['best_MIou']
logger.info("Checkpoint loaded successfully from '{}' at (epoch {}) at (iteration {},MIoU:{})\n"
.format(self.args.checkpoint_dir, checkpoint['epoch'], checkpoint['iteration'],
checkpoint['best_MIou']))
except OSError as e:
logger.info("No checkpoint exists from '{}'. Skipping...".format(self.args.checkpoint_dir))
logger.info("**First time to train**")
def get_params(self, model, key):
# "1x": backbone (dilated ResNet101) parameters, trained with the base learning rate
if key == "1x":
for m in model.named_modules():
if "Resnet101" in m[0]:
for p in m[1].parameters():
yield p
# "10x": encoder/decoder head parameters, trained with 10x the base learning rate
if key == "10x":
for m in model.named_modules():
if "encoder" in m[0] or "decoder" in m[0]:
for p in m[1].parameters():
yield p
def poly_lr_scheduler(self, optimizer, init_lr, iter, max_iter, power):
new_lr = init_lr * (1 - float(iter) / max_iter) ** power
optimizer.param_groups[0]["lr"] = new_lr
optimizer.param_groups[1]["lr"] = 10 * new_lr
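# Note: poly_lr_scheduler implements the "poly" learning-rate policy used by DeepLab:
#   new_lr = init_lr * (1 - iter / max_iter) ** power
# e.g. with the defaults lr=0.007, poly_power=0.9 and iter_max=30000, at iteration 15000
# the backbone group gets 0.007 * 0.5 ** 0.9 ~= 0.00375 and the head group ten times that.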
if __name__ == '__main__':
assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), 'PyTorch>=0.4.0 is required'
arg_parser = argparse.ArgumentParser()
# Path related arguments
arg_parser.add_argument('--data_root_path', type=str, default="/home/feizy/datasets/nyuv2/",
help="the root path of dataset")
arg_parser.add_argument('--checkpoint_dir', default=os.path.abspath('..') + "/checkpoints/",
help="the path of ckpt file")
arg_parser.add_argument('--result_filepath', default="/home/feizy/PycharmProjects/Deeplab-v3plus/data/NYUDV2/Results/",
help="the filepath where mask store")
arg_parser.add_argument('--loss_weights_dir', default="/data/linhua/VOCdevkit/pretrained_weights/")
# Model related arguments
arg_parser.add_argument('--backbone', default='resnet101',
help="backbone of encoder")
arg_parser.add_argument('--output_stride', type=int, default=16, choices=[8, 16],
help="choose from 8 or 16")
arg_parser.add_argument('--bn_momentum', type=float, default=0.1,
help="batch normalization momentum")
arg_parser.add_argument('--imagenet_pretrained', type=str2bool, default=False,
help="whether to apply imagenet pretrained weights")
arg_parser.add_argument('--pretrained_ckpt_file', type=str, default=None,
help="filename of a pretrained checkpoint to load (None to train from scratch)")
arg_parser.add_argument('--save_ckpt_file', type=str2bool, default=True,
help="whether to save trained checkpoint file ")
arg_parser.add_argument('--store_result_mask', type=str2bool, default=True,
help="whether to store the predicted mask after val or test")
arg_parser.add_argument('--loss_weight_file', type=str, default=None,
help="the filename of weights for loss function")
arg_parser.add_argument('--validation', type=str2bool, default=True,
help="whether to run validation after each training epoch")
# train related arguments
arg_parser.add_argument('--gpu', type=str, default="0",
help="ids of the gpus to use, e.g. '0' or '0,1'")
arg_parser.add_argument('--batch_size_per_gpu', default=2, type=int,
help='input batch size')
# dataset related arguments
arg_parser.add_argument('--dataset', default='nyudv2', type=str,
help='dataset choice')
arg_parser.add_argument('--base_size', default=(640,480), type=int,
help='base size of image')
arg_parser.add_argument('--crop_size', default=(640,480), type=int,
help='crop size of image')
arg_parser.add_argument('--num_classes', default=21, type=int,
help='num class of mask')
arg_parser.add_argument('--data_loader_workers', default=16, type=int,
help='num_workers of Dataloader')
arg_parser.add_argument('--pin_memory', default=2, type=int,
help='pin_memory of Dataloader')
arg_parser.add_argument('--split', type=str, default='train',
help="choose from train/val/test/trainval")
# optimization related arguments
arg_parser.add_argument('--freeze_bn', type=str2bool, default=False,
help="whether freeze BatchNormalization")
arg_parser.add_argument('--momentum', type=float, default=0.9)
arg_parser.add_argument('--dampening', type=float, default=0)
arg_parser.add_argument('--nesterov', type=str2bool, default=True)
arg_parser.add_argument('--weight_decay', type=float, default=4e-5)
arg_parser.add_argument('--lr', type=float, default=0.007,
help="initial learning rate")
arg_parser.add_argument('--iter_max', type=int, default=30000,
help="the maximum number of iterations")
arg_parser.add_argument('--poly_power', type=float, default=0.9,
help="power of the poly learning rate decay policy")
arg_parser.add_argument('--batch_size', type=int, default=4)
args = arg_parser.parse_args()
train_id = str(args.backbone) + '_' + str(args.output_stride)
train_id += '_imagenet_pre-' + str(args.imagenet_pretrained)
train_id += '_ckpt_file-' + str(args.pretrained_ckpt_file)
train_id += '_loss_weight_file-' + str(args.loss_weight_file)
train_id += '_batch_size-' + str(args.batch_size)
train_id += '_base_size-' + str(args.base_size)
train_id += '_crop_size-' + str(args.crop_size)
train_id += '_split-' + str(args.split)
train_id += '_lr-' + str(args.lr)
train_id += '_iter_max-' + str(args.iter_max)
# logger configure
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fh = logging.FileHandler(train_id+'.txt')
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
agent = Trainer(args=args, cuda=True)
agent.main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
test/e2e/cli/kube/kubectl.go
|
package kube
import (
"encoding/json"
"fmt"
"os"
"strings"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
"github.com/sethvargo/go-password/password"
v1 "github.com/mongodb/mongodb-atlas-kubernetes/pkg/api/v1"
cli "github.com/mongodb/mongodb-atlas-kubernetes/test/e2e/cli"
)
// GenKubeVersion extracts the major and minor components from a full Kubernetes version string.
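// e.g. GenKubeVersion("1.21.3") returns `Major:"1", Minor:"21"`.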
func GenKubeVersion(fullVersion string) string {
version := strings.Split(fullVersion, ".")
return fmt.Sprintf("Major:\"%s\", Minor:\"%s\"", version[0], version[1])
}
// GetPodStatus status.phase
func GetPodStatus(ns string) func() string {
return func() string {
session := cli.Execute("kubectl", "get", "pods", "-l", "app.kubernetes.io/instance=mongodb-atlas-kubernetes-operator", "-o", "jsonpath={.items[0].status.phase}", "-n", ns)
return string(session.Wait("1m").Out.Contents())
}
}
// DescribeOperatorPod performs "kubectl describe" to get Operator pod information
func DescribeOperatorPod(ns string) string {
session := cli.Execute("kubectl", "describe", "pods", "-l", "app.kubernetes.io/instance=mongodb-atlas-kubernetes-operator", "-n", ns)
return string(session.Wait("1m").Out.Contents())
}
// GetGeneration .status.observedGeneration
func GetGeneration(ns, resourceName string) string {
session := cli.Execute("kubectl", "get", resourceName, "-n", ns, "-o", "jsonpath={.status.observedGeneration}")
return string(session.Wait("1m").Out.Contents())
}
// GetStatusCondition .status.conditions.type=Ready.status
func GetStatusCondition(ns string, atlasname string) func() string {
return func() string {
session := cli.Execute("kubectl", "get", atlasname, "-n", ns, "-o", "jsonpath={.status.conditions[?(@.type=='Ready')].status}")
return string(session.Wait("1m").Out.Contents())
}
}
func GetStatusPhase(ns string, args ...string) string {
args = append([]string{"get"}, args...)
args = append(args, "-o", "jsonpath={..status.phase}", "-n", ns)
session := cli.Execute("kubectl", args...)
return string(session.Wait("1m").Out.Contents())
}
// GetProjectResource
func GetProjectResource(namespace, rName string) v1.AtlasProject {
session := cli.Execute("kubectl", "get", rName, "-n", namespace, "-o", "json")
output := session.Wait("1m").Out.Contents()
var project v1.AtlasProject
ExpectWithOffset(1, json.Unmarshal(output, &project)).ShouldNot(HaveOccurred())
return project
}
// GetClusterResource
func GetClusterResource(namespace, rName string) v1.AtlasCluster {
session := cli.Execute("kubectl", "get", rName, "-n", namespace, "-o", "json")
output := session.Wait("1m").Out.Contents()
var cluster v1.AtlasCluster
ExpectWithOffset(1, json.Unmarshal(output, &cluster)).ShouldNot(HaveOccurred())
return cluster
}
func GetK8sClusterStateName(ns, rName string) string {
return GetClusterResource(ns, rName).Status.StateName
}
func DeleteNamespace(ns string) *Buffer {
session := cli.Execute("kubectl", "delete", "namespace", ns)
return session.Wait("2m").Out
}
func SwitchContext(name string) {
session := cli.Execute("kubectl", "config", "use-context", name)
EventuallyWithOffset(1, session.Wait()).Should(Say("created"))
}
func GetVersionOutput() *Buffer {
session := cli.Execute("kubectl", "version")
return session.Wait().Out
}
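// Apply runs `kubectl apply` for the given arguments: when the first argument is "-k" the
// remaining arguments are passed through as a kustomize target, otherwise "-f" is prepended
// and the arguments are treated as manifest files.
// Illustrative calls (paths are hypothetical): Apply("-k", "deploy/overlays/e2e") or Apply("deploy/crds.yaml").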
func Apply(args ...string) *Buffer {
if args[0] == "-k" {
args = append([]string{"apply"}, args...)
} else {
args = append([]string{"apply", "-f"}, args...)
}
session := cli.Execute("kubectl", args...)
EventuallyWithOffset(1, session).ShouldNot(Say("error"))
return session.Wait().Out
}
func Delete(args ...string) *Buffer {
args = append([]string{"delete", "-f"}, args...)
session := cli.Execute("kubectl", args...)
return session.Wait("10m").Out
}
func DeleteResource(rType, name, ns string) {
session := cli.Execute("kubectl", "delete", rType, name, "-n", ns)
cli.SessionShouldExit(session)
}
func CreateNamespace(name string) *Buffer {
session := cli.Execute("kubectl", "create", "namespace", name)
result := cli.GetSessionExitMsg(session)
ExpectWithOffset(1, result).Should(SatisfyAny(Say("created"), Say("already exists")), "Can't create namespace")
return session.Out
}
func CreateUserSecret(name, ns string) {
secret, _ := password.Generate(10, 3, 0, false, false)
session := cli.ExecuteWithoutWriter("kubectl", "create", "secret", "generic", name,
"--from-literal=password="+secret,
"-n", ns,
)
EventuallyWithOffset(1, session.Wait()).Should(Say(name + " created"))
}
func CreateApiKeySecret(keyName, ns string) { // TODO add ns
session := cli.ExecuteWithoutWriter("kubectl", "create", "secret", "generic", keyName,
"--from-literal=orgId="+os.Getenv("MCLI_ORG_ID"),
"--from-literal=publicApiKey="+os.Getenv("MCLI_PUBLIC_API_KEY"),
"--from-literal=privateApiKey="+os.Getenv("MCLI_PRIVATE_API_KEY"),
"-n", ns,
)
EventuallyWithOffset(1, session.Wait()).Should(Say(keyName + " created"))
}
func CreateApiKeySecretFrom(keyName, ns, orgId, public, private string) { // TODO
session := cli.Execute("kubectl", "create", "secret", "generic", keyName,
"--from-literal=orgId="+os.Getenv("MCLI_ORG_ID"),
"--from-literal=publicApiKey="+public,
"--from-literal=privateApiKey="+private,
"-n", ns,
)
EventuallyWithOffset(1, session.Wait()).Should(Say(keyName + " created"))
}
func DeleteApiKeySecret(keyName, ns string) {
session := cli.Execute("kubectl", "delete", "secret", keyName, "-n", ns)
EventuallyWithOffset(1, session).Should(gexec.Exit(0))
}
func GetManagerLogs(ns string) []byte {
session := cli.ExecuteWithoutWriter("kubectl", "logs", "deploy/mongodb-atlas-operator", "manager", "-n", ns)
EventuallyWithOffset(1, session).Should(gexec.Exit(0))
return session.Out.Contents()
}
func GetTestAppLogs(label, ns string) []byte {
session := cli.ExecuteWithoutWriter("kubectl", "logs", "-l", label, "-n", ns)
EventuallyWithOffset(1, session).Should(gexec.Exit(0))
return session.Out.Contents()
}
func DescribeTestApp(label, ns string) []byte {
session := cli.Execute("kubectl", "describe", "pods", "-l", label, "-n", ns)
return session.Wait("1m").Out.Contents()
}
func GetYamlResource(resource string, ns string) []byte {
session := cli.ExecuteWithoutWriter("kubectl", "get", resource, "-o", "yaml", "-n", ns)
EventuallyWithOffset(1, session).Should(gexec.Exit(0))
return session.Out.Contents()
}
func CreateConfigMapWithLiterals(configName string, ns string, keys ...string) {
args := append([]string{"create", "configmap", configName, "-n", ns}, keys...)
session := cli.Execute("kubectl", args...)
EventuallyWithOffset(1, session).Should(gexec.Exit(0))
}
func HasConfigMap(configName, ns string) bool {
session := cli.Execute("kubectl", "get", "configmap", configName, "-n", ns)
cli.SessionShouldExit(session)
return session.ExitCode() == 0
}
func GetResourceCreationTimestamp(resource, name, ns string) []byte {
session := cli.Execute("kubectl", "get", resource, name, "-n", ns, "-o", "jsonpath={.metadata.creationTimestamp}")
EventuallyWithOffset(1, session).Should(gexec.Exit(0))
return session.Out.Contents()
}
func Annotate(resource, annotation, ns string) {
session := cli.Execute("kubectl", "annotate", resource, annotation, "-n", ns, "--overwrite=true")
EventuallyWithOffset(1, session).Should(gexec.Exit(0))
}
|
[
"\"MCLI_ORG_ID\"",
"\"MCLI_PUBLIC_API_KEY\"",
"\"MCLI_PRIVATE_API_KEY\"",
"\"MCLI_ORG_ID\""
] |
[] |
[
"MCLI_PRIVATE_API_KEY",
"MCLI_ORG_ID",
"MCLI_PUBLIC_API_KEY"
] |
[]
|
["MCLI_PRIVATE_API_KEY", "MCLI_ORG_ID", "MCLI_PUBLIC_API_KEY"]
|
go
| 3 | 0 | |
lib/flows/general/administrative_test.py
|
#!/usr/bin/env python
"""Tests for administrative flows."""
import os
import subprocess
import sys
import time
import psutil
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import email_alerts
from grr.lib import flags
from grr.lib import flow
from grr.lib import maintenance_utils
from grr.lib import queues
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
# pylint: disable=unused-import
from grr.lib.flows.general import administrative
# For AuditEventListener, needed to handle published audit events.
from grr.lib.flows.general import audit as _
from grr.lib.flows.general import discovery
# pylint: enable=unused-import
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import protodict as rdf_protodict
class AdministrativeFlowTests(test_lib.FlowTestsBaseclass):
pass
class TestAdministrativeFlows(AdministrativeFlowTests):
"""Tests the administrative flows."""
def setUp(self):
super(TestAdministrativeFlows, self).setUp()
test_tmp = os.environ.get("TEST_TMPDIR")
if test_tmp:
self.tempdir_overrider = test_lib.ConfigOverrider({})
self.tempdir_overrider.Start()
def tearDown(self):
super(TestAdministrativeFlows, self).tearDown()
try:
self.tempdir_overrider.Stop()
except AttributeError:
pass
def testUpdateConfig(self):
"""Ensure we can retrieve and update the config."""
# Only mock the pieces we care about.
client_mock = action_mocks.ActionMock("GetConfiguration",
"UpdateConfiguration")
loc = "http://www.example.com"
new_config = rdf_protodict.Dict(
{"Client.control_urls": [loc],
"Client.foreman_check_frequency": 3600,
"Client.poll_min": 1})
# Setting config options is disallowed in tests so we need to temporarily
# revert this.
with utils.Stubber(config_lib.CONFIG, "Set",
config_lib.CONFIG.Set.old_target):
# Write the config.
for _ in test_lib.TestFlowHelper("UpdateConfiguration", client_mock,
client_id=self.client_id,
token=self.token,
config=new_config):
pass
# Now retrieve it again to see if it got written.
for _ in test_lib.TestFlowHelper("Interrogate", client_mock,
token=self.token,
client_id=self.client_id):
pass
fd = aff4.FACTORY.Open(self.client_id, token=self.token)
config_dat = fd.Get(fd.Schema.GRR_CONFIGURATION)
self.assertEqual(config_dat["Client.control_urls"], [loc])
self.assertEqual(config_dat["Client.poll_min"], 1)
def CheckCrash(self, crash, expected_session_id):
"""Checks that ClientCrash object's fields are correctly filled in."""
self.assertTrue(crash is not None)
self.assertEqual(crash.client_id, self.client_id)
self.assertEqual(crash.session_id, expected_session_id)
self.assertEqual(crash.client_info.client_name, "GRR Monitor")
self.assertEqual(
crash.crash_type,
"aff4:/flows/" + queues.FLOWS.Basename() + ":CrashHandler")
self.assertEqual(crash.crash_message,
"Client killed during transaction")
def testClientKilled(self):
"""Test that client killed messages are handled correctly."""
self.email_message = {}
def SendEmail(address, sender, title, message, **_):
self.email_message.update(dict(address=address, sender=sender,
title=title, message=message))
with utils.Stubber(email_alerts, "SendEmail", SendEmail):
client = test_lib.CrashClientMock(self.client_id, self.token)
for _ in test_lib.TestFlowHelper(
"FlowWithOneClientRequest", client, client_id=self.client_id,
token=self.token, check_flow_errors=False):
pass
# We expect the email to be sent.
self.assertEqual(self.email_message.get("address", ""),
config_lib.CONFIG["Monitoring.alert_email"])
self.assertTrue(str(self.client_id) in self.email_message["title"])
# Make sure the flow state is included in the email message.
for s in ["Flow name", "FlowWithOneClientRequest", "current_state"]:
self.assertTrue(s in self.email_message["message"])
flow_obj = aff4.FACTORY.Open(client.flow_id, age=aff4.ALL_TIMES,
token=self.token)
self.assertEqual(flow_obj.state.context.state, rdf_flows.Flow.State.ERROR)
# Make sure client object is updated with the last crash.
client_obj = aff4.FACTORY.Open(self.client_id, token=self.token)
crash = client_obj.Get(client_obj.Schema.LAST_CRASH)
self.CheckCrash(crash, flow_obj.session_id)
# Make sure crashes RDFValueCollections are created and written
# into proper locations. First check the per-client crashes collection.
client_crashes = sorted(
list(aff4.FACTORY.Open(self.client_id.Add("crashes"),
aff4_type="PackedVersionedCollection",
token=self.token)),
key=lambda x: x.timestamp)
self.assertTrue(len(client_crashes) >= 1)
crash = list(client_crashes)[0]
self.CheckCrash(crash, flow_obj.session_id)
# Check per-flow crash collection. Check that crash written there is
# equal to per-client crash.
flow_crashes = sorted(
list(flow_obj.GetValuesForAttribute(flow_obj.Schema.CLIENT_CRASH)),
key=lambda x: x.timestamp)
self.assertEqual(len(flow_crashes), len(client_crashes))
for a, b in zip(flow_crashes, client_crashes):
self.assertEqual(a, b)
# Check global crash collection. Check that crash written there is
# equal to per-client crash.
global_crashes = sorted(
aff4.FACTORY.Open(aff4.ROOT_URN.Add("crashes"),
aff4_type="PackedVersionedCollection",
token=self.token),
key=lambda x: x.timestamp)
self.assertEqual(len(global_crashes), len(client_crashes))
for a, b in zip(global_crashes, client_crashes):
self.assertEqual(a, b)
def testNannyMessage(self):
nanny_message = "Oh no!"
self.email_message = {}
def SendEmail(address, sender, title, message, **_):
self.email_message.update(dict(address=address, sender=sender,
title=title, message=message))
with utils.Stubber(email_alerts, "SendEmail", SendEmail):
msg = rdf_flows.GrrMessage(
session_id=rdfvalue.SessionID(flow_name="NannyMessage"),
payload=rdf_protodict.DataBlob(string=nanny_message),
source=self.client_id,
auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
# This is normally done by the FrontEnd when a CLIENT_KILLED message is
# received.
flow.Events.PublishEvent("NannyMessage", msg, token=self.token)
# Now emulate a worker to process the event.
worker = test_lib.MockWorker(token=self.token)
while worker.Next():
pass
worker.pool.Join()
# We expect the email to be sent.
self.assertEqual(self.email_message.get("address"),
config_lib.CONFIG["Monitoring.alert_email"])
self.assertTrue(str(self.client_id) in self.email_message["title"])
# Make sure the message is included in the email message.
self.assertTrue(nanny_message in self.email_message["message"])
# Make sure crashes RDFValueCollections are created and written
# into proper locations. First check the per-client crashes collection.
client_crashes = list(aff4.FACTORY.Open(
self.client_id.Add("crashes"),
aff4_type="PackedVersionedCollection",
token=self.token))
self.assertEqual(len(client_crashes), 1)
crash = client_crashes[0]
self.assertEqual(crash.client_id, self.client_id)
self.assertEqual(crash.client_info.client_name, "GRR Monitor")
self.assertEqual(crash.crash_type, "aff4:/flows/" +
queues.FLOWS.Basename() + ":NannyMessage")
self.assertEqual(crash.crash_message, nanny_message)
# Check global crash collection. Check that crash written there is
# equal to per-client crash.
global_crashes = list(aff4.FACTORY.Open(
aff4.ROOT_URN.Add("crashes"),
aff4_type="PackedVersionedCollection",
token=self.token))
self.assertEqual(len(global_crashes), 1)
self.assertEqual(global_crashes[0], crash)
def testStartupHandler(self):
# Clean the client records.
aff4.FACTORY.Delete(self.client_id, token=self.token)
client_mock = action_mocks.ActionMock("SendStartupInfo")
for _ in test_lib.TestFlowHelper(
"ClientActionRunner", client_mock, client_id=self.client_id,
action="SendStartupInfo", token=self.token):
pass
# Check the client's boot time and info.
fd = aff4.FACTORY.Open(self.client_id, token=self.token)
client_info = fd.Get(fd.Schema.CLIENT_INFO)
boot_time = fd.Get(fd.Schema.LAST_BOOT_TIME)
self.assertEqual(client_info.client_name,
config_lib.CONFIG["Client.name"])
self.assertEqual(client_info.client_description,
config_lib.CONFIG["Client.description"])
# Check that the boot time is accurate.
self.assertAlmostEqual(psutil.boot_time(), boot_time.AsSecondsFromEpoch())
# Run it again - this should not update any record.
for _ in test_lib.TestFlowHelper(
"ClientActionRunner", client_mock, client_id=self.client_id,
action="SendStartupInfo", token=self.token):
pass
fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self.assertEqual(boot_time.age, fd.Get(fd.Schema.LAST_BOOT_TIME).age)
self.assertEqual(client_info.age, fd.Get(fd.Schema.CLIENT_INFO).age)
# Simulate a reboot in 10 minutes.
current_boot_time = psutil.boot_time()
psutil.boot_time = lambda: current_boot_time + 600
# Run it again - this should now update the boot time.
for _ in test_lib.TestFlowHelper(
"ClientActionRunner", client_mock, client_id=self.client_id,
action="SendStartupInfo", token=self.token):
pass
# Ensure only this attribute is updated.
fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self.assertNotEqual(int(boot_time.age),
int(fd.Get(fd.Schema.LAST_BOOT_TIME).age))
self.assertEqual(int(client_info.age),
int(fd.Get(fd.Schema.CLIENT_INFO).age))
# Now set a new client build time.
with test_lib.ConfigOverrider({
"Client.build_time": time.ctime()}):
# Run it again - this should now update the client info.
for _ in test_lib.TestFlowHelper(
"ClientActionRunner", client_mock, client_id=self.client_id,
action="SendStartupInfo", token=self.token):
pass
# Ensure the client info attribute is updated.
fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self.assertNotEqual(int(client_info.age),
int(fd.Get(fd.Schema.CLIENT_INFO).age))
def testExecutePythonHack(self):
client_mock = action_mocks.ActionMock("ExecutePython")
# This is the code we test. If this runs on the client mock we can check for
# this attribute.
sys.test_code_ran_here = False
code = """
import sys
sys.test_code_ran_here = True
"""
maintenance_utils.UploadSignedConfigBlob(
code, aff4_path="aff4:/config/python_hacks/test", token=self.token)
for _ in test_lib.TestFlowHelper(
"ExecutePythonHack", client_mock, client_id=self.client_id,
hack_name="test", token=self.token):
pass
self.assertTrue(sys.test_code_ran_here)
def testExecutePythonHackWithArgs(self):
client_mock = action_mocks.ActionMock("ExecutePython")
sys.test_code_ran_here = 1234
code = """
import sys
sys.test_code_ran_here = py_args['value']
"""
maintenance_utils.UploadSignedConfigBlob(
code, aff4_path="aff4:/config/python_hacks/test", token=self.token)
for _ in test_lib.TestFlowHelper(
"ExecutePythonHack", client_mock, client_id=self.client_id,
hack_name="test", py_args=dict(value=5678), token=self.token):
pass
self.assertEqual(sys.test_code_ran_here, 5678)
def testExecuteBinariesWithArgs(self):
client_mock = action_mocks.ActionMock("ExecuteBinaryCommand")
code = "I am a binary file"
upload_path = config_lib.CONFIG["Executables.aff4_path"].Add("test.exe")
maintenance_utils.UploadSignedConfigBlob(
code, aff4_path=upload_path, token=self.token)
# This flow has an acl, the user needs to be admin.
user = aff4.FACTORY.Create("aff4:/users/%s" % self.token.username,
mode="rw", aff4_type="GRRUser", token=self.token)
user.SetLabels("admin", owner="GRR")
user.Close()
with utils.Stubber(subprocess, "Popen", test_lib.Popen):
for _ in test_lib.TestFlowHelper(
"LaunchBinary", client_mock, client_id=self.client_id,
binary=upload_path, command_line="--value 356", token=self.token):
pass
# Check that the executable file contains the code string.
self.assertEqual(test_lib.Popen.binary, code)
# At this point, the actual binary should have been cleaned up by the
# client action so it should not exist.
self.assertRaises(IOError, open, test_lib.Popen.running_args[0])
# Check the binary was run with the correct command line.
self.assertEqual(test_lib.Popen.running_args[1], "--value")
self.assertEqual(test_lib.Popen.running_args[2], "356")
# Check the command was in the tmp file.
self.assertTrue(test_lib.Popen.running_args[0].startswith(
config_lib.CONFIG["Client.tempdir"]))
def testExecuteLargeBinaries(self):
client_mock = action_mocks.ActionMock("ExecuteBinaryCommand")
code = "I am a large binary file" * 100
upload_path = config_lib.CONFIG["Executables.aff4_path"].Add("test.exe")
maintenance_utils.UploadSignedConfigBlob(
code, aff4_path=upload_path, limit=100, token=self.token)
# Ensure the aff4 collection has many items.
fd = aff4.FACTORY.Open(upload_path, token=self.token)
# There should be 24 parts to this binary.
self.assertEqual(len(fd.collection), 24)
# Total size is 2400.
self.assertEqual(len(fd), 2400)
# This flow has an acl, the user needs to be admin.
user = aff4.FACTORY.Create("aff4:/users/%s" % self.token.username,
mode="rw", aff4_type="GRRUser", token=self.token)
user.SetLabels("admin", owner="GRR")
user.Close()
with utils.Stubber(subprocess, "Popen", test_lib.Popen):
for _ in test_lib.TestFlowHelper(
"LaunchBinary", client_mock, client_id=self.client_id,
binary=upload_path, command_line="--value 356", token=self.token):
pass
# Check that the executable file contains the code string.
self.assertEqual(test_lib.Popen.binary, code)
# At this point, the actual binary should have been cleaned up by the
# client action so it should not exist.
self.assertRaises(IOError, open, test_lib.Popen.running_args[0])
# Check the binary was run with the correct command line.
self.assertEqual(test_lib.Popen.running_args[1], "--value")
self.assertEqual(test_lib.Popen.running_args[2], "356")
# Check the command was in the tmp file.
self.assertTrue(test_lib.Popen.running_args[0].startswith(
config_lib.CONFIG["Client.tempdir"]))
def testGetClientStats(self):
class ClientMock(object):
def GetClientStats(self, _):
"""Fake get client stats method."""
response = rdf_client.ClientStats()
for i in range(12):
sample = rdf_client.CpuSample(
timestamp=int(i * 10 * 1e6),
user_cpu_time=10 + i,
system_cpu_time=20 + i,
cpu_percent=10 + i)
response.cpu_samples.Append(sample)
sample = rdf_client.IOSample(
timestamp=int(i * 10 * 1e6),
read_bytes=10 + i,
write_bytes=10 + i)
response.io_samples.Append(sample)
return [response]
for _ in test_lib.TestFlowHelper("GetClientStats", ClientMock(),
token=self.token,
client_id=self.client_id):
pass
urn = self.client_id.Add("stats")
stats_fd = aff4.FACTORY.Create(urn, "ClientStats", token=self.token,
mode="rw")
sample = stats_fd.Get(stats_fd.Schema.STATS)
# Samples are taken at the following timestamps and should be split into 2
# bins as follows (sample_interval is 60000000):
# 00000000, 10000000, 20000000, 30000000, 40000000, 50000000 -> Bin 1
# 60000000, 70000000, 80000000, 90000000, 100000000, 110000000 -> Bin 2
self.assertEqual(len(sample.cpu_samples), 2)
self.assertEqual(len(sample.io_samples), 2)
self.assertAlmostEqual(sample.io_samples[0].read_bytes, 15.0)
self.assertAlmostEqual(sample.io_samples[1].read_bytes, 21.0)
self.assertAlmostEqual(sample.cpu_samples[0].cpu_percent,
sum(range(10, 16)) / 6.0)
self.assertAlmostEqual(sample.cpu_samples[1].cpu_percent,
sum(range(16, 22)) / 6.0)
self.assertAlmostEqual(sample.cpu_samples[0].user_cpu_time, 15.0)
self.assertAlmostEqual(sample.cpu_samples[1].system_cpu_time, 31.0)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
[] |
[] |
[
"TEST_TMPDIR"
] |
[]
|
["TEST_TMPDIR"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"bufio"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"sync"
"unsafe"
"github.com/librespot-org/librespot-golang/Spotify"
"github.com/librespot-org/librespot-golang/librespot"
"github.com/librespot-org/librespot-golang/librespot/core"
"github.com/librespot-org/librespot-golang/librespot/utils"
"github.com/xlab/portaudio-go/portaudio"
"github.com/xlab/vorbis-go/decoder"
)
const (
// The device name that is registered to Spotify servers
defaultDeviceName = "librespot"
// The number of samples per channel in the decoded audio
samplesPerChannel = 2048
// The samples bit depth
bitDepth = 16
// The samples format
sampleFormat = portaudio.PaFloat32
)
func main() {
// First, initialize PortAudio
if err := portaudio.Initialize(); paError(err) {
log.Fatalln("PortAudio init error: ", paErrorText(err))
}
// Read flags from commandline
username := flag.String("username", "", "spotify username")
password := flag.String("password", "", "spotify password")
blob := flag.String("blob", "blob.bin", "spotify auth blob")
devicename := flag.String("devicename", defaultDeviceName, "name of device")
flag.Parse()
// Authenticate
var session *core.Session
var err error
if *username != "" && *password != "" {
// Authenticate using a regular login and password, and store it in the blob file.
		session, err = librespot.Login(*username, *password, *devicename)
		if err == nil {
			// Only persist the blob when login succeeded; session may be nil on error.
			if werr := ioutil.WriteFile(*blob, session.ReusableAuthBlob(), 0600); werr != nil {
				fmt.Printf("Could not store authentication blob in %s: %s\n", *blob, werr)
			}
		}
} else if *blob != "" && *username != "" {
// Authenticate reusing an existing blob
blobBytes, err := ioutil.ReadFile(*blob)
if err != nil {
fmt.Printf("Unable to read auth blob from %s: %s\n", *blob, err)
os.Exit(1)
return
}
session, err = librespot.LoginSaved(*username, blobBytes, *devicename)
} else if os.Getenv("client_secret") != "" {
// Authenticate using OAuth (untested)
session, err = librespot.LoginOAuth(*devicename, os.Getenv("client_id"), os.Getenv("client_secret"))
} else {
		// No valid options, show the help
fmt.Println("need to supply a username and password or a blob file path")
fmt.Println("./microclient --username SPOTIFY_USERNAME [--blob ./path/to/blob]")
fmt.Println("or")
fmt.Println("./microclient --username SPOTIFY_USERNAME --password SPOTIFY_PASSWORD [--blob ./path/to/blob]")
return
}
if err != nil {
fmt.Println("Error logging in: ", err)
os.Exit(1)
return
}
// Command loop
reader := bufio.NewReader(os.Stdin)
printHelp()
for {
fmt.Print("> ")
text, _ := reader.ReadString('\n')
cmds := strings.Split(strings.TrimSpace(text), " ")
switch cmds[0] {
case "help":
printHelp()
case "track":
if len(cmds) < 2 {
fmt.Println("You must specify the Base62 Spotify ID of the track")
} else {
funcTrack(session, cmds[1])
}
case "artist":
if len(cmds) < 2 {
fmt.Println("You must specify the Base62 Spotify ID of the artist")
} else {
funcArtist(session, cmds[1])
}
case "album":
if len(cmds) < 2 {
fmt.Println("You must specify the Base62 Spotify ID of the album")
} else {
funcAlbum(session, cmds[1])
}
case "playlists":
funcPlaylists(session)
case "search":
funcSearch(session, cmds[1])
case "play":
if len(cmds) < 2 {
fmt.Println("You must specify the Base62 Spotify ID of the track")
} else {
funcPlay(session, cmds[1])
}
default:
fmt.Println("Unknown command")
}
}
}
func printHelp() {
fmt.Println("\nAvailable commands:")
fmt.Println("play <track>: play specified track by spotify base62 id")
fmt.Println("track <track>: show details on specified track by spotify base62 id")
fmt.Println("album <album>: show details on specified album by spotify base62 id")
fmt.Println("artist <artist>: show details on specified artist by spotify base62 id")
fmt.Println("search <keyword>: start a search on the specified keyword")
fmt.Println("playlists: show your playlists")
fmt.Println("help: show this help")
}
func funcTrack(session *core.Session, trackID string) {
fmt.Println("Loading track: ", trackID)
track, err := session.Mercury().GetTrack(utils.Base62ToHex(trackID))
if err != nil {
fmt.Println("Error loading track: ", err)
return
}
fmt.Println("Track title: ", track.GetName())
}
func funcArtist(session *core.Session, artistID string) {
artist, err := session.Mercury().GetArtist(utils.Base62ToHex(artistID))
if err != nil {
fmt.Println("Error loading artist:", err)
return
}
fmt.Printf("Artist: %s\n", artist.GetName())
fmt.Printf("Popularity: %d\n", artist.GetPopularity())
fmt.Printf("Genre: %s\n", artist.GetGenre())
if artist.GetTopTrack() != nil && len(artist.GetTopTrack()) > 0 {
// Spotify returns top tracks in multiple countries. We take the first
// one as example, but we should use the country data returned by the
// Spotify server (session.Country())
tt := artist.GetTopTrack()[0]
fmt.Printf("\nTop tracks (country %s):\n", tt.GetCountry())
for _, t := range tt.GetTrack() {
// To save bandwidth, only track IDs are returned. If you want
// the track name, you need to fetch it.
fmt.Printf(" => %s\n", utils.ConvertTo62(t.GetGid()))
}
}
fmt.Printf("\nAlbums:\n")
for _, ag := range artist.GetAlbumGroup() {
for _, a := range ag.GetAlbum() {
fmt.Printf(" => %s\n", utils.ConvertTo62(a.GetGid()))
}
}
}
func funcAlbum(session *core.Session, albumID string) {
album, err := session.Mercury().GetAlbum(utils.Base62ToHex(albumID))
if err != nil {
fmt.Println("Error loading album:", err)
return
}
fmt.Printf("Album: %s\n", album.GetName())
fmt.Printf("Popularity: %d\n", album.GetPopularity())
fmt.Printf("Genre: %s\n", album.GetGenre())
fmt.Printf("Date: %d-%d-%d\n", album.GetDate().GetYear(), album.GetDate().GetMonth(), album.GetDate().GetDay())
fmt.Printf("Label: %s\n", album.GetLabel())
fmt.Printf("Type: %s\n", album.GetTyp())
fmt.Printf("Artists: ")
for _, artist := range album.GetArtist() {
fmt.Printf("%s ", utils.ConvertTo62(artist.GetGid()))
}
fmt.Printf("\n")
for _, disc := range album.GetDisc() {
fmt.Printf("\nDisc %d (%s): \n", disc.GetNumber(), disc.GetName())
for _, track := range disc.GetTrack() {
fmt.Printf(" => %s\n", utils.ConvertTo62(track.GetGid()))
}
}
}
func funcPlaylists(session *core.Session) {
fmt.Println("Listing playlists")
playlist, err := session.Mercury().GetRootPlaylist(session.Username())
if err != nil || playlist.Contents == nil {
fmt.Println("Error getting root list: ", err)
return
}
items := playlist.Contents.Items
for i := 0; i < len(items); i++ {
id := strings.TrimPrefix(items[i].GetUri(), "spotify:")
id = strings.Replace(id, ":", "/", -1)
list, _ := session.Mercury().GetPlaylist(id)
fmt.Println(list.Attributes.GetName(), id)
if list.Contents != nil {
for j := 0; j < len(list.Contents.Items); j++ {
item := list.Contents.Items[j]
fmt.Println(" ==> ", *item.Uri)
}
}
}
}
func funcSearch(session *core.Session, keyword string) {
resp, err := session.Mercury().Search(keyword, 12, session.Country(), session.Username())
if err != nil {
fmt.Println("Failed to search:", err)
return
}
res := resp.Results
fmt.Println("Search results for ", keyword)
fmt.Println("=============================")
if res.Error != nil {
fmt.Println("Search result error:", res.Error)
}
fmt.Printf("Albums: %d (total %d)\n", len(res.Albums.Hits), res.Albums.Total)
for _, album := range res.Albums.Hits {
fmt.Printf(" => %s (%s)\n", album.Name, album.Uri)
}
fmt.Printf("\nArtists: %d (total %d)\n", len(res.Artists.Hits), res.Artists.Total)
for _, artist := range res.Artists.Hits {
fmt.Printf(" => %s (%s)\n", artist.Name, artist.Uri)
}
fmt.Printf("\nTracks: %d (total %d)\n", len(res.Tracks.Hits), res.Tracks.Total)
for _, track := range res.Tracks.Hits {
fmt.Printf(" => %s (%s)\n", track.Name, track.Uri)
}
}
func funcPlay(session *core.Session, trackID string) {
fmt.Println("Loading track for play: ", trackID)
// Get the track metadata: it holds information about which files and encodings are available
track, err := session.Mercury().GetTrack(utils.Base62ToHex(trackID))
if err != nil {
fmt.Println("Error loading track: ", err)
return
}
fmt.Println("Track:", track.GetName())
// As a demo, select the OGG 160kbps variant of the track. The "high quality" setting in the official Spotify
// app is the OGG 320kbps variant.
var selectedFile *Spotify.AudioFile
for _, file := range track.GetFile() {
if file.GetFormat() == Spotify.AudioFile_OGG_VORBIS_160 {
selectedFile = file
}
}
	if selectedFile == nil {
		fmt.Println("Could not find an OGG Vorbis 160kbps file for this track")
		return
	}
	// Synchronously load the track
	audioFile, err := session.Player().LoadTrack(selectedFile, track.GetGid())
// TODO: channel to be notified of chunks downloaded (or reader?)
if err != nil {
fmt.Printf("Error while loading track: %s\n", err)
} else {
// We have the track audio, let's play it! Initialize the OGG decoder, and start a PortAudio stream.
// Note that we skip the first 167 bytes as it is a Spotify-specific header. You can decode it by
// using this: https://sourceforge.net/p/despotify/code/HEAD/tree/java/trunk/src/main/java/se/despotify/client/player/SpotifyOggHeader.java
fmt.Println("Setting up OGG decoder...")
dec, err := decoder.New(audioFile, samplesPerChannel)
if err != nil {
log.Fatalln(err)
}
info := dec.Info()
go func() {
dec.Decode()
dec.Close()
}()
fmt.Println("Setting up PortAudio stream...")
fmt.Printf("PortAudio channels: %d / SampleRate: %f\n", info.Channels, info.SampleRate)
var wg sync.WaitGroup
var stream *portaudio.Stream
callback := paCallback(&wg, int(info.Channels), dec.SamplesOut())
if err := portaudio.OpenDefaultStream(&stream, 0, info.Channels, sampleFormat, info.SampleRate,
samplesPerChannel, callback, nil); paError(err) {
log.Fatalln(paErrorText(err))
}
fmt.Println("Starting playback...")
if err := portaudio.StartStream(stream); paError(err) {
log.Fatalln(paErrorText(err))
}
wg.Wait()
}
}
// PortAudio helpers
func paError(err portaudio.Error) bool {
return portaudio.ErrorCode(err) != portaudio.PaNoError
}
func paErrorText(err portaudio.Error) string {
return "PortAudio error: " + portaudio.GetErrorText(err)
}
func paCallback(wg *sync.WaitGroup, channels int, samples <-chan [][]float32) portaudio.StreamCallback {
wg.Add(1)
return func(_ unsafe.Pointer, output unsafe.Pointer, sampleCount uint,
_ *portaudio.StreamCallbackTimeInfo, _ portaudio.StreamCallbackFlags, _ unsafe.Pointer) int32 {
const (
statusContinue = int32(portaudio.PaContinue)
statusComplete = int32(portaudio.PaComplete)
)
frame, ok := <-samples
if !ok {
wg.Done()
return statusComplete
}
if len(frame) > int(sampleCount) {
frame = frame[:sampleCount]
}
var idx int
out := (*(*[1 << 32]float32)(unsafe.Pointer(output)))[:int(sampleCount)*channels]
for _, sample := range frame {
if len(sample) > channels {
sample = sample[:channels]
}
for i := range sample {
out[idx] = sample[i]
idx++
}
}
return statusContinue
}
}
|
[
"\"client_secret\"",
"\"client_id\"",
"\"client_secret\""
] |
[] |
[
"client_secret",
"client_id"
] |
[]
|
["client_secret", "client_id"]
|
go
| 2 | 0 | |
src/net/http/server.go
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// HTTP server. See RFC 7230 through 7235.
package http
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/textproto"
"net/url"
"os"
"path"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"golang_org/x/net/http/httpguts"
)
// Errors used by the HTTP server.
var (
// ErrBodyNotAllowed is returned by ResponseWriter.Write calls
// when the HTTP method or response code does not permit a
// body.
ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")
// ErrHijacked is returned by ResponseWriter.Write calls when
// the underlying connection has been hijacked using the
// Hijacker interface. A zero-byte write on a hijacked
// connection will return ErrHijacked without any other side
// effects.
ErrHijacked = errors.New("http: connection has been hijacked")
// ErrContentLength is returned by ResponseWriter.Write calls
// when a Handler set a Content-Length response header with a
// declared size and then attempted to write more bytes than
// declared.
ErrContentLength = errors.New("http: wrote more than the declared Content-Length")
// Deprecated: ErrWriteAfterFlush is no longer used.
ErrWriteAfterFlush = errors.New("unused")
)
// A Handler responds to an HTTP request.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return. Returning signals that the request is finished; it
// is not valid to use the ResponseWriter or read from the
// Request.Body after or concurrently with the completion of the
// ServeHTTP call.
//
// Depending on the HTTP client software, HTTP protocol version, and
// any intermediaries between the client and the Go server, it may not
// be possible to read from the Request.Body after writing to the
// ResponseWriter. Cautious handlers should read the Request.Body
// first, and then reply.
//
// Except for reading the body, handlers should not modify the
// provided Request.
//
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
// and either closes the network connection or sends an HTTP/2
// RST_STREAM, depending on the HTTP protocol. To abort a handler so
// the client sees an interrupted response but the server doesn't log
// an error, panic with the value ErrAbortHandler.
type Handler interface {
ServeHTTP(ResponseWriter, *Request)
}
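// Illustrative sketch: a minimal Handler implementation. The exampleGreeter
// type and its greeting field are hypothetical names used only to demonstrate
// the contract described above; they are not part of the net/http API.
type exampleGreeter struct {
	greeting string
}
// ServeHTTP writes the reply and returns; returning signals that the request
// is finished.
func (g exampleGreeter) ServeHTTP(w ResponseWriter, r *Request) {
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	fmt.Fprintf(w, "%s, %s\n", g.greeting, r.URL.Path)
}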
// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
//
// A ResponseWriter may not be used after the Handler.ServeHTTP method
// has returned.
type ResponseWriter interface {
// Header returns the header map that will be sent by
// WriteHeader. The Header map also is the mechanism with which
// Handlers can set HTTP trailers.
//
// Changing the header map after a call to WriteHeader (or
// Write) has no effect unless the modified headers are
// trailers.
//
// There are two ways to set Trailers. The preferred way is to
// predeclare in the headers which trailers you will later
// send by setting the "Trailer" header to the names of the
// trailer keys which will come later. In this case, those
// keys of the Header map are treated as if they were
// trailers. See the example. The second way, for trailer
// keys not known to the Handler until after the first Write,
// is to prefix the Header map keys with the TrailerPrefix
// constant value. See TrailerPrefix.
//
// To suppress implicit response headers (such as "Date"), set
// their value to nil.
Header() Header
// Write writes the data to the connection as part of an HTTP reply.
//
// If WriteHeader has not yet been called, Write calls
// WriteHeader(http.StatusOK) before writing the data. If the Header
// does not contain a Content-Type line, Write adds a Content-Type set
// to the result of passing the initial 512 bytes of written data to
// DetectContentType.
//
// Depending on the HTTP protocol version and the client, calling
// Write or WriteHeader may prevent future reads on the
// Request.Body. For HTTP/1.x requests, handlers should read any
// needed request body data before writing the response. Once the
// headers have been flushed (due to either an explicit Flusher.Flush
// call or writing enough data to trigger a flush), the request body
// may be unavailable. For HTTP/2 requests, the Go HTTP server permits
// handlers to continue to read the request body while concurrently
// writing the response. However, such behavior may not be supported
// by all HTTP/2 clients. Handlers should read before writing if
// possible to maximize compatibility.
Write([]byte) (int, error)
// WriteHeader sends an HTTP response header with the provided
// status code.
//
// If WriteHeader is not called explicitly, the first call to Write
// will trigger an implicit WriteHeader(http.StatusOK).
// Thus explicit calls to WriteHeader are mainly used to
// send error codes.
//
// The provided code must be a valid HTTP 1xx-5xx status code.
// Only one header may be written. Go does not currently
// support sending user-defined 1xx informational headers,
// with the exception of 100-continue response header that the
// Server sends automatically when the Request.Body is read.
WriteHeader(statusCode int)
}
// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
//
// The default HTTP/1.x and HTTP/2 ResponseWriter implementations
// support Flusher, but ResponseWriter wrappers may not. Handlers
// should always test for this ability at runtime.
//
// Note that even for ResponseWriters that support Flush,
// if the client is connected through an HTTP proxy,
// the buffered data may not reach the client until the response
// completes.
type Flusher interface {
// Flush sends any buffered data to the client.
Flush()
}
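// Illustrative sketch: streaming a response with periodic flushes. The
// exampleStreamingHandler name and the chunk/delay values are hypothetical;
// the point is the runtime type assertion for Flusher recommended above.
func exampleStreamingHandler(w ResponseWriter, r *Request) {
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "chunk %d\n", i)
		// Not every ResponseWriter supports Flush, so test for it at runtime.
		if f, ok := w.(Flusher); ok {
			f.Flush()
		}
		time.Sleep(100 * time.Millisecond)
	}
}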
// The Hijacker interface is implemented by ResponseWriters that allow
// an HTTP handler to take over the connection.
//
// The default ResponseWriter for HTTP/1.x connections supports
// Hijacker, but HTTP/2 connections intentionally do not.
// ResponseWriter wrappers may also not support Hijacker. Handlers
// should always test for this ability at runtime.
type Hijacker interface {
// Hijack lets the caller take over the connection.
// After a call to Hijack the HTTP server library
// will not do anything else with the connection.
//
// It becomes the caller's responsibility to manage
// and close the connection.
//
// The returned net.Conn may have read or write deadlines
// already set, depending on the configuration of the
// Server. It is the caller's responsibility to set
// or clear those deadlines as needed.
//
// The returned bufio.Reader may contain unprocessed buffered
// data from the client.
//
// After a call to Hijack, the original Request.Body must
// not be used.
Hijack() (net.Conn, *bufio.ReadWriter, error)
}
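// Illustrative sketch: taking over the connection via Hijacker. The
// exampleHijackHandler name and the hard-coded reply are hypothetical; error
// handling is reduced to the minimum needed to show the calls involved.
func exampleHijackHandler(w ResponseWriter, r *Request) {
	hj, ok := w.(Hijacker)
	if !ok {
		Error(w, "hijacking not supported", StatusInternalServerError)
		return
	}
	conn, buf, err := hj.Hijack()
	if err != nil {
		Error(w, err.Error(), StatusInternalServerError)
		return
	}
	// From this point on the caller owns the connection and must close it.
	defer conn.Close()
	buf.WriteString("HTTP/1.1 200 OK\r\nConnection: close\r\n\r\nhijacked\r\n")
	buf.Flush()
}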
// The CloseNotifier interface is implemented by ResponseWriters which
// allow detecting when the underlying connection has gone away.
//
// This mechanism can be used to cancel long operations on the server
// if the client has disconnected before the response is ready.
type CloseNotifier interface {
// CloseNotify returns a channel that receives at most a
// single value (true) when the client connection has gone
// away.
//
// CloseNotify may wait to notify until Request.Body has been
// fully read.
//
// After the Handler has returned, there is no guarantee
// that the channel receives a value.
//
// If the protocol is HTTP/1.1 and CloseNotify is called while
	// processing an idempotent request (such as a GET) while
// HTTP/1.1 pipelining is in use, the arrival of a subsequent
// pipelined request may cause a value to be sent on the
// returned channel. In practice HTTP/1.1 pipelining is not
// enabled in browsers and not seen often in the wild. If this
// is a problem, use HTTP/2 or only use CloseNotify on methods
// such as POST.
CloseNotify() <-chan bool
}
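// Illustrative sketch: abandoning a slow operation when the client goes away,
// using the CloseNotify channel described above. The handler name and the
// 10 second stand-in for real work are hypothetical.
func exampleCancellableHandler(w ResponseWriter, r *Request) {
	done := time.After(10 * time.Second) // stand-in for real work
	var closed <-chan bool
	if cn, ok := w.(CloseNotifier); ok {
		closed = cn.CloseNotify()
	}
	select {
	case <-done:
		io.WriteString(w, "finished\n")
	case <-closed:
		// The client disconnected; give up without writing a response.
	}
}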
var (
// ServerContextKey is a context key. It can be used in HTTP
// handlers with context.WithValue to access the server that
// started the handler. The associated value will be of
// type *Server.
ServerContextKey = &contextKey{"http-server"}
// LocalAddrContextKey is a context key. It can be used in
// HTTP handlers with context.WithValue to access the local
// address the connection arrived on.
// The associated value will be of type net.Addr.
LocalAddrContextKey = &contextKey{"local-addr"}
)
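// Illustrative sketch: reading the context keys above from inside a handler.
// The exampleContextKeyHandler name is hypothetical; the type assertions match
// the documented value types (*Server and net.Addr).
func exampleContextKeyHandler(w ResponseWriter, r *Request) {
	srv, _ := r.Context().Value(ServerContextKey).(*Server)
	addr, _ := r.Context().Value(LocalAddrContextKey).(net.Addr)
	fmt.Fprintf(w, "server=%p local=%v\n", srv, addr)
}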
// A conn represents the server side of an HTTP connection.
type conn struct {
// server is the server on which the connection arrived.
// Immutable; never nil.
server *Server
// cancelCtx cancels the connection-level context.
cancelCtx context.CancelFunc
// rwc is the underlying network connection.
// This is never wrapped by other types and is the value given out
// to CloseNotifier callers. It is usually of type *net.TCPConn or
// *tls.Conn.
rwc net.Conn
// remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
// inside the Listener's Accept goroutine, as some implementations block.
// It is populated immediately inside the (*conn).serve goroutine.
// This is the value of a Handler's (*Request).RemoteAddr.
remoteAddr string
// tlsState is the TLS connection state when using TLS.
// nil means not TLS.
tlsState *tls.ConnectionState
// werr is set to the first write error to rwc.
// It is set via checkConnErrorWriter{w}, where bufw writes.
werr error
// r is bufr's read source. It's a wrapper around rwc that provides
// io.LimitedReader-style limiting (while reading request headers)
// and functionality to support CloseNotifier. See *connReader docs.
r *connReader
// bufr reads from r.
bufr *bufio.Reader
// bufw writes to checkConnErrorWriter{c}, which populates werr on error.
bufw *bufio.Writer
// lastMethod is the method of the most recent request
// on this connection, if any.
lastMethod string
curReq atomic.Value // of *response (which has a Request in it)
curState atomic.Value // of ConnState
// mu guards hijackedv
mu sync.Mutex
// hijackedv is whether this connection has been hijacked
// by a Handler with the Hijacker interface.
// It is guarded by mu.
hijackedv bool
}
func (c *conn) hijacked() bool {
c.mu.Lock()
defer c.mu.Unlock()
return c.hijackedv
}
// c.mu must be held.
func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
if c.hijackedv {
return nil, nil, ErrHijacked
}
c.r.abortPendingRead()
c.hijackedv = true
rwc = c.rwc
rwc.SetDeadline(time.Time{})
buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))
if c.r.hasByte {
if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil {
return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err)
}
}
c.setState(rwc, StateHijacked)
return
}
// This should be >= 512 bytes for DetectContentType,
// but otherwise it's somewhat arbitrary.
const bufferBeforeChunkingSize = 2048
// chunkWriter writes to a response's conn buffer, and is the writer
// wrapped by the response.bufw buffered writer.
//
// chunkWriter also is responsible for finalizing the Header, including
// conditionally setting the Content-Type and setting a Content-Length
// in cases where the handler's final output is smaller than the buffer
// size. It also conditionally adds chunk headers, when in chunking mode.
//
// See the comment above (*response).Write for the entire write flow.
type chunkWriter struct {
res *response
// header is either nil or a deep clone of res.handlerHeader
// at the time of res.WriteHeader, if res.WriteHeader is
// called and extra buffering is being done to calculate
// Content-Type and/or Content-Length.
header Header
// wroteHeader tells whether the header's been written to "the
// wire" (or rather: w.conn.buf). this is unlike
// (*response).wroteHeader, which tells only whether it was
// logically written.
wroteHeader bool
// set by the writeHeader method:
chunking bool // using chunked transfer encoding for reply body
}
var (
crlf = []byte("\r\n")
colonSpace = []byte(": ")
)
func (cw *chunkWriter) Write(p []byte) (n int, err error) {
if !cw.wroteHeader {
cw.writeHeader(p)
}
if cw.res.req.Method == "HEAD" {
// Eat writes.
return len(p), nil
}
if cw.chunking {
_, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
if err != nil {
cw.res.conn.rwc.Close()
return
}
}
n, err = cw.res.conn.bufw.Write(p)
if cw.chunking && err == nil {
_, err = cw.res.conn.bufw.Write(crlf)
}
if err != nil {
cw.res.conn.rwc.Close()
}
return
}
func (cw *chunkWriter) flush() {
if !cw.wroteHeader {
cw.writeHeader(nil)
}
cw.res.conn.bufw.Flush()
}
func (cw *chunkWriter) close() {
if !cw.wroteHeader {
cw.writeHeader(nil)
}
if cw.chunking {
bw := cw.res.conn.bufw // conn's bufio writer
// zero chunk to mark EOF
bw.WriteString("0\r\n")
if trailers := cw.res.finalTrailers(); trailers != nil {
trailers.Write(bw) // the writer handles noting errors
}
// final blank line after the trailers (whether
// present or not)
bw.WriteString("\r\n")
}
}
// A response represents the server side of an HTTP response.
type response struct {
conn *conn
req *Request // request for this response
reqBody io.ReadCloser
cancelCtx context.CancelFunc // when ServeHTTP exits
wroteHeader bool // reply header has been (logically) written
wroteContinue bool // 100 Continue response was written
wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive"
wantsClose bool // HTTP request has Connection "close"
w *bufio.Writer // buffers output in chunks to chunkWriter
cw chunkWriter
// handlerHeader is the Header that Handlers get access to,
// which may be retained and mutated even after WriteHeader.
// handlerHeader is copied into cw.header at WriteHeader
// time, and privately mutated thereafter.
handlerHeader Header
calledHeader bool // handler accessed handlerHeader via Header
written int64 // number of bytes written in body
contentLength int64 // explicitly-declared Content-Length; or -1
status int // status code passed to WriteHeader
// close connection after this reply. set on request and
// updated after response from handler if there's a
// "Connection: keep-alive" response header and a
// Content-Length.
closeAfterReply bool
// requestBodyLimitHit is set by requestTooLarge when
// maxBytesReader hits its max size. It is checked in
// WriteHeader, to make sure we don't consume the
// remaining request body to try to advance to the next HTTP
// request. Instead, when this is set, we stop reading
// subsequent requests on this connection and stop reading
// input from it.
requestBodyLimitHit bool
// trailers are the headers to be sent after the handler
// finishes writing the body. This field is initialized from
// the Trailer response header when the response header is
// written.
trailers []string
handlerDone atomicBool // set true when the handler exits
// Buffers for Date, Content-Length, and status code
dateBuf [len(TimeFormat)]byte
clenBuf [10]byte
statusBuf [3]byte
// closeNotifyCh is the channel returned by CloseNotify.
// TODO(bradfitz): this is currently (for Go 1.8) always
// non-nil. Make this lazily-created again as it used to be?
closeNotifyCh chan bool
didCloseNotify int32 // atomic (only 0->1 winner should send)
}
// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
// that, if present, signals that the map entry is actually for
// the response trailers, and not the response headers. The prefix
// is stripped after the ServeHTTP call finishes and the values are
// sent in the trailers.
//
// This mechanism is intended only for trailers that are not known
// prior to the headers being written. If the set of trailers is fixed
// or known before the header is written, the normal Go trailers mechanism
// is preferred:
// https://golang.org/pkg/net/http/#ResponseWriter
// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
const TrailerPrefix = "Trailer:"
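// Illustrative sketch: declaring a trailer that is only known after the first
// Write by using the TrailerPrefix mechanism described above. The handler name
// and the trailer key/value are hypothetical.
func exampleTrailerHandler(w ResponseWriter, r *Request) {
	w.WriteHeader(StatusOK)
	io.WriteString(w, "body first\n")
	// The prefix is stripped and Example-Checksum is sent after the body.
	w.Header().Set(TrailerPrefix+"Example-Checksum", "abc123")
}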
// finalTrailers is called after the Handler exits and returns a non-nil
// value if the Handler set any trailers.
func (w *response) finalTrailers() Header {
var t Header
for k, vv := range w.handlerHeader {
if strings.HasPrefix(k, TrailerPrefix) {
if t == nil {
t = make(Header)
}
t[strings.TrimPrefix(k, TrailerPrefix)] = vv
}
}
for _, k := range w.trailers {
if t == nil {
t = make(Header)
}
for _, v := range w.handlerHeader[k] {
t.Add(k, v)
}
}
return t
}
type atomicBool int32
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
// written in the trailers at the end of the response.
func (w *response) declareTrailer(k string) {
k = CanonicalHeaderKey(k)
if !httpguts.ValidTrailerHeader(k) {
// Forbidden by RFC 7230, section 4.1.2
return
}
w.trailers = append(w.trailers, k)
}
// requestTooLarge is called by maxBytesReader when too much input has
// been read from the client.
func (w *response) requestTooLarge() {
w.closeAfterReply = true
w.requestBodyLimitHit = true
if !w.wroteHeader {
w.Header().Set("Connection", "close")
}
}
// needsSniff reports whether a Content-Type still needs to be sniffed.
func (w *response) needsSniff() bool {
_, haveType := w.handlerHeader["Content-Type"]
return !w.cw.wroteHeader && !haveType && w.written < sniffLen
}
// writerOnly hides an io.Writer value's optional ReadFrom method
// from io.Copy.
type writerOnly struct {
io.Writer
}
func srcIsRegularFile(src io.Reader) (isRegular bool, err error) {
switch v := src.(type) {
case *os.File:
fi, err := v.Stat()
if err != nil {
return false, err
}
return fi.Mode().IsRegular(), nil
case *io.LimitedReader:
return srcIsRegularFile(v.R)
default:
return
}
}
// ReadFrom is here to optimize copying from an *os.File regular file
// to a *net.TCPConn with sendfile.
func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
// Our underlying w.conn.rwc is usually a *TCPConn (with its
// own ReadFrom method). If not, or if our src isn't a regular
// file, just fall back to the normal copy method.
rf, ok := w.conn.rwc.(io.ReaderFrom)
regFile, err := srcIsRegularFile(src)
if err != nil {
return 0, err
}
if !ok || !regFile {
bufp := copyBufPool.Get().(*[]byte)
defer copyBufPool.Put(bufp)
return io.CopyBuffer(writerOnly{w}, src, *bufp)
}
// sendfile path:
if !w.wroteHeader {
w.WriteHeader(StatusOK)
}
if w.needsSniff() {
n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen))
n += n0
if err != nil {
return n, err
}
}
w.w.Flush() // get rid of any previous writes
w.cw.flush() // make sure Header is written; flush data to rwc
// Now that cw has been flushed, its chunking field is guaranteed initialized.
if !w.cw.chunking && w.bodyAllowed() {
n0, err := rf.ReadFrom(src)
n += n0
w.written += n0
return n, err
}
n0, err := io.Copy(writerOnly{w}, src)
n += n0
return n, err
}
// debugServerConnections controls whether all server connections are wrapped
// with a verbose logging wrapper.
const debugServerConnections = false
// Create new connection from rwc.
func (srv *Server) newConn(rwc net.Conn) *conn {
c := &conn{
server: srv,
rwc: rwc,
}
if debugServerConnections {
c.rwc = newLoggingConn("server", c.rwc)
}
return c
}
type readResult struct {
n int
err error
b byte // byte read, if n == 1
}
// connReader is the io.Reader wrapper used by *conn. It combines a
// selectively-activated io.LimitedReader (to bound request header
// read sizes) with support for selectively keeping an io.Reader.Read
// call blocked in a background goroutine to wait for activity and
// trigger a CloseNotifier channel.
type connReader struct {
conn *conn
mu sync.Mutex // guards following
hasByte bool
byteBuf [1]byte
cond *sync.Cond
inRead bool
aborted bool // set true before conn.rwc deadline is set to past
remain int64 // bytes remaining
}
func (cr *connReader) lock() {
cr.mu.Lock()
if cr.cond == nil {
cr.cond = sync.NewCond(&cr.mu)
}
}
func (cr *connReader) unlock() { cr.mu.Unlock() }
func (cr *connReader) startBackgroundRead() {
cr.lock()
defer cr.unlock()
if cr.inRead {
panic("invalid concurrent Body.Read call")
}
if cr.hasByte {
return
}
cr.inRead = true
cr.conn.rwc.SetReadDeadline(time.Time{})
go cr.backgroundRead()
}
func (cr *connReader) backgroundRead() {
n, err := cr.conn.rwc.Read(cr.byteBuf[:])
cr.lock()
if n == 1 {
cr.hasByte = true
// We were at EOF already (since we wouldn't be in a
// background read otherwise), so this is a pipelined
// HTTP request.
cr.closeNotifyFromPipelinedRequest()
}
if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
// Ignore this error. It's the expected error from
// another goroutine calling abortPendingRead.
} else if err != nil {
cr.handleReadError(err)
}
cr.aborted = false
cr.inRead = false
cr.unlock()
cr.cond.Broadcast()
}
func (cr *connReader) abortPendingRead() {
cr.lock()
defer cr.unlock()
if !cr.inRead {
return
}
cr.aborted = true
cr.conn.rwc.SetReadDeadline(aLongTimeAgo)
for cr.inRead {
cr.cond.Wait()
}
cr.conn.rwc.SetReadDeadline(time.Time{})
}
func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain }
func (cr *connReader) setInfiniteReadLimit() { cr.remain = maxInt64 }
func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 }
// may be called from multiple goroutines.
func (cr *connReader) handleReadError(err error) {
cr.conn.cancelCtx()
cr.closeNotify()
}
// closeNotifyFromPipelinedRequest simply calls closeNotify.
//
// This method wrapper is here for documentation. The callers are the
// cases where we send on the closenotify channel because of a
// pipelined HTTP request, per the previous Go behavior and
// documentation (that this "MAY" happen).
//
// TODO: consider changing this behavior and making context
// cancelation and closenotify work the same.
func (cr *connReader) closeNotifyFromPipelinedRequest() {
cr.closeNotify()
}
// may be called from multiple goroutines.
func (cr *connReader) closeNotify() {
res, _ := cr.conn.curReq.Load().(*response)
if res != nil {
if atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) {
res.closeNotifyCh <- true
}
}
}
func (cr *connReader) Read(p []byte) (n int, err error) {
cr.lock()
if cr.inRead {
cr.unlock()
if cr.conn.hijacked() {
panic("invalid Body.Read call. After hijacked, the original Request must not be used")
}
panic("invalid concurrent Body.Read call")
}
if cr.hitReadLimit() {
cr.unlock()
return 0, io.EOF
}
if len(p) == 0 {
cr.unlock()
return 0, nil
}
if int64(len(p)) > cr.remain {
p = p[:cr.remain]
}
if cr.hasByte {
p[0] = cr.byteBuf[0]
cr.hasByte = false
cr.unlock()
return 1, nil
}
cr.inRead = true
cr.unlock()
n, err = cr.conn.rwc.Read(p)
cr.lock()
cr.inRead = false
if err != nil {
cr.handleReadError(err)
}
cr.remain -= int64(n)
cr.unlock()
cr.cond.Broadcast()
return n, err
}
var (
bufioReaderPool sync.Pool
bufioWriter2kPool sync.Pool
bufioWriter4kPool sync.Pool
)
var copyBufPool = sync.Pool{
New: func() interface{} {
b := make([]byte, 32*1024)
return &b
},
}
func bufioWriterPool(size int) *sync.Pool {
switch size {
case 2 << 10:
return &bufioWriter2kPool
case 4 << 10:
return &bufioWriter4kPool
}
return nil
}
func newBufioReader(r io.Reader) *bufio.Reader {
if v := bufioReaderPool.Get(); v != nil {
br := v.(*bufio.Reader)
br.Reset(r)
return br
}
// Note: if this reader size is ever changed, update
// TestHandlerBodyClose's assumptions.
return bufio.NewReader(r)
}
func putBufioReader(br *bufio.Reader) {
br.Reset(nil)
bufioReaderPool.Put(br)
}
func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
pool := bufioWriterPool(size)
if pool != nil {
if v := pool.Get(); v != nil {
bw := v.(*bufio.Writer)
bw.Reset(w)
return bw
}
}
return bufio.NewWriterSize(w, size)
}
func putBufioWriter(bw *bufio.Writer) {
bw.Reset(nil)
if pool := bufioWriterPool(bw.Available()); pool != nil {
pool.Put(bw)
}
}
// DefaultMaxHeaderBytes is the maximum permitted size of the headers
// in an HTTP request.
// This can be overridden by setting Server.MaxHeaderBytes.
const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
func (srv *Server) maxHeaderBytes() int {
if srv.MaxHeaderBytes > 0 {
return srv.MaxHeaderBytes
}
return DefaultMaxHeaderBytes
}
func (srv *Server) initialReadLimitSize() int64 {
return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
}
// wrapper around io.ReadCloser which on first read, sends an
// HTTP/1.1 100 Continue header
type expectContinueReader struct {
resp *response
readCloser io.ReadCloser
closed bool
sawEOF bool
}
func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
if ecr.closed {
return 0, ErrBodyReadAfterClose
}
if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
ecr.resp.wroteContinue = true
ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
ecr.resp.conn.bufw.Flush()
}
n, err = ecr.readCloser.Read(p)
if err == io.EOF {
ecr.sawEOF = true
}
return
}
func (ecr *expectContinueReader) Close() error {
ecr.closed = true
return ecr.readCloser.Close()
}
// TimeFormat is the time format to use when generating times in HTTP
// headers. It is like time.RFC1123 but hard-codes GMT as the time
// zone. The time being formatted must be in UTC for Format to
// generate the correct format.
//
// For parsing this time format, see ParseTime.
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
func appendTime(b []byte, t time.Time) []byte {
const days = "SunMonTueWedThuFriSat"
const months = "JanFebMarAprMayJunJulAugSepOctNovDec"
t = t.UTC()
yy, mm, dd := t.Date()
hh, mn, ss := t.Clock()
day := days[3*t.Weekday():]
mon := months[3*(mm-1):]
return append(b,
day[0], day[1], day[2], ',', ' ',
byte('0'+dd/10), byte('0'+dd%10), ' ',
mon[0], mon[1], mon[2], ' ',
byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
byte('0'+hh/10), byte('0'+hh%10), ':',
byte('0'+mn/10), byte('0'+mn%10), ':',
byte('0'+ss/10), byte('0'+ss%10), ' ',
'G', 'M', 'T')
}
var errTooLarge = errors.New("http: request too large")
// Read next request from connection.
func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
if c.hijacked() {
return nil, ErrHijacked
}
var (
wholeReqDeadline time.Time // or zero if none
hdrDeadline time.Time // or zero if none
)
t0 := time.Now()
if d := c.server.readHeaderTimeout(); d != 0 {
hdrDeadline = t0.Add(d)
}
if d := c.server.ReadTimeout; d != 0 {
wholeReqDeadline = t0.Add(d)
}
c.rwc.SetReadDeadline(hdrDeadline)
if d := c.server.WriteTimeout; d != 0 {
defer func() {
c.rwc.SetWriteDeadline(time.Now().Add(d))
}()
}
c.r.setReadLimit(c.server.initialReadLimitSize())
if c.lastMethod == "POST" {
// RFC 7230 section 3 tolerance for old buggy clients.
peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
c.bufr.Discard(numLeadingCRorLF(peek))
}
req, err := readRequest(c.bufr, keepHostHeader)
if err != nil {
if c.r.hitReadLimit() {
return nil, errTooLarge
}
return nil, err
}
if !http1ServerSupportsRequest(req) {
return nil, badRequestError("unsupported protocol version")
}
c.lastMethod = req.Method
c.r.setInfiniteReadLimit()
hosts, haveHost := req.Header["Host"]
isH2Upgrade := req.isH2Upgrade()
if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
return nil, badRequestError("missing required Host header")
}
if len(hosts) > 1 {
return nil, badRequestError("too many Host headers")
}
if len(hosts) == 1 && !httpguts.ValidHostHeader(hosts[0]) {
return nil, badRequestError("malformed Host header")
}
for k, vv := range req.Header {
if !httpguts.ValidHeaderFieldName(k) {
return nil, badRequestError("invalid header name")
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
return nil, badRequestError("invalid header value")
}
}
}
delete(req.Header, "Host")
ctx, cancelCtx := context.WithCancel(ctx)
req.ctx = ctx
req.RemoteAddr = c.remoteAddr
req.TLS = c.tlsState
if body, ok := req.Body.(*body); ok {
body.doEarlyClose = true
}
// Adjust the read deadline if necessary.
if !hdrDeadline.Equal(wholeReqDeadline) {
c.rwc.SetReadDeadline(wholeReqDeadline)
}
w = &response{
conn: c,
cancelCtx: cancelCtx,
req: req,
reqBody: req.Body,
handlerHeader: make(Header),
contentLength: -1,
closeNotifyCh: make(chan bool, 1),
// We populate these ahead of time so we're not
// reading from req.Header after their Handler starts
// and maybe mutates it (Issue 14940)
wants10KeepAlive: req.wantsHttp10KeepAlive(),
wantsClose: req.wantsClose(),
}
if isH2Upgrade {
w.closeAfterReply = true
}
w.cw.res = w
w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
return w, nil
}
// http1ServerSupportsRequest reports whether Go's HTTP/1.x server
// supports the given request.
func http1ServerSupportsRequest(req *Request) bool {
if req.ProtoMajor == 1 {
return true
}
// Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
// wire up their own HTTP/2 upgrades.
if req.ProtoMajor == 2 && req.ProtoMinor == 0 &&
req.Method == "PRI" && req.RequestURI == "*" {
return true
}
// Reject HTTP/0.x, and all other HTTP/2+ requests (which
// aren't encoded in ASCII anyway).
return false
}
func (w *response) Header() Header {
if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
// Accessing the header between logically writing it
// and physically writing it means we need to allocate
// a clone to snapshot the logically written state.
w.cw.header = w.handlerHeader.clone()
}
w.calledHeader = true
return w.handlerHeader
}
// maxPostHandlerReadBytes is the max number of Request.Body bytes not
// consumed by a handler that the server will read from the client
// in order to keep a connection alive. If there are more bytes than
// this, the server, to be paranoid, instead sends a "Connection:
// close" response.
//
// This number is approximately what a typical machine's TCP buffer
// size is anyway. (if we have the bytes on the machine, we might as
// well read them)
const maxPostHandlerReadBytes = 256 << 10
func checkWriteHeaderCode(code int) {
// Issue 22880: require valid WriteHeader status codes.
// For now we only enforce that it's three digits.
// In the future we might block things over 599 (600 and above aren't defined
// at http://httpwg.org/specs/rfc7231.html#status.codes)
// and we might block under 200 (once we have more mature 1xx support).
// But for now any three digits.
//
// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
// no equivalent bogus thing we can realistically send in HTTP/2,
// so we'll consistently panic instead and help people find their bugs
// early. (We can't return an error from WriteHeader even if we wanted to.)
if code < 100 || code > 999 {
panic(fmt.Sprintf("invalid WriteHeader code %v", code))
}
}
func (w *response) WriteHeader(code int) {
if w.conn.hijacked() {
w.conn.server.logf("http: response.WriteHeader on hijacked connection")
return
}
if w.wroteHeader {
w.conn.server.logf("http: multiple response.WriteHeader calls")
return
}
checkWriteHeaderCode(code)
w.wroteHeader = true
w.status = code
if w.calledHeader && w.cw.header == nil {
w.cw.header = w.handlerHeader.clone()
}
if cl := w.handlerHeader.get("Content-Length"); cl != "" {
v, err := strconv.ParseInt(cl, 10, 64)
if err == nil && v >= 0 {
w.contentLength = v
} else {
w.conn.server.logf("http: invalid Content-Length of %q", cl)
w.handlerHeader.Del("Content-Length")
}
}
}
// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
type extraHeader struct {
contentType string
connection string
transferEncoding string
date []byte // written if not nil
contentLength []byte // written if not nil
}
// Sorted the same as extraHeader.Write's loop.
var extraHeaderKeys = [][]byte{
[]byte("Content-Type"),
[]byte("Connection"),
[]byte("Transfer-Encoding"),
}
var (
headerContentLength = []byte("Content-Length: ")
headerDate = []byte("Date: ")
)
// Write writes the headers described in h to w.
//
// This method has a value receiver, despite the somewhat large size
// of h, because it prevents an allocation. The escape analysis isn't
// smart enough to realize this function doesn't mutate h.
func (h extraHeader) Write(w *bufio.Writer) {
if h.date != nil {
w.Write(headerDate)
w.Write(h.date)
w.Write(crlf)
}
if h.contentLength != nil {
w.Write(headerContentLength)
w.Write(h.contentLength)
w.Write(crlf)
}
for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
if v != "" {
w.Write(extraHeaderKeys[i])
w.Write(colonSpace)
w.WriteString(v)
w.Write(crlf)
}
}
}
// writeHeader finalizes the header sent to the client and writes it
// to cw.res.conn.bufw.
//
// p is not written by writeHeader, but is the first chunk of the body
// that will be written. It is sniffed for a Content-Type if none is
// set explicitly. It's also used to set the Content-Length, if the
// total body size was small and the handler has already finished
// running.
func (cw *chunkWriter) writeHeader(p []byte) {
if cw.wroteHeader {
return
}
cw.wroteHeader = true
w := cw.res
keepAlivesEnabled := w.conn.server.doKeepAlives()
isHEAD := w.req.Method == "HEAD"
// header is written out to w.conn.buf below. Depending on the
// state of the handler, we either own the map or not. If we
// don't own it, the exclude map is created lazily for
// WriteSubset to remove headers. The setHeader struct holds
// headers we need to add.
header := cw.header
owned := header != nil
if !owned {
header = w.handlerHeader
}
var excludeHeader map[string]bool
delHeader := func(key string) {
if owned {
header.Del(key)
return
}
if _, ok := header[key]; !ok {
return
}
if excludeHeader == nil {
excludeHeader = make(map[string]bool)
}
excludeHeader[key] = true
}
var setHeader extraHeader
// Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
trailers := false
for k := range cw.header {
if strings.HasPrefix(k, TrailerPrefix) {
if excludeHeader == nil {
excludeHeader = make(map[string]bool)
}
excludeHeader[k] = true
trailers = true
}
}
for _, v := range cw.header["Trailer"] {
trailers = true
foreachHeaderElement(v, cw.res.declareTrailer)
}
te := header.get("Transfer-Encoding")
hasTE := te != ""
// If the handler is done but never sent a Content-Length
// response header and this is our first (and last) write, set
// it, even to zero. This helps HTTP/1.0 clients keep their
// "keep-alive" connections alive.
// Exceptions: 304/204/1xx responses never get Content-Length, and if
// it was a HEAD request, we don't know the difference between
// 0 actual bytes and 0 bytes because the handler noticed it
// was a HEAD request and chose not to write anything. So for
// HEAD, the handler should either write the Content-Length or
// write non-zero bytes. If it's actually 0 bytes and the
// handler never looked at the Request.Method, we just don't
// send a Content-Length header.
// Further, we don't send an automatic Content-Length if they
// set a Transfer-Encoding, because they're generally incompatible.
if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
w.contentLength = int64(len(p))
setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
}
// If this was an HTTP/1.0 request with keep-alive and we sent a
// Content-Length back, we can make this a keep-alive response ...
if w.wants10KeepAlive && keepAlivesEnabled {
sentLength := header.get("Content-Length") != ""
if sentLength && header.get("Connection") == "keep-alive" {
w.closeAfterReply = false
}
}
// Check for an explicit (and valid) Content-Length header.
hasCL := w.contentLength != -1
if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
_, connectionHeaderSet := header["Connection"]
if !connectionHeaderSet {
setHeader.connection = "keep-alive"
}
} else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
w.closeAfterReply = true
}
if header.get("Connection") == "close" || !keepAlivesEnabled {
w.closeAfterReply = true
}
// If the client wanted a 100-continue but we never sent it to
// them (or, more strictly: we never finished reading their
// request body), don't reuse this connection because it's now
// in an unknown state: we might be sending this response at
// the same time the client is now sending its request body
// after a timeout. (Some HTTP clients send Expect:
// 100-continue but knowing that some servers don't support
// it, the clients set a timer and send the body later anyway)
// If we haven't seen EOF, we can't skip over the unread body
// because we don't know if the next bytes on the wire will be
// the body-following-the-timer or the subsequent request.
// See Issue 11549.
if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
w.closeAfterReply = true
}
// Per RFC 2616, we should consume the request body before
// replying, if the handler hasn't already done so. But we
// don't want to do an unbounded amount of reading here for
// DoS reasons, so we only try up to a threshold.
// TODO(bradfitz): where does RFC 2616 say that? See Issue 15527
// about HTTP/1.x Handlers concurrently reading and writing, like
// HTTP/2 handlers can do. Maybe this code should be relaxed?
if w.req.ContentLength != 0 && !w.closeAfterReply {
var discard, tooBig bool
switch bdy := w.req.Body.(type) {
case *expectContinueReader:
if bdy.resp.wroteContinue {
discard = true
}
case *body:
bdy.mu.Lock()
switch {
case bdy.closed:
if !bdy.sawEOF {
// Body was closed in handler with non-EOF error.
w.closeAfterReply = true
}
case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
tooBig = true
default:
discard = true
}
bdy.mu.Unlock()
default:
discard = true
}
if discard {
_, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1)
switch err {
case nil:
// There must be even more data left over.
tooBig = true
case ErrBodyReadAfterClose:
// Body was already consumed and closed.
case io.EOF:
// The remaining body was just consumed, close it.
err = w.reqBody.Close()
if err != nil {
w.closeAfterReply = true
}
default:
// Some other kind of error occurred, like a read timeout, or
// corrupt chunked encoding. In any case, whatever remains
// on the wire must not be parsed as another HTTP request.
w.closeAfterReply = true
}
}
if tooBig {
w.requestTooLarge()
delHeader("Connection")
setHeader.connection = "close"
}
}
code := w.status
if bodyAllowedForStatus(code) {
// If no content type, apply sniffing algorithm to body.
_, haveType := header["Content-Type"]
if !haveType && !hasTE && len(p) > 0 {
if cto := header.get("X-Content-Type-Options"); strings.EqualFold("nosniff", cto) {
// nosniff is an explicit directive not to guess a content-type.
// Content-sniffing is no less susceptible to polyglot attacks via
// hosted content when done on the server.
setHeader.contentType = "application/octet-stream"
w.conn.server.logf("http: WriteHeader called with X-Content-Type-Options:nosniff but no Content-Type")
} else {
setHeader.contentType = DetectContentType(p)
}
}
} else {
for _, k := range suppressedHeaders(code) {
delHeader(k)
}
}
if _, ok := header["Date"]; !ok {
setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
}
if hasCL && hasTE && te != "identity" {
// TODO: return an error if WriteHeader gets a return parameter
// For now just ignore the Content-Length.
w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
te, w.contentLength)
delHeader("Content-Length")
hasCL = false
}
if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) {
// do nothing
} else if code == StatusNoContent {
delHeader("Transfer-Encoding")
} else if hasCL {
delHeader("Transfer-Encoding")
} else if w.req.ProtoAtLeast(1, 1) {
// HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
// content-length has been provided. The connection must be closed after the
// reply is written, and no chunking is to be done. This is the setup
// recommended in the Server-Sent Events candidate recommendation 11,
// section 8.
if hasTE && te == "identity" {
cw.chunking = false
w.closeAfterReply = true
} else {
// HTTP/1.1 or greater: use chunked transfer encoding
// to avoid closing the connection at EOF.
cw.chunking = true
setHeader.transferEncoding = "chunked"
if hasTE && te == "chunked" {
// We will send the chunked Transfer-Encoding header later.
delHeader("Transfer-Encoding")
}
}
} else {
// HTTP version < 1.1: cannot do chunked transfer
// encoding and we don't know the Content-Length so
// signal EOF by closing connection.
w.closeAfterReply = true
delHeader("Transfer-Encoding") // in case already set
}
// Cannot use Content-Length with non-identity Transfer-Encoding.
if cw.chunking {
delHeader("Content-Length")
}
if !w.req.ProtoAtLeast(1, 0) {
return
}
if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) {
delHeader("Connection")
if w.req.ProtoAtLeast(1, 1) {
setHeader.connection = "close"
}
}
writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
cw.header.WriteSubset(w.conn.bufw, excludeHeader)
setHeader.Write(w.conn.bufw)
w.conn.bufw.Write(crlf)
}
// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 7230 section 7 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
v = textproto.TrimString(v)
if v == "" {
return
}
if !strings.Contains(v, ",") {
fn(v)
return
}
for _, f := range strings.Split(v, ",") {
if f = textproto.TrimString(f); f != "" {
fn(f)
}
}
}
// writeStatusLine writes an HTTP/1.x Status-Line (RFC 7230 Section 3.1.2)
// to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
// code is the response status code.
// scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
if is11 {
bw.WriteString("HTTP/1.1 ")
} else {
bw.WriteString("HTTP/1.0 ")
}
if text, ok := statusText[code]; ok {
bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
bw.WriteByte(' ')
bw.WriteString(text)
bw.WriteString("\r\n")
} else {
// don't worry about performance
fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
}
}
// bodyAllowed reports whether a Write is allowed for this response type.
// It's illegal to call this before the header has been flushed.
func (w *response) bodyAllowed() bool {
if !w.wroteHeader {
panic("")
}
return bodyAllowedForStatus(w.status)
}
// The Life Of A Write is like this:
//
// Handler starts. No header has been sent. The handler can either
// write a header, or just start writing. Writing before sending a header
// sends an implicitly empty 200 OK header.
//
// If the handler didn't declare a Content-Length up front, we either
// go into chunking mode or, if the handler finishes running before
// the chunking buffer size, we compute a Content-Length and send that
// in the header instead.
//
// Likewise, if the handler didn't set a Content-Type, we sniff that
// from the initial chunk of output.
//
// The Writers are wired together like:
//
// 1. *response (the ResponseWriter) ->
// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes
// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
// and which writes the chunk headers, if needed.
// 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to ->
// 5. checkConnErrorWriter{c}, which notes any non-nil error on Write
// and populates c.werr with it if so, but otherwise writes to:
// 6. the rwc, the net.Conn.
//
// TODO(bradfitz): short-circuit some of the buffering when the
// initial header contains both a Content-Type and Content-Length.
// Also short-circuit in (1) when the header's been sent and not in
// chunking mode, writing directly to (4) instead, if (2) has no
// buffered data. More generally, we could short-circuit from (1) to
// (3) even in chunking mode if the write size from (1) is over some
// threshold and nothing is in (2). The answer might be mostly making
// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
// with this instead.
func (w *response) Write(data []byte) (n int, err error) {
return w.write(len(data), data, "")
}
func (w *response) WriteString(data string) (n int, err error) {
return w.write(len(data), nil, data)
}
// Exactly one of dataB or dataS is used; dataB takes precedence when non-nil.
func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
if w.conn.hijacked() {
if lenData > 0 {
w.conn.server.logf("http: response.Write on hijacked connection")
}
return 0, ErrHijacked
}
if !w.wroteHeader {
w.WriteHeader(StatusOK)
}
if lenData == 0 {
return 0, nil
}
if !w.bodyAllowed() {
return 0, ErrBodyNotAllowed
}
w.written += int64(lenData) // ignoring errors, for errorKludge
if w.contentLength != -1 && w.written > w.contentLength {
return 0, ErrContentLength
}
if dataB != nil {
return w.w.Write(dataB)
} else {
return w.w.WriteString(dataS)
}
}
func (w *response) finishRequest() {
w.handlerDone.setTrue()
if !w.wroteHeader {
w.WriteHeader(StatusOK)
}
w.w.Flush()
putBufioWriter(w.w)
w.cw.close()
w.conn.bufw.Flush()
w.conn.r.abortPendingRead()
// Close the body (regardless of w.closeAfterReply) so we can
// re-use its bufio.Reader later safely.
w.reqBody.Close()
if w.req.MultipartForm != nil {
w.req.MultipartForm.RemoveAll()
}
}
// shouldReuseConnection reports whether the underlying TCP connection can be reused.
// It must only be called after the handler is done executing.
func (w *response) shouldReuseConnection() bool {
if w.closeAfterReply {
// The request or something set while executing the
// handler indicated we shouldn't reuse this
// connection.
return false
}
if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
// Did not write enough. Avoid getting out of sync.
return false
}
// There was some error writing to the underlying connection
// during the request, so don't re-use this conn.
if w.conn.werr != nil {
return false
}
if w.closedRequestBodyEarly() {
return false
}
return true
}
func (w *response) closedRequestBodyEarly() bool {
body, ok := w.req.Body.(*body)
return ok && body.didEarlyClose()
}
func (w *response) Flush() {
if !w.wroteHeader {
w.WriteHeader(StatusOK)
}
w.w.Flush()
w.cw.flush()
}
func (c *conn) finalFlush() {
if c.bufr != nil {
// Steal the bufio.Reader (~4KB worth of memory) and its associated
// reader for a future connection.
putBufioReader(c.bufr)
c.bufr = nil
}
if c.bufw != nil {
c.bufw.Flush()
// Steal the bufio.Writer (~4KB worth of memory) and its associated
// writer for a future connection.
putBufioWriter(c.bufw)
c.bufw = nil
}
}
// Close the connection.
func (c *conn) close() {
c.finalFlush()
c.rwc.Close()
}
// rstAvoidanceDelay is the amount of time we sleep after closing the
// write side of a TCP connection before closing the entire socket.
// By sleeping, we increase the chances that the client sees our FIN
// and processes its final data before they process the subsequent RST
// from closing a connection with known unread data.
// This RST seems to occur mostly on BSD systems. (And Windows?)
// This timeout is somewhat arbitrary (~latency around the planet).
const rstAvoidanceDelay = 500 * time.Millisecond
type closeWriter interface {
CloseWrite() error
}
var _ closeWriter = (*net.TCPConn)(nil)
// closeWrite flushes any outstanding data and sends a FIN packet (if
// client is connected via TCP), signalling that we're done. We then
// pause for a bit, hoping the client processes it before any
// subsequent RST.
//
// See https://golang.org/issue/3595
func (c *conn) closeWriteAndWait() {
c.finalFlush()
if tcp, ok := c.rwc.(closeWriter); ok {
tcp.CloseWrite()
}
time.Sleep(rstAvoidanceDelay)
}
// validNPN reports whether the proto is not a blacklisted Next
// Protocol Negotiation protocol. Empty and built-in protocol types
// are blacklisted and can't be overridden with alternate
// implementations.
func validNPN(proto string) bool {
switch proto {
case "", "http/1.1", "http/1.0":
return false
}
return true
}
func (c *conn) setState(nc net.Conn, state ConnState) {
srv := c.server
switch state {
case StateNew:
srv.trackConn(c, true)
case StateHijacked, StateClosed:
srv.trackConn(c, false)
}
c.curState.Store(connStateInterface[state])
if hook := srv.ConnState; hook != nil {
hook(nc, state)
}
}
// connStateInterface is an array of the interface{} versions of
// ConnState values, so we can use them in atomic.Values later without
// paying the cost of shoving their integers in an interface{}.
var connStateInterface = [...]interface{}{
StateNew: StateNew,
StateActive: StateActive,
StateIdle: StateIdle,
StateHijacked: StateHijacked,
StateClosed: StateClosed,
}
// badRequestError is a literal string (used by the server in HTML,
// unescaped) to tell the user why their request was bad. It should
// be plain text without user info or other embedded errors.
type badRequestError string
func (e badRequestError) Error() string { return "Bad Request: " + string(e) }
// ErrAbortHandler is a sentinel panic value to abort a handler.
// While any panic from ServeHTTP aborts the response to the client,
// panicking with ErrAbortHandler also suppresses logging of a stack
// trace to the server's error log.
var ErrAbortHandler = errors.New("net/http: abort Handler")
// isCommonNetReadError reports whether err is a common error
// encountered during reading a request off the network when the
// client has gone away or had its read fail somehow. This is used to
// determine which logs are interesting enough to log about.
func isCommonNetReadError(err error) bool {
if err == io.EOF {
return true
}
if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
return true
}
if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
return true
}
return false
}
// Serve a new connection.
func (c *conn) serve(ctx context.Context) {
c.remoteAddr = c.rwc.RemoteAddr().String()
ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr())
defer func() {
if err := recover(); err != nil && err != ErrAbortHandler {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
}
if !c.hijacked() {
c.close()
c.setState(c.rwc, StateClosed)
}
}()
if tlsConn, ok := c.rwc.(*tls.Conn); ok {
if d := c.server.ReadTimeout; d != 0 {
c.rwc.SetReadDeadline(time.Now().Add(d))
}
if d := c.server.WriteTimeout; d != 0 {
c.rwc.SetWriteDeadline(time.Now().Add(d))
}
if err := tlsConn.Handshake(); err != nil {
c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
return
}
c.tlsState = new(tls.ConnectionState)
*c.tlsState = tlsConn.ConnectionState()
if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) {
if fn := c.server.TLSNextProto[proto]; fn != nil {
h := initNPNRequest{tlsConn, serverHandler{c.server}}
fn(c.server, tlsConn, h)
}
return
}
}
// HTTP/1.x from here on.
ctx, cancelCtx := context.WithCancel(ctx)
c.cancelCtx = cancelCtx
defer cancelCtx()
c.r = &connReader{conn: c}
c.bufr = newBufioReader(c.r)
c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
for {
w, err := c.readRequest(ctx)
if c.r.remain != c.server.initialReadLimitSize() {
// If we read any bytes off the wire, we're active.
c.setState(c.rwc, StateActive)
}
if err != nil {
const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"
if err == errTooLarge {
// Their HTTP client may or may not be
// able to read this if we're
// responding to them and hanging up
// while they're still writing their
// request. Undefined behavior.
const publicErr = "431 Request Header Fields Too Large"
fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
c.closeWriteAndWait()
return
}
if isCommonNetReadError(err) {
return // don't reply
}
publicErr := "400 Bad Request"
if v, ok := err.(badRequestError); ok {
publicErr = publicErr + ": " + string(v)
}
fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
return
}
// Expect 100 Continue support
req := w.req
if req.expectsContinue() {
if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
// Wrap the Body reader with one that replies on the connection
req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
}
} else if req.Header.get("Expect") != "" {
w.sendExpectationFailed()
return
}
c.curReq.Store(w)
if requestBodyRemains(req.Body) {
registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
} else {
if w.conn.bufr.Buffered() > 0 {
w.conn.r.closeNotifyFromPipelinedRequest()
}
w.conn.r.startBackgroundRead()
}
// HTTP cannot have multiple simultaneous active requests.[*]
// Until the server replies to this request, it can't read another,
// so we might as well run the handler in this goroutine.
// [*] Not strictly true: HTTP pipelining. We could let them all process
// in parallel even if their responses need to be serialized.
// But we're not going to implement HTTP pipelining because it
// was never deployed in the wild and the answer is HTTP/2.
serverHandler{c.server}.ServeHTTP(w, w.req)
w.cancelCtx()
if c.hijacked() {
return
}
w.finishRequest()
if !w.shouldReuseConnection() {
if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
c.closeWriteAndWait()
}
return
}
c.setState(c.rwc, StateIdle)
c.curReq.Store((*response)(nil))
if !w.conn.server.doKeepAlives() {
// We're in shutdown mode. We might've replied
// to the user without "Connection: close" and
// they might think they can send another
// request, but such is life with HTTP/1.1.
return
}
if d := c.server.idleTimeout(); d != 0 {
c.rwc.SetReadDeadline(time.Now().Add(d))
if _, err := c.bufr.Peek(4); err != nil {
return
}
}
c.rwc.SetReadDeadline(time.Time{})
}
}
func (w *response) sendExpectationFailed() {
// TODO(bradfitz): let ServeHTTP handlers handle
// requests with non-standard expectation[s]? Seems
// theoretical at best, and doesn't fit into the
// current ServeHTTP model anyway. We'd need to
// make the ResponseWriter an optional
// "ExpectReplier" interface or something.
//
// For now we'll just obey RFC 7231 5.1.1 which says
// "A server that receives an Expect field-value other
// than 100-continue MAY respond with a 417 (Expectation
// Failed) status code to indicate that the unexpected
// expectation cannot be met."
w.Header().Set("Connection", "close")
w.WriteHeader(StatusExpectationFailed)
w.finishRequest()
}
// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
// and a Hijacker.
func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
if w.handlerDone.isSet() {
panic("net/http: Hijack called after ServeHTTP finished")
}
if w.wroteHeader {
w.cw.flush()
}
c := w.conn
c.mu.Lock()
defer c.mu.Unlock()
// Release the bufioWriter that writes to the chunk writer, it is not
// used after a connection has been hijacked.
rwc, buf, err = c.hijackLocked()
if err == nil {
putBufioWriter(w.w)
w.w = nil
}
return rwc, buf, err
}
func (w *response) CloseNotify() <-chan bool {
if w.handlerDone.isSet() {
panic("net/http: CloseNotify called after ServeHTTP finished")
}
return w.closeNotifyCh
}
func registerOnHitEOF(rc io.ReadCloser, fn func()) {
switch v := rc.(type) {
case *expectContinueReader:
registerOnHitEOF(v.readCloser, fn)
case *body:
v.registerOnHitEOF(fn)
default:
panic("unexpected type " + fmt.Sprintf("%T", rc))
}
}
// requestBodyRemains reports whether future calls to Read
// on rc might yield more data.
func requestBodyRemains(rc io.ReadCloser) bool {
if rc == NoBody {
return false
}
switch v := rc.(type) {
case *expectContinueReader:
return requestBodyRemains(v.readCloser)
case *body:
return v.bodyRemains()
default:
panic("unexpected type " + fmt.Sprintf("%T", rc))
}
}
// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as HTTP handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler that calls f.
type HandlerFunc func(ResponseWriter, *Request)
// ServeHTTP calls f(w, r).
func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
f(w, r)
}
// Helper handlers
// Error replies to the request with the specified error message and HTTP code.
// It does not otherwise end the request; the caller should ensure no further
// writes are done to w.
// The error message should be plain text.
func Error(w ResponseWriter, error string, code int) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(code)
fmt.Fprintln(w, error)
}
// NotFound replies to the request with an HTTP 404 not found error.
func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }
// NotFoundHandler returns a simple request handler
// that replies to each request with a ``404 page not found'' reply.
func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
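// Illustrative sketch (not part of the original source): using Error and
// NotFound from inside a handler. The handler name checkToken and the
// "X-Token" header are hypothetical.
//
//	func checkToken(w ResponseWriter, r *Request) {
//		if r.Header.Get("X-Token") == "" {
//			Error(w, "missing token", StatusUnauthorized)
//			return
//		}
//		NotFound(w, r) // nothing else to serve in this sketch
//	}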
// StripPrefix returns a handler that serves HTTP requests
// by removing the given prefix from the request URL's Path
// and invoking the handler h. StripPrefix handles a
// request for a path that doesn't begin with prefix by
// replying with an HTTP 404 not found error.
func StripPrefix(prefix string, h Handler) Handler {
if prefix == "" {
return h
}
return HandlerFunc(func(w ResponseWriter, r *Request) {
if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) {
r2 := new(Request)
*r2 = *r
r2.URL = new(url.URL)
*r2.URL = *r.URL
r2.URL.Path = p
h.ServeHTTP(w, r2)
} else {
NotFound(w, r)
}
})
}
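// Illustrative sketch (not part of the original source): serving files below a
// URL prefix with StripPrefix. The directory "/tmp/static" is hypothetical.
//
//	fs := FileServer(Dir("/tmp/static"))
//	Handle("/static/", StripPrefix("/static/", fs))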
// Redirect replies to the request with a redirect to url,
// which may be a path relative to the request path.
//
// The provided code should be in the 3xx range and is usually
// StatusMovedPermanently, StatusFound or StatusSeeOther.
//
// If the Content-Type header has not been set, Redirect sets it
// to "text/html; charset=utf-8" and writes a small HTML body.
// Setting the Content-Type header to any value, including nil,
// disables that behavior.
func Redirect(w ResponseWriter, r *Request, url string, code int) {
// parseURL is just url.Parse (url is shadowed for godoc).
if u, err := parseURL(url); err == nil {
// If url was relative, make its path absolute by
// combining with request path.
// The client would probably do this for us,
// but doing it ourselves is more reliable.
// See RFC 7231, section 7.1.2
if u.Scheme == "" && u.Host == "" {
oldpath := r.URL.Path
if oldpath == "" { // should not happen, but avoid a crash if it does
oldpath = "/"
}
// no leading http://server
if url == "" || url[0] != '/' {
// make relative path absolute
olddir, _ := path.Split(oldpath)
url = olddir + url
}
var query string
if i := strings.Index(url, "?"); i != -1 {
url, query = url[:i], url[i:]
}
// clean up but preserve trailing slash
trailing := strings.HasSuffix(url, "/")
url = path.Clean(url)
if trailing && !strings.HasSuffix(url, "/") {
url += "/"
}
url += query
}
}
h := w.Header()
// RFC 7231 notes that a short HTML body is usually included in
// the response because older user agents may not understand 301/307.
// Do it only if the request didn't already have a Content-Type header.
_, hadCT := h["Content-Type"]
h.Set("Location", hexEscapeNonASCII(url))
if !hadCT && (r.Method == "GET" || r.Method == "HEAD") {
h.Set("Content-Type", "text/html; charset=utf-8")
}
w.WriteHeader(code)
// Shouldn't send the body for POST or HEAD; that leaves GET.
if !hadCT && r.Method == "GET" {
body := "<a href=\"" + htmlEscape(url) + "\">" + statusText[code] + "</a>.\n"
fmt.Fprintln(w, body)
}
}
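// Illustrative sketch (not part of the original source): issuing a permanent
// redirect from a handler. The target path "/new" is hypothetical.
//
//	func moved(w ResponseWriter, r *Request) {
//		Redirect(w, r, "/new", StatusMovedPermanently)
//	}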
// parseURL is just url.Parse. It exists only so that url.Parse can be called
// in places where url is shadowed for godoc. See https://golang.org/cl/49930.
var parseURL = url.Parse
var htmlReplacer = strings.NewReplacer(
	"&", "&amp;",
	"<", "&lt;",
	">", "&gt;",
	// "&#34;" is shorter than "&quot;".
	`"`, "&#34;",
	// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
	"'", "&#39;",
)
func htmlEscape(s string) string {
return htmlReplacer.Replace(s)
}
// Redirect to a fixed URL
type redirectHandler struct {
url string
code int
}
func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
Redirect(w, r, rh.url, rh.code)
}
// RedirectHandler returns a request handler that redirects
// each request it receives to the given url using the given
// status code.
//
// The provided code should be in the 3xx range and is usually
// StatusMovedPermanently, StatusFound or StatusSeeOther.
func RedirectHandler(url string, code int) Handler {
return &redirectHandler{url, code}
}
// ServeMux is an HTTP request multiplexer.
// It matches the URL of each incoming request against a list of registered
// patterns and calls the handler for the pattern that
// most closely matches the URL.
//
// Patterns name fixed, rooted paths, like "/favicon.ico",
// or rooted subtrees, like "/images/" (note the trailing slash).
// Longer patterns take precedence over shorter ones, so that
// if there are handlers registered for both "/images/"
// and "/images/thumbnails/", the latter handler will be
// called for paths beginning "/images/thumbnails/" and the
// former will receive requests for any other paths in the
// "/images/" subtree.
//
// Note that since a pattern ending in a slash names a rooted subtree,
// the pattern "/" matches all paths not matched by other registered
// patterns, not just the URL with Path == "/".
//
// If a subtree has been registered and a request is received naming the
// subtree root without its trailing slash, ServeMux redirects that
// request to the subtree root (adding the trailing slash). This behavior can
// be overridden with a separate registration for the path without
// the trailing slash. For example, registering "/images/" causes ServeMux
// to redirect a request for "/images" to "/images/", unless "/images" has
// been registered separately.
//
// Patterns may optionally begin with a host name, restricting matches to
// URLs on that host only. Host-specific patterns take precedence over
// general patterns, so that a handler might register for the two patterns
// "/codesearch" and "codesearch.google.com/" without also taking over
// requests for "http://www.google.com/".
//
// ServeMux also takes care of sanitizing the URL request path,
// redirecting any request containing . or .. elements or repeated slashes
// to an equivalent, cleaner URL.
type ServeMux struct {
mu sync.RWMutex
m map[string]muxEntry
hosts bool // whether any patterns contain hostnames
}
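// Illustrative sketch (not part of the original source) of the pattern rules
// described above: with the registrations below, a request for
// "/images/thumbnails/a.png" is dispatched to thumbs, any other "/images/"
// path to images, and everything else to root. The handler names are
// hypothetical.
//
//	mux := NewServeMux()
//	mux.Handle("/", root)
//	mux.Handle("/images/", images)
//	mux.Handle("/images/thumbnails/", thumbs)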
type muxEntry struct {
h Handler
pattern string
}
// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux { return new(ServeMux) }
// DefaultServeMux is the default ServeMux used by Serve.
var DefaultServeMux = &defaultServeMux
var defaultServeMux ServeMux
// Does path match pattern?
func pathMatch(pattern, path string) bool {
if len(pattern) == 0 {
// should not happen
return false
}
n := len(pattern)
if pattern[n-1] != '/' {
return pattern == path
}
return len(path) >= n && path[0:n] == pattern
}
// cleanPath returns the canonical path for p, eliminating . and .. elements.
func cleanPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
// path.Clean removes trailing slash except for root;
// put the trailing slash back if necessary.
if p[len(p)-1] == '/' && np != "/" {
np += "/"
}
return np
}
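// Illustrative examples (not part of the original source) of cleanPath:
//
//	cleanPath("")           == "/"
//	cleanPath("a/b/../c")   == "/a/c"
//	cleanPath("/a//b/./c/") == "/a/b/c/" // trailing slash preserved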
// stripHostPort returns h without any trailing ":<port>".
func stripHostPort(h string) string {
// If no port on host, return unchanged
if strings.IndexByte(h, ':') == -1 {
return h
}
host, _, err := net.SplitHostPort(h)
if err != nil {
return h // on error, return unchanged
}
return host
}
// Find a handler on a handler map given a path string.
// Most-specific (longest) pattern wins.
func (mux *ServeMux) match(path string) (h Handler, pattern string) {
// Check for exact match first.
v, ok := mux.m[path]
if ok {
return v.h, v.pattern
}
// Check for longest valid match.
var n = 0
for k, v := range mux.m {
if !pathMatch(k, path) {
continue
}
if h == nil || len(k) > n {
n = len(k)
h = v.h
pattern = v.pattern
}
}
return
}
// redirectToPathSlash determines whether the given path needs a trailing "/"
// appended to it. This occurs when a handler for path + "/" was already
// registered, but not for path itself. If the path needs the slash, it creates
// a new URL with the path set to u.Path + "/" and returns true to indicate so.
func (mux *ServeMux) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) {
if !mux.shouldRedirect(host, path) {
return u, false
}
path = path + "/"
u = &url.URL{Path: path, RawQuery: u.RawQuery}
return u, true
}
// shouldRedirect reports whether the given path and host should be redirected to
// path+"/". This should happen if a handler is registered for path+"/" but
// not path -- see comments at ServeMux.
func (mux *ServeMux) shouldRedirect(host, path string) bool {
mux.mu.RLock()
defer mux.mu.RUnlock()
p := []string{path, host + path}
for _, c := range p {
if _, exist := mux.m[c]; exist {
return false
}
}
n := len(path)
if n == 0 {
return false
}
for _, c := range p {
if _, exist := mux.m[c+"/"]; exist {
return path[n-1] != '/'
}
}
return false
}
// Handler returns the handler to use for the given request,
// consulting r.Method, r.Host, and r.URL.Path. It always returns
// a non-nil handler. If the path is not in its canonical form, the
// handler will be an internally-generated handler that redirects
// to the canonical path. If the host contains a port, it is ignored
// when matching handlers.
//
// The path and host are used unchanged for CONNECT requests.
//
// Handler also returns the registered pattern that matches the
// request or, in the case of internally-generated redirects,
// the pattern that will match after following the redirect.
//
// If there is no registered handler that applies to the request,
// Handler returns a ``page not found'' handler and an empty pattern.
func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
// CONNECT requests are not canonicalized.
if r.Method == "CONNECT" {
// If r.URL.Path is /tree and its handler is not registered,
// the /tree -> /tree/ redirect applies to CONNECT requests
// but the path canonicalization does not.
if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok {
return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
}
return mux.handler(r.Host, r.URL.Path)
}
// All other requests have any port stripped and path cleaned
// before passing to mux.handler.
host := stripHostPort(r.Host)
path := cleanPath(r.URL.Path)
// If the given path is /tree and its handler is not registered,
// redirect for /tree/.
if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok {
return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
}
if path != r.URL.Path {
_, pattern = mux.handler(host, path)
url := *r.URL
url.Path = path
return RedirectHandler(url.String(), StatusMovedPermanently), pattern
}
return mux.handler(host, r.URL.Path)
}
// handler is the main implementation of Handler.
// The path is known to be in canonical form, except for CONNECT methods.
func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
mux.mu.RLock()
defer mux.mu.RUnlock()
// Host-specific pattern takes precedence over generic ones
if mux.hosts {
h, pattern = mux.match(host + path)
}
if h == nil {
h, pattern = mux.match(path)
}
if h == nil {
h, pattern = NotFoundHandler(), ""
}
return
}
// ServeHTTP dispatches the request to the handler whose
// pattern most closely matches the request URL.
func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
if r.RequestURI == "*" {
if r.ProtoAtLeast(1, 1) {
w.Header().Set("Connection", "close")
}
w.WriteHeader(StatusBadRequest)
return
}
h, _ := mux.Handler(r)
h.ServeHTTP(w, r)
}
// Handle registers the handler for the given pattern.
// If a handler already exists for pattern, Handle panics.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
mux.mu.Lock()
defer mux.mu.Unlock()
if pattern == "" {
panic("http: invalid pattern")
}
if handler == nil {
panic("http: nil handler")
}
if _, exist := mux.m[pattern]; exist {
panic("http: multiple registrations for " + pattern)
}
if mux.m == nil {
mux.m = make(map[string]muxEntry)
}
mux.m[pattern] = muxEntry{h: handler, pattern: pattern}
if pattern[0] != '/' {
mux.hosts = true
}
}
// HandleFunc registers the handler function for the given pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
if handler == nil {
panic("http: nil handler")
}
mux.Handle(pattern, HandlerFunc(handler))
}
// Handle registers the handler for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
// HandleFunc registers the handler function for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
DefaultServeMux.HandleFunc(pattern, handler)
}
// Serve accepts incoming HTTP connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
// Handler is typically nil, in which case the DefaultServeMux is used.
func Serve(l net.Listener, handler Handler) error {
srv := &Server{Handler: handler}
return srv.Serve(l)
}
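// Illustrative sketch (not part of the original source): serving the
// DefaultServeMux on an explicitly created listener. The address ":8080" is
// hypothetical.
//
//	ln, err := net.Listen("tcp", ":8080")
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Fatal(Serve(ln, nil)) // nil handler selects DefaultServeMux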
// ServeTLS accepts incoming HTTPS connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
//
// Handler is typically nil, in which case the DefaultServeMux is used.
//
// Additionally, files containing a certificate and matching private key
// for the server must be provided. If the certificate is signed by a
// certificate authority, the certFile should be the concatenation
// of the server's certificate, any intermediates, and the CA's certificate.
func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error {
srv := &Server{Handler: handler}
return srv.ServeTLS(l, certFile, keyFile)
}
// A Server defines parameters for running an HTTP server.
// The zero value for Server is a valid configuration.
type Server struct {
Addr string // TCP address to listen on, ":http" if empty
Handler Handler // handler to invoke, http.DefaultServeMux if nil
// TLSConfig optionally provides a TLS configuration for use
// by ServeTLS and ListenAndServeTLS. Note that this value is
// cloned by ServeTLS and ListenAndServeTLS, so it's not
// possible to modify the configuration with methods like
// tls.Config.SetSessionTicketKeys. To use
// SetSessionTicketKeys, use Server.Serve with a TLS Listener
// instead.
TLSConfig *tls.Config
// ReadTimeout is the maximum duration for reading the entire
// request, including the body.
//
// Because ReadTimeout does not let Handlers make per-request
// decisions on each request body's acceptable deadline or
// upload rate, most users will prefer to use
// ReadHeaderTimeout. It is valid to use them both.
ReadTimeout time.Duration
// ReadHeaderTimeout is the amount of time allowed to read
// request headers. The connection's read deadline is reset
// after reading the headers and the Handler can decide what
// is considered too slow for the body.
ReadHeaderTimeout time.Duration
// WriteTimeout is the maximum duration before timing out
// writes of the response. It is reset whenever a new
// request's header is read. Like ReadTimeout, it does not
// let Handlers make decisions on a per-request basis.
WriteTimeout time.Duration
// IdleTimeout is the maximum amount of time to wait for the
// next request when keep-alives are enabled. If IdleTimeout
// is zero, the value of ReadTimeout is used. If both are
// zero, ReadHeaderTimeout is used.
IdleTimeout time.Duration
// MaxHeaderBytes controls the maximum number of bytes the
// server will read parsing the request header's keys and
// values, including the request line. It does not limit the
// size of the request body.
// If zero, DefaultMaxHeaderBytes is used.
MaxHeaderBytes int
// TLSNextProto optionally specifies a function to take over
// ownership of the provided TLS connection when an NPN/ALPN
// protocol upgrade has occurred. The map key is the protocol
// name negotiated. The Handler argument should be used to
// handle HTTP requests and will initialize the Request's TLS
// and RemoteAddr if not already set. The connection is
// automatically closed when the function returns.
// If TLSNextProto is not nil, HTTP/2 support is not enabled
// automatically.
TLSNextProto map[string]func(*Server, *tls.Conn, Handler)
// ConnState specifies an optional callback function that is
// called when a client connection changes state. See the
// ConnState type and associated constants for details.
ConnState func(net.Conn, ConnState)
// ErrorLog specifies an optional logger for errors accepting
// connections, unexpected behavior from handlers, and
// underlying FileSystem errors.
// If nil, logging is done via the log package's standard logger.
ErrorLog *log.Logger
disableKeepAlives int32 // accessed atomically.
inShutdown int32 // accessed atomically (non-zero means we're in Shutdown)
nextProtoOnce sync.Once // guards setupHTTP2_* init
nextProtoErr error // result of http2.ConfigureServer if used
mu sync.Mutex
listeners map[*net.Listener]struct{}
activeConn map[*conn]struct{}
doneChan chan struct{}
onShutdown []func()
}
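// Illustrative sketch (not part of the original source): a Server configured
// with explicit timeouts rather than the zero-value defaults. The address,
// durations, certificate files, and mux are hypothetical.
//
//	srv := &Server{
//		Addr:              ":8443",
//		Handler:           mux,
//		ReadHeaderTimeout: 5 * time.Second,
//		ReadTimeout:       30 * time.Second,
//		WriteTimeout:      30 * time.Second,
//		IdleTimeout:       120 * time.Second,
//	}
//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))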
func (s *Server) getDoneChan() <-chan struct{} {
s.mu.Lock()
defer s.mu.Unlock()
return s.getDoneChanLocked()
}
func (s *Server) getDoneChanLocked() chan struct{} {
if s.doneChan == nil {
s.doneChan = make(chan struct{})
}
return s.doneChan
}
func (s *Server) closeDoneChanLocked() {
ch := s.getDoneChanLocked()
select {
case <-ch:
// Already closed. Don't close again.
default:
// Safe to close here. We're the only closer, guarded
// by s.mu.
close(ch)
}
}
// Close immediately closes all active net.Listeners and any
// connections in state StateNew, StateActive, or StateIdle. For a
// graceful shutdown, use Shutdown.
//
// Close does not attempt to close (and does not even know about)
// any hijacked connections, such as WebSockets.
//
// Close returns any error returned from closing the Server's
// underlying Listener(s).
func (srv *Server) Close() error {
srv.mu.Lock()
defer srv.mu.Unlock()
srv.closeDoneChanLocked()
err := srv.closeListenersLocked()
for c := range srv.activeConn {
c.rwc.Close()
delete(srv.activeConn, c)
}
return err
}
// shutdownPollInterval is how often we poll for quiescence
// during Server.Shutdown. This is lower during tests, to
// speed up tests.
// Ideally we could find a solution that doesn't involve polling,
// but which also doesn't have a high runtime cost (and doesn't
// involve any contentious mutexes), but that is left as an
// exercise for the reader.
var shutdownPollInterval = 500 * time.Millisecond
// Shutdown gracefully shuts down the server without interrupting any
// active connections. Shutdown works by first closing all open
// listeners, then closing all idle connections, and then waiting
// indefinitely for connections to return to idle and then shut down.
// If the provided context expires before the shutdown is complete,
// Shutdown returns the context's error, otherwise it returns any
// error returned from closing the Server's underlying Listener(s).
//
// When Shutdown is called, Serve, ListenAndServe, and
// ListenAndServeTLS immediately return ErrServerClosed. Make sure the
// program doesn't exit and waits instead for Shutdown to return.
//
// Shutdown does not attempt to close nor wait for hijacked
// connections such as WebSockets. The caller of Shutdown should
// separately notify such long-lived connections of shutdown and wait
// for them to close, if desired. See RegisterOnShutdown for a way to
// register shutdown notification functions.
func (srv *Server) Shutdown(ctx context.Context) error {
atomic.AddInt32(&srv.inShutdown, 1)
defer atomic.AddInt32(&srv.inShutdown, -1)
srv.mu.Lock()
lnerr := srv.closeListenersLocked()
srv.closeDoneChanLocked()
for _, f := range srv.onShutdown {
go f()
}
srv.mu.Unlock()
ticker := time.NewTicker(shutdownPollInterval)
defer ticker.Stop()
for {
if srv.closeIdleConns() {
return lnerr
}
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}
}
}
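// Illustrative sketch (not part of the original source): a graceful shutdown
// triggered by SIGINT. The srv variable and the 5-second grace period are
// hypothetical, and a real program would also import context and os/signal.
// ListenAndServe returns ErrServerClosed once Shutdown has begun, so that
// error is not treated as fatal; the final receive keeps the program alive
// until Shutdown has finished draining connections.
//
//	idleConnsClosed := make(chan struct{})
//	go func() {
//		sig := make(chan os.Signal, 1)
//		signal.Notify(sig, os.Interrupt)
//		<-sig
//		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//		defer cancel()
//		if err := srv.Shutdown(ctx); err != nil {
//			log.Printf("shutdown: %v", err)
//		}
//		close(idleConnsClosed)
//	}()
//	if err := srv.ListenAndServe(); err != ErrServerClosed {
//		log.Fatal(err)
//	}
//	<-idleConnsClosed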
// RegisterOnShutdown registers a function to call on Shutdown.
// This can be used to gracefully shutdown connections that have
// undergone NPN/ALPN protocol upgrade or that have been hijacked.
// This function should start protocol-specific graceful shutdown,
// but should not wait for shutdown to complete.
func (srv *Server) RegisterOnShutdown(f func()) {
srv.mu.Lock()
srv.onShutdown = append(srv.onShutdown, f)
srv.mu.Unlock()
}
// closeIdleConns closes all idle connections and reports whether the
// server is quiescent.
func (s *Server) closeIdleConns() bool {
s.mu.Lock()
defer s.mu.Unlock()
quiescent := true
for c := range s.activeConn {
st, ok := c.curState.Load().(ConnState)
if !ok || st != StateIdle {
quiescent = false
continue
}
c.rwc.Close()
delete(s.activeConn, c)
}
return quiescent
}
func (s *Server) closeListenersLocked() error {
var err error
for ln := range s.listeners {
if cerr := (*ln).Close(); cerr != nil && err == nil {
err = cerr
}
delete(s.listeners, ln)
}
return err
}
// A ConnState represents the state of a client connection to a server.
// It's used by the optional Server.ConnState hook.
type ConnState int
const (
// StateNew represents a new connection that is expected to
// send a request immediately. Connections begin at this
// state and then transition to either StateActive or
// StateClosed.
StateNew ConnState = iota
// StateActive represents a connection that has read 1 or more
// bytes of a request. The Server.ConnState hook for
// StateActive fires before the request has entered a handler
// and doesn't fire again until the request has been
// handled. After the request is handled, the state
// transitions to StateClosed, StateHijacked, or StateIdle.
// For HTTP/2, StateActive fires on the transition from zero
// to one active request, and only transitions away once all
// active requests are complete. That means that ConnState
// cannot be used to do per-request work; ConnState only notes
// the overall state of the connection.
StateActive
// StateIdle represents a connection that has finished
// handling a request and is in the keep-alive state, waiting
// for a new request. Connections transition from StateIdle
// to either StateActive or StateClosed.
StateIdle
// StateHijacked represents a hijacked connection.
// This is a terminal state. It does not transition to StateClosed.
StateHijacked
// StateClosed represents a closed connection.
// This is a terminal state. Hijacked connections do not
// transition to StateClosed.
StateClosed
)
var stateName = map[ConnState]string{
StateNew: "new",
StateActive: "active",
StateIdle: "idle",
StateHijacked: "hijacked",
StateClosed: "closed",
}
func (c ConnState) String() string {
return stateName[c]
}
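// Illustrative sketch (not part of the original source): a ConnState hook that
// logs every connection state transition. The srv variable is hypothetical.
//
//	srv.ConnState = func(c net.Conn, state ConnState) {
//		log.Printf("conn %v: %v", c.RemoteAddr(), state)
//	}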
// serverHandler delegates to either the server's Handler or
// DefaultServeMux and also handles "OPTIONS *" requests.
type serverHandler struct {
srv *Server
}
func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) {
handler := sh.srv.Handler
if handler == nil {
handler = DefaultServeMux
}
if req.RequestURI == "*" && req.Method == "OPTIONS" {
handler = globalOptionsHandler{}
}
handler.ServeHTTP(rw, req)
}
// ListenAndServe listens on the TCP network address srv.Addr and then
// calls Serve to handle requests on incoming connections.
// Accepted connections are configured to enable TCP keep-alives.
// If srv.Addr is blank, ":http" is used.
// ListenAndServe always returns a non-nil error.
func (srv *Server) ListenAndServe() error {
addr := srv.Addr
if addr == "" {
addr = ":http"
}
ln, err := net.Listen("tcp", addr)
if err != nil {
return err
}
return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)})
}
var testHookServerServe func(*Server, net.Listener) // used if non-nil
// shouldConfigureHTTP2ForServe reports whether Server.Serve should configure
// automatic HTTP/2 (which sets up the srv.TLSNextProto map).
func (srv *Server) shouldConfigureHTTP2ForServe() bool {
if srv.TLSConfig == nil {
// Compatibility with Go 1.6:
// If there's no TLSConfig, it's possible that the user just
// didn't set it on the http.Server, but did pass it to
// tls.NewListener and passed that listener to Serve.
// So we should configure HTTP/2 (to set up srv.TLSNextProto)
// in case the listener returns an "h2" *tls.Conn.
return true
}
// The user specified a TLSConfig on their http.Server.
// In this case, only configure HTTP/2 if their tls.Config
// explicitly mentions "h2". Otherwise http2.ConfigureServer
// would modify the tls.Config to add it, but they probably already
// passed this tls.Config to tls.NewListener. And if they did,
// it's too late anyway to fix it. It would only be potentially racy.
// See Issue 15908.
return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS)
}
// ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe,
// and ListenAndServeTLS methods after a call to Shutdown or Close.
var ErrServerClosed = errors.New("http: Server closed")
// Serve accepts incoming connections on the Listener l, creating a
// new service goroutine for each. The service goroutines read requests and
// then call srv.Handler to reply to them.
//
// For HTTP/2 support, srv.TLSConfig should be initialized to the
// provided listener's TLS Config before calling Serve. If
// srv.TLSConfig is non-nil and doesn't include the string "h2" in
// Config.NextProtos, HTTP/2 support is not enabled.
//
// Serve always returns a non-nil error. After Shutdown or Close, the
// returned error is ErrServerClosed.
func (srv *Server) Serve(l net.Listener) error {
defer l.Close()
if fn := testHookServerServe; fn != nil {
fn(srv, l)
}
var tempDelay time.Duration // how long to sleep on accept failure
if err := srv.setupHTTP2_Serve(); err != nil {
return err
}
srv.trackListener(&l, true)
defer srv.trackListener(&l, false)
baseCtx := context.Background() // base is always background, per Issue 16220
ctx := context.WithValue(baseCtx, ServerContextKey, srv)
for {
rw, e := l.Accept()
if e != nil {
select {
case <-srv.getDoneChan():
return ErrServerClosed
default:
}
if ne, ok := e.(net.Error); ok && ne.Temporary() {
if tempDelay == 0 {
tempDelay = 5 * time.Millisecond
} else {
tempDelay *= 2
}
if max := 1 * time.Second; tempDelay > max {
tempDelay = max
}
srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay)
time.Sleep(tempDelay)
continue
}
return e
}
tempDelay = 0
c := srv.newConn(rw)
c.setState(c.rwc, StateNew) // before Serve can return
go c.serve(ctx)
}
}
// ServeTLS accepts incoming connections on the Listener l, creating a
// new service goroutine for each. The service goroutines read requests and
// then call srv.Handler to reply to them.
//
// Additionally, files containing a certificate and matching private key for
// the server must be provided if neither the Server's TLSConfig.Certificates
// nor TLSConfig.GetCertificate are populated. If the certificate is signed by
// a certificate authority, the certFile should be the concatenation of the
// server's certificate, any intermediates, and the CA's certificate.
//
// For HTTP/2 support, srv.TLSConfig should be initialized to the
// provided listener's TLS Config before calling ServeTLS. If
// srv.TLSConfig is non-nil and doesn't include the string "h2" in
// Config.NextProtos, HTTP/2 support is not enabled.
//
// ServeTLS always returns a non-nil error. After Shutdown or Close, the
// returned error is ErrServerClosed.
func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error {
// Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
// before we clone it and create the TLS Listener.
if err := srv.setupHTTP2_ServeTLS(); err != nil {
return err
}
config := cloneTLSConfig(srv.TLSConfig)
if !strSliceContains(config.NextProtos, "http/1.1") {
config.NextProtos = append(config.NextProtos, "http/1.1")
}
configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil
if !configHasCert || certFile != "" || keyFile != "" {
var err error
config.Certificates = make([]tls.Certificate, 1)
config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return err
}
}
tlsListener := tls.NewListener(l, config)
return srv.Serve(tlsListener)
}
// trackListener adds or removes a net.Listener to the set of tracked
// listeners.
//
// We store a pointer to interface in the map set, in case the
// net.Listener is not comparable. This is safe because we only call
// trackListener via Serve and can track+defer untrack the same
// pointer to local variable there. We never need to compare a
// Listener from another caller.
func (s *Server) trackListener(ln *net.Listener, add bool) {
s.mu.Lock()
defer s.mu.Unlock()
if s.listeners == nil {
s.listeners = make(map[*net.Listener]struct{})
}
if add {
// If the *Server is being reused after a previous
// Close or Shutdown, reset its doneChan:
if len(s.listeners) == 0 && len(s.activeConn) == 0 {
s.doneChan = nil
}
s.listeners[ln] = struct{}{}
} else {
delete(s.listeners, ln)
}
}
func (s *Server) trackConn(c *conn, add bool) {
s.mu.Lock()
defer s.mu.Unlock()
if s.activeConn == nil {
s.activeConn = make(map[*conn]struct{})
}
if add {
s.activeConn[c] = struct{}{}
} else {
delete(s.activeConn, c)
}
}
func (s *Server) idleTimeout() time.Duration {
if s.IdleTimeout != 0 {
return s.IdleTimeout
}
return s.ReadTimeout
}
func (s *Server) readHeaderTimeout() time.Duration {
if s.ReadHeaderTimeout != 0 {
return s.ReadHeaderTimeout
}
return s.ReadTimeout
}
func (s *Server) doKeepAlives() bool {
return atomic.LoadInt32(&s.disableKeepAlives) == 0 && !s.shuttingDown()
}
func (s *Server) shuttingDown() bool {
return atomic.LoadInt32(&s.inShutdown) != 0
}
// SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
// By default, keep-alives are always enabled. Only very
// resource-constrained environments or servers in the process of
// shutting down should disable them.
func (srv *Server) SetKeepAlivesEnabled(v bool) {
if v {
atomic.StoreInt32(&srv.disableKeepAlives, 0)
return
}
atomic.StoreInt32(&srv.disableKeepAlives, 1)
// Close idle HTTP/1 conns:
srv.closeIdleConns()
// Close HTTP/2 conns, as soon as they become idle, but reset
// the chan so future conns (if the listener is still active)
// still work and don't get a GOAWAY immediately, before their
// first request:
srv.mu.Lock()
defer srv.mu.Unlock()
srv.closeDoneChanLocked() // closes http2 conns
srv.doneChan = nil
}
func (s *Server) logf(format string, args ...interface{}) {
if s.ErrorLog != nil {
s.ErrorLog.Printf(format, args...)
} else {
log.Printf(format, args...)
}
}
// logf prints to the ErrorLog of the *Server associated with request r
// via ServerContextKey. If there's no associated server, or if ErrorLog
// is nil, logging is done via the log package's standard logger.
func logf(r *Request, format string, args ...interface{}) {
s, _ := r.Context().Value(ServerContextKey).(*Server)
if s != nil && s.ErrorLog != nil {
s.ErrorLog.Printf(format, args...)
} else {
log.Printf(format, args...)
}
}
// ListenAndServe listens on the TCP network address addr
// and then calls Serve with handler to handle requests
// on incoming connections.
// Accepted connections are configured to enable TCP keep-alives.
// Handler is typically nil, in which case the DefaultServeMux is
// used.
//
// A trivial example server is:
//
// package main
//
// import (
// "io"
// "net/http"
// "log"
// )
//
// // hello world, the web server
// func HelloServer(w http.ResponseWriter, req *http.Request) {
// io.WriteString(w, "hello, world!\n")
// }
//
// func main() {
// http.HandleFunc("/hello", HelloServer)
// log.Fatal(http.ListenAndServe(":12345", nil))
// }
//
// ListenAndServe always returns a non-nil error.
func ListenAndServe(addr string, handler Handler) error {
server := &Server{Addr: addr, Handler: handler}
return server.ListenAndServe()
}
// ListenAndServeTLS acts identically to ListenAndServe, except that it
// expects HTTPS connections. Additionally, files containing a certificate and
// matching private key for the server must be provided. If the certificate
// is signed by a certificate authority, the certFile should be the concatenation
// of the server's certificate, any intermediates, and the CA's certificate.
//
// A trivial example server is:
//
// import (
// "log"
// "net/http"
// )
//
// func handler(w http.ResponseWriter, req *http.Request) {
// w.Header().Set("Content-Type", "text/plain")
// w.Write([]byte("This is an example server.\n"))
// }
//
// func main() {
// http.HandleFunc("/", handler)
// log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/")
// err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil)
// log.Fatal(err)
// }
//
// One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem.
//
// ListenAndServeTLS always returns a non-nil error.
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
server := &Server{Addr: addr, Handler: handler}
return server.ListenAndServeTLS(certFile, keyFile)
}
// ListenAndServeTLS listens on the TCP network address srv.Addr and
// then calls Serve to handle requests on incoming TLS connections.
// Accepted connections are configured to enable TCP keep-alives.
//
// Filenames containing a certificate and matching private key for the
// server must be provided if neither the Server's TLSConfig.Certificates
// nor TLSConfig.GetCertificate are populated. If the certificate is
// signed by a certificate authority, the certFile should be the
// concatenation of the server's certificate, any intermediates, and
// the CA's certificate.
//
// If srv.Addr is blank, ":https" is used.
//
// ListenAndServeTLS always returns a non-nil error.
func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
addr := srv.Addr
if addr == "" {
addr = ":https"
}
ln, err := net.Listen("tcp", addr)
if err != nil {
return err
}
defer ln.Close()
return srv.ServeTLS(tcpKeepAliveListener{ln.(*net.TCPListener)}, certFile, keyFile)
}
// setupHTTP2_ServeTLS conditionally configures HTTP/2 on
// srv and returns any error from setting it up. If it is
// not configured for policy reasons, nil is returned.
func (srv *Server) setupHTTP2_ServeTLS() error {
srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults)
return srv.nextProtoErr
}
// setupHTTP2_Serve is called from (*Server).Serve and conditionally
// configures HTTP/2 on srv using a more conservative policy than
// setupHTTP2_ServeTLS because Serve may be called
// concurrently.
//
// The tests named TestTransportAutomaticHTTP2* and
// TestConcurrentServerServe in server_test.go demonstrate some
// of the supported use cases and motivations.
func (srv *Server) setupHTTP2_Serve() error {
srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve)
return srv.nextProtoErr
}
func (srv *Server) onceSetNextProtoDefaults_Serve() {
if srv.shouldConfigureHTTP2ForServe() {
srv.onceSetNextProtoDefaults()
}
}
// onceSetNextProtoDefaults configures HTTP/2, if the user hasn't
// configured otherwise (by setting srv.TLSNextProto non-nil).
// It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*).
func (srv *Server) onceSetNextProtoDefaults() {
if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") {
return
}
// Enable HTTP/2 by default if the user hasn't otherwise
// configured their TLSNextProto map.
if srv.TLSNextProto == nil {
conf := &http2Server{
NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) },
}
srv.nextProtoErr = http2ConfigureServer(srv, conf)
}
}
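// Illustrative sketch (not part of the original source): because the check
// above reads GODEBUG via os.Getenv on the first Serve/ServeTLS call, HTTP/2
// can be disabled for a server started from Go code like this; in practice
// the variable is usually just set in the process environment instead.
//
//	os.Setenv("GODEBUG", "http2server=0") // must happen before the first Serve
//	log.Fatal(ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))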
// TimeoutHandler returns a Handler that runs h with the given time limit.
//
// The new Handler calls h.ServeHTTP to handle each request, but if a
// call runs for longer than its time limit, the handler responds with
// a 503 Service Unavailable error and the given message in its body.
// (If msg is empty, a suitable default message will be sent.)
// After such a timeout, writes by h to its ResponseWriter will return
// ErrHandlerTimeout.
//
// TimeoutHandler buffers all Handler writes to memory and does not
// support the Hijacker or Flusher interfaces.
func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
return &timeoutHandler{
handler: h,
body: msg,
dt: dt,
}
}
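// Illustrative sketch (not part of the original source): wrapping a slow
// handler so requests fail with 503 Service Unavailable after one second.
// slowHandler is hypothetical.
//
//	h := TimeoutHandler(HandlerFunc(slowHandler), time.Second, "request timed out")
//	Handle("/slow", h)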
// ErrHandlerTimeout is returned on ResponseWriter Write calls
// in handlers which have timed out.
var ErrHandlerTimeout = errors.New("http: Handler timeout")
type timeoutHandler struct {
handler Handler
body string
dt time.Duration
// When set, no context will be created and this context will
// be used instead.
testContext context.Context
}
func (h *timeoutHandler) errorBody() string {
if h.body != "" {
return h.body
}
return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
}
func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
ctx := h.testContext
if ctx == nil {
var cancelCtx context.CancelFunc
ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt)
defer cancelCtx()
}
r = r.WithContext(ctx)
done := make(chan struct{})
tw := &timeoutWriter{
w: w,
h: make(Header),
}
panicChan := make(chan interface{}, 1)
go func() {
defer func() {
if p := recover(); p != nil {
panicChan <- p
}
}()
h.handler.ServeHTTP(tw, r)
close(done)
}()
select {
case p := <-panicChan:
panic(p)
case <-done:
tw.mu.Lock()
defer tw.mu.Unlock()
dst := w.Header()
for k, vv := range tw.h {
dst[k] = vv
}
if !tw.wroteHeader {
tw.code = StatusOK
}
w.WriteHeader(tw.code)
w.Write(tw.wbuf.Bytes())
case <-ctx.Done():
tw.mu.Lock()
defer tw.mu.Unlock()
w.WriteHeader(StatusServiceUnavailable)
io.WriteString(w, h.errorBody())
tw.timedOut = true
return
}
}
type timeoutWriter struct {
w ResponseWriter
h Header
wbuf bytes.Buffer
mu sync.Mutex
timedOut bool
wroteHeader bool
code int
}
func (tw *timeoutWriter) Header() Header { return tw.h }
func (tw *timeoutWriter) Write(p []byte) (int, error) {
tw.mu.Lock()
defer tw.mu.Unlock()
if tw.timedOut {
return 0, ErrHandlerTimeout
}
if !tw.wroteHeader {
tw.writeHeader(StatusOK)
}
return tw.wbuf.Write(p)
}
func (tw *timeoutWriter) WriteHeader(code int) {
checkWriteHeaderCode(code)
tw.mu.Lock()
defer tw.mu.Unlock()
if tw.timedOut || tw.wroteHeader {
return
}
tw.writeHeader(code)
}
func (tw *timeoutWriter) writeHeader(code int) {
tw.wroteHeader = true
tw.code = code
}
// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
// connections. It's used by ListenAndServe and ListenAndServeTLS so
// dead TCP connections (e.g. closing laptop mid-download) eventually
// go away.
type tcpKeepAliveListener struct {
*net.TCPListener
}
func (ln tcpKeepAliveListener) Accept() (net.Conn, error) {
tc, err := ln.AcceptTCP()
if err != nil {
return nil, err
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
// globalOptionsHandler responds to "OPTIONS *" requests.
type globalOptionsHandler struct{}
func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
w.Header().Set("Content-Length", "0")
if r.ContentLength != 0 {
// Read up to 4KB of OPTIONS body (as mentioned in the
// spec as being reserved for future use), but anything
// over that is considered a waste of server resources
// (or an attack) and we abort and close the connection,
// courtesy of MaxBytesReader's EOF behavior.
mb := MaxBytesReader(w, r.Body, 4<<10)
io.Copy(ioutil.Discard, mb)
}
}
// initNPNRequest is an HTTP handler that initializes certain
// uninitialized fields in its *Request. Such partially-initialized
// Requests come from NPN protocol handlers.
type initNPNRequest struct {
c *tls.Conn
h serverHandler
}
func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) {
if req.TLS == nil {
req.TLS = &tls.ConnectionState{}
*req.TLS = h.c.ConnectionState()
}
if req.Body == nil {
req.Body = NoBody
}
if req.RemoteAddr == "" {
req.RemoteAddr = h.c.RemoteAddr().String()
}
h.h.ServeHTTP(rw, req)
}
// loggingConn is used for debugging.
type loggingConn struct {
name string
net.Conn
}
var (
uniqNameMu sync.Mutex
uniqNameNext = make(map[string]int)
)
func newLoggingConn(baseName string, c net.Conn) net.Conn {
uniqNameMu.Lock()
defer uniqNameMu.Unlock()
uniqNameNext[baseName]++
return &loggingConn{
name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
Conn: c,
}
}
func (c *loggingConn) Write(p []byte) (n int, err error) {
log.Printf("%s.Write(%d) = ....", c.name, len(p))
n, err = c.Conn.Write(p)
log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
return
}
func (c *loggingConn) Read(p []byte) (n int, err error) {
log.Printf("%s.Read(%d) = ....", c.name, len(p))
n, err = c.Conn.Read(p)
log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
return
}
func (c *loggingConn) Close() (err error) {
log.Printf("%s.Close() = ...", c.name)
err = c.Conn.Close()
log.Printf("%s.Close() = %v", c.name, err)
return
}
// checkConnErrorWriter writes to c.rwc and records any write errors to c.werr.
// It only contains one field (and a pointer field at that), so it
// fits in an interface value without an extra allocation.
type checkConnErrorWriter struct {
c *conn
}
func (w checkConnErrorWriter) Write(p []byte) (n int, err error) {
n, err = w.c.rwc.Write(p)
if err != nil && w.c.werr == nil {
w.c.werr = err
w.c.cancelCtx()
}
return
}
func numLeadingCRorLF(v []byte) (n int) {
for _, b := range v {
if b == '\r' || b == '\n' {
n++
continue
}
break
}
return
}
func strSliceContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
|
[
"\"GODEBUG\""
] |
[] |
[
"GODEBUG"
] |
[]
|
["GODEBUG"]
|
go
| 1 | 0 | |
tools/sync-groups/federation_test.go
|
// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0
package main
import (
"context"
"net"
"os"
"time"
"git.arvados.org/arvados.git/lib/boot"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
check "gopkg.in/check.v1"
)
var _ = check.Suite(&FederationSuite{})
var origAPIHost, origAPIToken string
type FederationSuite struct {
super *boot.Supervisor
}
func (s *FederationSuite) SetUpSuite(c *check.C) {
origAPIHost = os.Getenv("ARVADOS_API_HOST")
origAPIToken = os.Getenv("ARVADOS_API_TOKEN")
hostport := map[string]string{}
for _, id := range []string{"z1111", "z2222"} {
hostport[id] = func() string {
// TODO: Instead of expecting random ports on
			// 127.0.0.11 and 127.0.0.22 to be race-safe, try
// different 127.x.y.z until finding one that
// isn't in use.
ln, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
ln.Close()
_, port, err := net.SplitHostPort(ln.Addr().String())
c.Assert(err, check.IsNil)
return "127.0.0." + id[3:] + ":" + port
}()
}
yaml := "Clusters:\n"
for id := range hostport {
yaml += `
` + id + `:
Services:
Controller:
ExternalURL: https://` + hostport[id] + `
TLS:
Insecure: true
SystemLogs:
Format: text
RemoteClusters:
z1111:
Host: ` + hostport["z1111"] + `
Scheme: https
Insecure: true
Proxy: true
ActivateUsers: true
`
if id != "z2222" {
yaml += ` z2222:
Host: ` + hostport["z2222"] + `
Scheme: https
Insecure: true
Proxy: true
ActivateUsers: true
`
}
if id == "z1111" {
yaml += `
Login:
LoginCluster: z1111
PAM:
Enable: true
`
} else {
yaml += `
Login:
LoginCluster: z1111
`
}
}
s.super = &boot.Supervisor{
ClusterType: "test",
ConfigYAML: yaml,
Stderr: ctxlog.LogWriter(c.Log),
NoWorkbench1: true,
NoWorkbench2: true,
OwnTemporaryDatabase: true,
}
// Give up if startup takes longer than 3m
timeout := time.AfterFunc(3*time.Minute, s.super.Stop)
defer timeout.Stop()
s.super.Start(context.Background())
ok := s.super.WaitReady()
c.Assert(ok, check.Equals, true)
// Activate user, make it admin.
conn1 := s.super.Conn("z1111")
rootctx1, _, _ := s.super.RootClients("z1111")
userctx1, _, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, "[email protected]", true)
user1, err := conn1.UserGetCurrent(userctx1, arvados.GetOptions{})
c.Assert(err, check.IsNil)
c.Assert(user1.IsAdmin, check.Equals, false)
user1, err = conn1.UserUpdate(rootctx1, arvados.UpdateOptions{
UUID: user1.UUID,
Attrs: map[string]interface{}{
"is_admin": true,
},
})
c.Assert(err, check.IsNil)
c.Assert(user1.IsAdmin, check.Equals, true)
}
func (s *FederationSuite) TearDownSuite(c *check.C) {
s.super.Stop()
_ = os.Setenv("ARVADOS_API_HOST", origAPIHost)
_ = os.Setenv("ARVADOS_API_TOKEN", origAPIToken)
}
func (s *FederationSuite) TestGroupSyncingOnFederatedCluster(c *check.C) {
// Get admin user's V2 token
conn1 := s.super.Conn("z1111")
rootctx1, _, _ := s.super.RootClients("z1111")
userctx1, _, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, "[email protected]", true)
user1Auth, err := conn1.APIClientAuthorizationCurrent(userctx1, arvados.GetOptions{})
c.Check(err, check.IsNil)
userV2Token := user1Auth.TokenV2()
// Get federated admin clients on z2222 to set up environment
conn2 := s.super.Conn("z2222")
userctx2, userac2, _ := s.super.ClientsWithToken("z2222", userV2Token)
user2, err := conn2.UserGetCurrent(userctx2, arvados.GetOptions{})
c.Check(err, check.IsNil)
c.Check(user2.IsAdmin, check.Equals, true)
// Set up environment for sync-groups using admin user credentials on z2222
err = os.Setenv("ARVADOS_API_HOST", userac2.APIHost)
c.Assert(err, check.IsNil)
err = os.Setenv("ARVADOS_API_TOKEN", userac2.AuthToken)
c.Assert(err, check.IsNil)
// Check that no parent group is created
gl := arvados.GroupList{}
params := arvados.ResourceListParams{
Filters: []arvados.Filter{{
Attr: "owner_uuid",
Operator: "=",
Operand: s.super.Cluster("z2222").ClusterID + "-tpzed-000000000000000",
}, {
Attr: "name",
Operator: "=",
Operand: "Externally synchronized groups",
}},
}
err = userac2.RequestAndDecode(&gl, "GET", "/arvados/v1/groups", nil, params)
c.Assert(err, check.IsNil)
c.Assert(gl.ItemsAvailable, check.Equals, 0)
// Set up config, confirm that the parent group was created
os.Args = []string{"cmd", "somefile.csv"}
config, err := GetConfig()
c.Assert(err, check.IsNil)
userac2.RequestAndDecode(&gl, "GET", "/arvados/v1/groups", nil, params)
c.Assert(gl.ItemsAvailable, check.Equals, 1)
// Run the tool with custom config
data := [][]string{
{"TestGroup1", user2.Email},
}
tmpfile, err := MakeTempCSVFile(data)
c.Assert(err, check.IsNil)
defer os.Remove(tmpfile.Name()) // clean up
config.Path = tmpfile.Name()
err = doMain(&config)
c.Assert(err, check.IsNil)
// Check the group was created correctly, and has the user as a member
groupUUID, err := RemoteGroupExists(&config, "TestGroup1")
c.Assert(err, check.IsNil)
c.Assert(groupUUID, check.Not(check.Equals), "")
c.Assert(GroupMembershipExists(config.Client, user2.UUID, groupUUID, "can_write"), check.Equals, true)
}
|
[
"\"ARVADOS_API_HOST\"",
"\"ARVADOS_API_TOKEN\""
] |
[] |
[
"ARVADOS_API_HOST",
"ARVADOS_API_TOKEN"
] |
[]
|
["ARVADOS_API_HOST", "ARVADOS_API_TOKEN"]
|
go
| 2 | 0 | |
config.py
|
"""
Configuration file for Flask and Flask-SQLAlchemy modules.
All environment variables are stored in local .env file.
"""
import os
from dotenv import load_dotenv
load_dotenv() #load environment variables from .env file
class Config(object):
db_host = os.environ.get('DB_HOST')
db_name = os.environ.get('DB_NAME')
db_password = os.environ.get('DB_PASSWORD')
db_port = os.environ.get('DB_PORT')
db_user = os.environ.get('DB_USERNAME')
SQLALCHEMY_DATABASE_URI = f"postgresql://{db_user}:{db_password}@{db_host}:{db_port}/{db_name}"
SECRET_KEY = os.environ.get('FLASK_SECRET_KEY')
SQLALCHEMY_TRACK_MODIFICATIONS = False
PROPAGATE_EXCEPTIONS = True
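# Illustrative .env layout this Config expects (added for clarity; the values
# are placeholders, not part of the original file):
#
#   DB_HOST=localhost
#   DB_PORT=5432
#   DB_NAME=app_db
#   DB_USERNAME=app_user
#   DB_PASSWORD=change-me
#   FLASK_SECRET_KEY=some-long-random-string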
|
[] |
[] |
[
"DB_PASSWORD",
"DB_HOST",
"FLASK_SECRET_KEY",
"DB_PORT",
"DB_USERNAME",
"DB_NAME"
] |
[]
|
["DB_PASSWORD", "DB_HOST", "FLASK_SECRET_KEY", "DB_PORT", "DB_USERNAME", "DB_NAME"]
|
python
| 6 | 0 | |
cmd/coordinator/remote.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.13
// +build linux darwin
// Code related to remote buildlets. See x/build/remote-buildlet.txt
package main // import "golang.org/x/build/cmd/coordinator"
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"html"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httputil"
"os"
"os/exec"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"time"
"unsafe"
"cloud.google.com/go/compute/metadata"
"github.com/gliderlabs/ssh"
"github.com/kr/pty"
"golang.org/x/build/buildlet"
"golang.org/x/build/dashboard"
"golang.org/x/build/internal/gophers"
"golang.org/x/build/types"
gossh "golang.org/x/crypto/ssh"
)
var (
remoteBuildlets = struct {
sync.Mutex
m map[string]*remoteBuildlet // keyed by buildletName
}{m: map[string]*remoteBuildlet{}}
cleanTimer *time.Timer
)
const (
remoteBuildletIdleTimeout = 30 * time.Minute
remoteBuildletCleanInterval = time.Minute
)
func init() {
cleanTimer = time.AfterFunc(remoteBuildletCleanInterval, expireBuildlets)
}
type remoteBuildlet struct {
User string // "user-foo" build key
Name string // dup of key
HostType string
BuilderType string // default builder config to use if not overwritten
Created time.Time
Expires time.Time
buildlet *buildlet.Client
}
// renew renews rb's idle timeout if ctx hasn't expired.
// renew should run in its own goroutine.
func (rb *remoteBuildlet) renew(ctx context.Context) {
remoteBuildlets.Lock()
defer remoteBuildlets.Unlock()
select {
case <-ctx.Done():
return
default:
}
if got := remoteBuildlets.m[rb.Name]; got == rb {
rb.Expires = time.Now().Add(remoteBuildletIdleTimeout)
time.AfterFunc(time.Minute, func() { rb.renew(ctx) })
}
}
func addRemoteBuildlet(rb *remoteBuildlet) (name string) {
remoteBuildlets.Lock()
defer remoteBuildlets.Unlock()
n := 0
for {
name = fmt.Sprintf("%s-%s-%d", rb.User, rb.BuilderType, n)
if _, ok := remoteBuildlets.m[name]; ok {
n++
} else {
remoteBuildlets.m[name] = rb
return name
}
}
}
func isGCERemoteBuildlet(instName string) bool {
remoteBuildlets.Lock()
defer remoteBuildlets.Unlock()
for _, rb := range remoteBuildlets.m {
if rb.buildlet.GCEInstanceName() == instName {
return true
}
}
return false
}
func expireBuildlets() {
defer cleanTimer.Reset(remoteBuildletCleanInterval)
remoteBuildlets.Lock()
defer remoteBuildlets.Unlock()
now := time.Now()
for name, rb := range remoteBuildlets.m {
if !rb.Expires.IsZero() && rb.Expires.Before(now) {
go rb.buildlet.Close()
delete(remoteBuildlets.m, name)
}
}
}
var timeNow = time.Now // for testing
// always wrapped in requireBuildletProxyAuth.
func handleBuildletCreate(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
http.Error(w, "POST required", 400)
return
}
clientVersion := r.FormValue("version")
if clientVersion < buildlet.GomoteCreateMinVersion {
http.Error(w, fmt.Sprintf("gomote client version %q is too old; predates server minimum version %q", clientVersion, buildlet.GomoteCreateMinVersion), 400)
return
}
builderType := r.FormValue("builderType")
if builderType == "" {
http.Error(w, "missing 'builderType' parameter", 400)
return
}
bconf, ok := dashboard.Builders[builderType]
if !ok {
http.Error(w, "unknown builder type in 'builderType' parameter", 400)
return
}
user, _, _ := r.BasicAuth()
w.Header().Set("X-Supported-Version", buildlet.GomoteCreateStreamVersion)
wantStream := false // streaming JSON updates, one JSON message (type msg) per line
if clientVersion >= buildlet.GomoteCreateStreamVersion {
wantStream = true
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.(http.Flusher).Flush()
}
si := &SchedItem{
HostType: bconf.HostType,
IsGomote: true,
}
ctx := r.Context()
// ticker for sending status updates to client
var ticker <-chan time.Time
if wantStream {
t := time.NewTicker(5 * time.Second)
defer t.Stop()
ticker = t.C
}
resc := make(chan *buildlet.Client)
errc := make(chan error)
hconf := bconf.HostConfig()
go func() {
bc, err := sched.GetBuildlet(ctx, si)
if bc != nil {
resc <- bc
} else {
errc <- err
}
}()
// One of these fields is set:
type msg struct {
Error string `json:"error,omitempty"`
Buildlet *remoteBuildlet `json:"buildlet,omitempty"`
Status *types.BuildletWaitStatus `json:"status,omitempty"`
}
sendJSONLine := func(v interface{}) {
jenc, err := json.Marshal(v)
if err != nil {
log.Fatalf("remote: error marshalling JSON of type %T: %v", v, v)
}
jenc = append(jenc, '\n')
w.Write(jenc)
w.(http.Flusher).Flush()
}
sendText := func(s string) {
sendJSONLine(msg{Status: &types.BuildletWaitStatus{Message: s}})
}
// If the gomote builder type requested is a reverse buildlet
// and all instances are busy, try canceling a post-submit
// build so it'll reconnect and the scheduler will give it to
// the higher priority gomote user.
isReverse := hconf.IsReverse
if isReverse {
if hs := reversePool.buildReverseStatusJSON().HostTypes[hconf.HostType]; hs == nil {
sendText(fmt.Sprintf("host type %q is not elastic; no machines are connected", hconf.HostType))
} else {
sendText(fmt.Sprintf("host type %q is not elastic; %d of %d machines connected, %d busy",
hconf.HostType, hs.Connected, hs.Expect, hs.Busy))
if hs.Connected > 0 && hs.Idle == 0 {
// Try to cancel one.
if cancelOnePostSubmitBuildWithHostType(hconf.HostType) {
sendText(fmt.Sprintf("canceled a post-submit build on a machine of type %q; it should reconnect and get assigned to you", hconf.HostType))
}
}
}
}
for {
select {
case <-ticker:
st := sched.waiterState(si)
sendJSONLine(msg{Status: &st})
case bc := <-resc:
now := timeNow()
rb := &remoteBuildlet{
User: user,
BuilderType: builderType,
HostType: bconf.HostType,
buildlet: bc,
Created: now,
Expires: now.Add(remoteBuildletIdleTimeout),
}
rb.Name = addRemoteBuildlet(rb)
bc.SetName(rb.Name)
log.Printf("created buildlet %v for %v (%s)", rb.Name, rb.User, bc.String())
if wantStream {
// We already sent the Content-Type
// (and perhaps status update JSON
// lines) earlier, so just send the
// final JSON update with the result:
sendJSONLine(msg{Buildlet: rb})
} else {
// Legacy client path.
// TODO: delete !wantStream support 3-6 months after 2019-11-19.
w.Header().Set("Content-Type", "application/json; charset=utf-8")
sendJSONLine(rb)
}
return
case err := <-errc:
log.Printf("error creating gomote buildlet: %v", err)
if wantStream {
sendJSONLine(msg{Error: err.Error()})
} else {
http.Error(w, err.Error(), 500)
}
return
}
}
}
// always wrapped in requireBuildletProxyAuth.
func handleBuildletList(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" {
http.Error(w, "GET required", 400)
return
}
res := make([]*remoteBuildlet, 0) // so it's never JSON "null"
remoteBuildlets.Lock()
defer remoteBuildlets.Unlock()
user, _, _ := r.BasicAuth()
for _, rb := range remoteBuildlets.m {
if rb.User == user {
res = append(res, rb)
}
}
sort.Sort(byBuildletName(res))
jenc, err := json.MarshalIndent(res, "", " ")
if err != nil {
http.Error(w, err.Error(), 500)
return
}
jenc = append(jenc, '\n')
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Write(jenc)
}
type byBuildletName []*remoteBuildlet
func (s byBuildletName) Len() int { return len(s) }
func (s byBuildletName) Less(i, j int) bool { return s[i].Name < s[j].Name }
func (s byBuildletName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func remoteBuildletStatus() string {
remoteBuildlets.Lock()
defer remoteBuildlets.Unlock()
if len(remoteBuildlets.m) == 0 {
return "<i>(none)</i>"
}
var buf bytes.Buffer
var all []*remoteBuildlet
for _, rb := range remoteBuildlets.m {
all = append(all, rb)
}
sort.Sort(byBuildletName(all))
buf.WriteString("<ul>")
for _, rb := range all {
fmt.Fprintf(&buf, "<li><b>%s</b>, created %v ago, expires in %v</li>\n",
html.EscapeString(rb.Name),
time.Since(rb.Created), rb.Expires.Sub(time.Now()))
}
buf.WriteString("</ul>")
return buf.String()
}
func proxyBuildletHTTP(w http.ResponseWriter, r *http.Request) {
if r.TLS == nil {
http.Error(w, "https required", http.StatusBadRequest)
return
}
buildletName := r.Header.Get("X-Buildlet-Proxy")
if buildletName == "" {
http.Error(w, "missing X-Buildlet-Proxy; server misconfig", http.StatusInternalServerError)
return
}
remoteBuildlets.Lock()
rb, ok := remoteBuildlets.m[buildletName]
if ok {
rb.Expires = time.Now().Add(remoteBuildletIdleTimeout)
}
remoteBuildlets.Unlock()
if !ok {
http.Error(w, "unknown or expired buildlet", http.StatusBadGateway)
return
}
user, _, _ := r.BasicAuth()
if rb.User != user {
http.Error(w, "you don't own that buildlet", http.StatusUnauthorized)
return
}
if r.Method == "POST" && r.URL.Path == "/halt" {
err := rb.buildlet.Close()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
rb.buildlet.Close()
remoteBuildlets.Lock()
delete(remoteBuildlets.m, buildletName)
remoteBuildlets.Unlock()
return
}
if r.Method == "POST" && r.URL.Path == "/tcpproxy" {
proxyBuildletTCP(w, r, rb)
return
}
outReq, err := http.NewRequest(r.Method, rb.buildlet.URL()+r.URL.Path+"?"+r.URL.RawQuery, r.Body)
if err != nil {
log.Printf("bad proxy request: %v", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
outReq.Header = r.Header
outReq.ContentLength = r.ContentLength
proxy := &httputil.ReverseProxy{
Director: func(*http.Request) {}, // nothing
Transport: rb.buildlet.ProxyRoundTripper(),
FlushInterval: 500 * time.Millisecond,
ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {
log.Printf("gomote proxy error for %s: %v", buildletName, err)
w.WriteHeader(http.StatusBadGateway)
fmt.Fprintf(w, "(golang.org/issue/28365): gomote proxy error: %v", err)
},
}
proxy.ServeHTTP(w, outReq)
}
// proxyBuildletTCP handles connecting to and proxying between a
// backend buildlet VM's TCP port and the client. This is called once
// it's already authenticated by proxyBuildletHTTP.
func proxyBuildletTCP(w http.ResponseWriter, r *http.Request, rb *remoteBuildlet) {
if r.ProtoMajor > 1 {
// TODO: deal with HTTP/2 requests if https://farmer.golang.org enables it later.
// Currently it does not, as other handlers Hijack too. We'd need to teach clients
// when to explicitly disable HTTP/1, or update the protocols to do read/write
// bodies instead of 101 Switching Protocols.
http.Error(w, "unexpected HTTP/2 request", http.StatusInternalServerError)
return
}
hj, ok := w.(http.Hijacker)
if !ok {
http.Error(w, "not a Hijacker", http.StatusInternalServerError)
return
}
// The target port is a header instead of a query parameter for no real reason other
// than being consistent with the reverse buildlet registration headers.
port, err := strconv.Atoi(r.Header.Get("X-Target-Port"))
if err != nil {
http.Error(w, "invalid or missing X-Target-Port", http.StatusBadRequest)
return
}
hc, ok := dashboard.Hosts[rb.HostType]
if !ok || !hc.IsVM() {
// TODO: implement support for non-VM types if/when needed.
http.Error(w, fmt.Sprintf("unsupported non-VM host type %q", rb.HostType), http.StatusBadRequest)
return
}
ip, _, err := net.SplitHostPort(rb.buildlet.IPPort())
if err != nil {
http.Error(w, fmt.Sprintf("unexpected backend ip:port %q", rb.buildlet.IPPort()), http.StatusInternalServerError)
return
}
c, err := (&net.Dialer{}).DialContext(r.Context(), "tcp", net.JoinHostPort(ip, fmt.Sprint(port)))
if err != nil {
http.Error(w, fmt.Sprintf("failed to connect to port %v: %v", port, err), http.StatusInternalServerError)
return
}
defer c.Close()
// Hijack early so we can check for any unexpected buffered
// request data without doing a potentially blocking
// r.Body.Read. Also it's nice to be able to WriteString the
// response header explicitly. But using w.WriteHeader+w.Flush
// would probably also work. Somewhat arbitrary to do it early.
cc, buf, err := hj.Hijack()
if err != nil {
http.Error(w, fmt.Sprintf("Hijack: %v", err), http.StatusInternalServerError)
return
}
defer cc.Close()
if buf.Reader.Buffered() != 0 {
io.WriteString(cc, "HTTP/1.0 400 Bad Request\r\n\r\nUnexpected buffered data.\n")
return
}
// If we send a 101 response with an Upgrade header and a
// "Connection: Upgrade" header, that makes net/http's
// *Response.isProtocolSwitch() return true, which gives us a
// writable Response.Body on the client side, which simplifies
// the gomote code.
io.WriteString(cc, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: tcpproxy\r\nConnection: upgrade\r\n\r\n")
errc := make(chan error, 2)
// Copy from HTTP client to backend.
go func() {
_, err := io.Copy(c, cc)
errc <- err
}()
// And copy from backend to the HTTP client.
go func() {
_, err := io.Copy(cc, c)
errc <- err
}()
<-errc
}
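// Illustrative protocol sketch (added for clarity; not part of the original
// file): a gomote client reaches a TCP port on its buildlet by issuing, with
// its usual basic-auth credentials,
//
//	POST /tcpproxy HTTP/1.1
//	X-Buildlet-Proxy: user-bradfitz-linux-amd64-0
//	X-Target-Port: 22
//
// and, once the "101 Switching Protocols" response arrives, treating the
// hijacked connection as a raw byte stream to that port.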
func requireBuildletProxyAuth(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
user, pass, ok := r.BasicAuth()
if !ok {
http.Error(w, "missing required authentication", 400)
return
}
if !strings.HasPrefix(user, "user-") || builderKey(user) != pass {
if *mode == "dev" {
log.Printf("ignoring gomote authentication failure for %q in dev mode", user)
} else {
http.Error(w, "bad username or password", 401)
return
}
}
h.ServeHTTP(w, r)
})
}
var sshPrivateKeyFile string
func writeSSHPrivateKeyToTempFile(key []byte) (path string, err error) {
tf, err := ioutil.TempFile("", "ssh-priv-key")
if err != nil {
return "", err
}
if err := tf.Chmod(0600); err != nil {
return "", err
}
if _, err := tf.Write(key); err != nil {
return "", err
}
return tf.Name(), tf.Close()
}
func listenAndServeSSH() {
const listenAddr = ":2222" // TODO: flag if ever necessary?
var hostKey []byte
var err error
if *mode == "dev" {
sshPrivateKeyFile = filepath.Join(os.Getenv("HOME"), "keys", "id_gomotessh_rsa")
hostKey, err = ioutil.ReadFile(sshPrivateKeyFile)
if os.IsNotExist(err) {
log.Printf("SSH host key file %s doesn't exist; not running SSH server.", sshPrivateKeyFile)
return
}
if err != nil {
log.Fatal(err)
}
} else {
if storageClient == nil {
log.Printf("GCS storage client not available; not running SSH server.")
return
}
r, err := storageClient.Bucket(buildEnv.BuildletBucket).Object("coordinator-gomote-ssh.key").NewReader(context.Background())
if err != nil {
log.Printf("Failed to read ssh host key: %v; not running SSH server.", err)
return
}
hostKey, err = ioutil.ReadAll(r)
if err != nil {
log.Printf("Failed to read ssh host key: %v; not running SSH server.", err)
return
}
sshPrivateKeyFile, err = writeSSHPrivateKeyToTempFile(hostKey)
log.Printf("ssh: writeSSHPrivateKeyToTempFile = %v, %v", sshPrivateKeyFile, err)
if err != nil {
log.Printf("error writing ssh private key to temp file: %v; not running SSH server", err)
return
}
}
signer, err := gossh.ParsePrivateKey(hostKey)
if err != nil {
log.Printf("failed to parse SSH host key: %v; running running SSH server", err)
return
}
s := &ssh.Server{
Addr: listenAddr,
Handler: handleIncomingSSHPostAuth,
PublicKeyHandler: handleSSHPublicKeyAuth,
}
s.AddHostKey(signer)
log.Printf("running SSH server on %s", listenAddr)
err = s.ListenAndServe()
log.Printf("SSH server ended with error: %v", err)
// TODO: make ListenAndServe errors Fatal, once it has a proven track record. starting paranoid.
}
func handleSSHPublicKeyAuth(ctx ssh.Context, key ssh.PublicKey) bool {
inst := ctx.User() // expected to be of form "user-USER-goos-goarch-etc"
user := userFromGomoteInstanceName(inst)
if user == "" {
return false
}
// Map the gomote username to the github username, and use the
// github user's public ssh keys for authentication. This is
	// mostly out of laziness and pragmatism, not wanting to invent or
// maintain a new auth mechanism or password/key registry.
githubUser := gophers.GitHubOfGomoteUser(user)
keys := githubPublicKeys(githubUser)
for _, authKey := range keys {
if ssh.KeysEqual(key, authKey.PublicKey) {
log.Printf("for instance %q, github user %q key matched: %s", inst, githubUser, authKey.AuthorizedLine)
return true
}
}
return false
}
func handleIncomingSSHPostAuth(s ssh.Session) {
inst := s.User()
user := userFromGomoteInstanceName(inst)
requestedMutable := strings.HasPrefix(inst, "mutable-")
if requestedMutable {
inst = strings.TrimPrefix(inst, "mutable-")
}
ptyReq, winCh, isPty := s.Pty()
if !isPty {
fmt.Fprintf(s, "scp etc not yet supported; https://golang.org/issue/21140\n")
return
}
pubKey, err := metadata.ProjectAttributeValue("gomote-ssh-public-key")
if err != nil || pubKey == "" {
if err == nil {
err = errors.New("not found")
}
fmt.Fprintf(s, "failed to get GCE gomote-ssh-public-key: %v\n", err)
return
}
remoteBuildlets.Lock()
rb, ok := remoteBuildlets.m[inst]
remoteBuildlets.Unlock()
if !ok {
fmt.Fprintf(s, "unknown instance %q", inst)
return
}
hostType := rb.HostType
hostConf, ok := dashboard.Hosts[hostType]
if !ok {
fmt.Fprintf(s, "instance %q has unknown host type %q\n", inst, hostType)
return
}
bconf, ok := dashboard.Builders[rb.BuilderType]
if !ok {
fmt.Fprintf(s, "instance %q has unknown builder type %q\n", inst, rb.BuilderType)
return
}
ctx, cancel := context.WithCancel(s.Context())
defer cancel()
go rb.renew(ctx)
sshUser := hostConf.SSHUsername
useLocalSSHProxy := bconf.GOOS() != "plan9"
if sshUser == "" && useLocalSSHProxy {
fmt.Fprintf(s, "instance %q host type %q does not have SSH configured\n", inst, hostType)
return
}
if !hostConf.IsHermetic() && !requestedMutable {
fmt.Fprintf(s, "WARNING: instance %q host type %q is not currently\n", inst, hostType)
fmt.Fprintf(s, "configured to have a hermetic filesystem per boot.\n")
fmt.Fprintf(s, "You must be careful not to modify machine state\n")
fmt.Fprintf(s, "that will affect future builds. Do you agree? If so,\n")
fmt.Fprintf(s, "run gomote ssh --i-will-not-break-the-host <INST>\n")
return
}
log.Printf("connecting to ssh to instance %q ...", inst)
fmt.Fprintf(s, "# Welcome to the gomote ssh proxy, %s.\n", user)
fmt.Fprintf(s, "# Connecting to/starting remote ssh...\n")
fmt.Fprintf(s, "#\n")
var localProxyPort int
if useLocalSSHProxy {
sshConn, err := rb.buildlet.ConnectSSH(sshUser, pubKey)
log.Printf("buildlet(%q).ConnectSSH = %T, %v", inst, sshConn, err)
if err != nil {
fmt.Fprintf(s, "failed to connect to ssh on %s: %v\n", inst, err)
return
}
defer sshConn.Close()
// Now listen on some localhost port that we'll proxy to sshConn.
// The openssh ssh command line tool will connect to this IP.
ln, err := net.Listen("tcp", "localhost:0")
if err != nil {
fmt.Fprintf(s, "local listen error: %v\n", err)
return
}
localProxyPort = ln.Addr().(*net.TCPAddr).Port
log.Printf("ssh local proxy port for %s: %v", inst, localProxyPort)
var lnCloseOnce sync.Once
lnClose := func() { lnCloseOnce.Do(func() { ln.Close() }) }
defer lnClose()
// Accept at most one connection from localProxyPort and proxy
// it to sshConn.
go func() {
c, err := ln.Accept()
lnClose()
if err != nil {
return
}
defer c.Close()
errc := make(chan error, 1)
go func() {
_, err := io.Copy(c, sshConn)
errc <- err
}()
go func() {
_, err := io.Copy(sshConn, c)
errc <- err
}()
err = <-errc
}()
}
workDir, err := rb.buildlet.WorkDir(ctx)
if err != nil {
fmt.Fprintf(s, "Error getting WorkDir: %v\n", err)
return
}
ip, _, ipErr := net.SplitHostPort(rb.buildlet.IPPort())
fmt.Fprintf(s, "# `gomote push` and the builders use:\n")
fmt.Fprintf(s, "# - workdir: %s\n", workDir)
fmt.Fprintf(s, "# - GOROOT: %s/go\n", workDir)
fmt.Fprintf(s, "# - GOPATH: %s/gopath\n", workDir)
fmt.Fprintf(s, "# - env: %s\n", strings.Join(bconf.Env(), " ")) // TODO: shell quote?
fmt.Fprintf(s, "# Happy debugging.\n")
log.Printf("ssh to %s: starting ssh -p %d for %s@localhost", inst, localProxyPort, sshUser)
var cmd *exec.Cmd
switch bconf.GOOS() {
default:
cmd = exec.Command("ssh",
"-p", strconv.Itoa(localProxyPort),
"-o", "UserKnownHostsFile=/dev/null",
"-o", "StrictHostKeyChecking=no",
"-i", sshPrivateKeyFile,
sshUser+"@localhost")
case "plan9":
fmt.Fprintf(s, "# Plan9 user/pass: glenda/glenda123\n")
if ipErr != nil {
fmt.Fprintf(s, "# Failed to get IP out of %q: %v\n", rb.buildlet.IPPort(), err)
return
}
cmd = exec.Command("/usr/local/bin/drawterm",
"-a", ip, "-c", ip, "-u", "glenda", "-k", "user=glenda")
}
cmd.Env = append(cmd.Env, fmt.Sprintf("TERM=%s", ptyReq.Term))
f, err := pty.Start(cmd)
if err != nil {
log.Printf("running ssh client to %s: %v", inst, err)
return
}
defer f.Close()
go func() {
for win := range winCh {
setWinsize(f, win.Width, win.Height)
}
}()
go func() {
io.Copy(f, s) // stdin
}()
io.Copy(s, f) // stdout
cmd.Process.Kill()
cmd.Wait()
}
func setWinsize(f *os.File, w, h int) {
syscall.Syscall(syscall.SYS_IOCTL, f.Fd(), uintptr(syscall.TIOCSWINSZ),
uintptr(unsafe.Pointer(&struct{ h, w, x, y uint16 }{uint16(h), uint16(w), 0, 0})))
}
// userFromGomoteInstanceName returns the username part of a gomote
// remote instance name.
//
// The instance name is of two forms. The normal form is:
//
// user-bradfitz-linux-amd64-0
//
// The overloaded form to convey that the user accepts responsibility
// for changes to the underlying host is to prefix the same instance
// name with the string "mutable-", such as:
//
// mutable-user-bradfitz-darwin-amd64-10_8-0
//
// The mutable part is ignored by this function.
func userFromGomoteInstanceName(name string) string {
name = strings.TrimPrefix(name, "mutable-")
if !strings.HasPrefix(name, "user-") {
return ""
}
user := name[len("user-"):]
hyphen := strings.IndexByte(user, '-')
if hyphen == -1 {
return ""
}
return user[:hyphen]
}
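// Illustrative examples (added for clarity; not part of the original file):
//
//	userFromGomoteInstanceName("user-bradfitz-linux-amd64-0")               // "bradfitz"
//	userFromGomoteInstanceName("mutable-user-bradfitz-darwin-amd64-10_8-0") // "bradfitz"
//	userFromGomoteInstanceName("somethingelse")                             // ""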
// authorizedKey is a Github user's SSH authorized key, in both string and parsed format.
type authorizedKey struct {
AuthorizedLine string // e.g. "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILj8HGIG9NsT34PHxO8IBq0riSBv7snp30JM8AanBGoV"
PublicKey ssh.PublicKey
}
func githubPublicKeys(user string) []authorizedKey {
// TODO: caching, rate limiting.
req, err := http.NewRequest("GET", "https://github.com/"+user+".keys", nil)
if err != nil {
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
req = req.WithContext(ctx)
res, err := http.DefaultClient.Do(req)
if err != nil {
log.Printf("getting %s github keys: %v", user, err)
return nil
}
defer res.Body.Close()
if res.StatusCode != 200 {
return nil
}
var keys []authorizedKey
bs := bufio.NewScanner(res.Body)
for bs.Scan() {
key, _, _, _, err := ssh.ParseAuthorizedKey(bs.Bytes())
if err != nil {
log.Printf("parsing github user %q key %q: %v", user, bs.Text(), err)
continue
}
keys = append(keys, authorizedKey{
PublicKey: key,
AuthorizedLine: strings.TrimSpace(bs.Text()),
})
}
if err := bs.Err(); err != nil {
return nil
}
return keys
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
lxd/util/http.go
|
package util
import (
"bytes"
"context"
"crypto/sha256"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"golang.org/x/sys/unix"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/logger"
)
// WriteJSON encodes the body as JSON and sends it back to the client
func WriteJSON(w http.ResponseWriter, body interface{}, debug bool) error {
var output io.Writer
var captured *bytes.Buffer
output = w
if debug {
captured = &bytes.Buffer{}
output = io.MultiWriter(w, captured)
}
err := json.NewEncoder(output).Encode(body)
if captured != nil {
shared.DebugJson(captured)
}
return err
}
// EtagHash hashes the provided data and returns the sha256
func EtagHash(data interface{}) (string, error) {
etag := sha256.New()
err := json.NewEncoder(etag).Encode(data)
if err != nil {
return "", err
}
return fmt.Sprintf("%x", etag.Sum(nil)), nil
}
// EtagCheck validates the hash of the current state with the hash
// provided by the client
func EtagCheck(r *http.Request, data interface{}) error {
match := r.Header.Get("If-Match")
if match == "" {
return nil
}
match = strings.Trim(match, "\"")
hash, err := EtagHash(data)
if err != nil {
return err
}
if hash != match {
return fmt.Errorf("ETag doesn't match: %s vs %s", hash, match)
}
return nil
}
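// Illustrative usage sketch (added for clarity; not part of the original
// file; currentState is a placeholder for whatever struct the handler
// serializes):
//
//	// GET: report the current ETag to the client.
//	etag, _ := EtagHash(currentState)
//	w.Header().Set("ETag", etag)
//
//	// PUT: refuse the update if the client's If-Match no longer matches.
//	if err := EtagCheck(r, currentState); err != nil {
//		// respond with 412 Precondition Failed
//	}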
// HTTPClient returns an http.Client using the given certificate and proxy.
func HTTPClient(certificate string, proxy proxyFunc) (*http.Client, error) {
var err error
var cert *x509.Certificate
if certificate != "" {
certBlock, _ := pem.Decode([]byte(certificate))
if certBlock == nil {
return nil, fmt.Errorf("Invalid certificate")
}
cert, err = x509.ParseCertificate(certBlock.Bytes)
if err != nil {
return nil, err
}
}
tlsConfig, err := shared.GetTLSConfig("", "", "", cert)
if err != nil {
return nil, err
}
tr := &http.Transport{
TLSClientConfig: tlsConfig,
Dial: shared.RFC3493Dialer,
Proxy: proxy,
DisableKeepAlives: true,
}
myhttp := http.Client{
Transport: tr,
}
// Setup redirect policy
myhttp.CheckRedirect = func(req *http.Request, via []*http.Request) error {
// Replicate the headers
req.Header = via[len(via)-1].Header
return nil
}
return &myhttp, nil
}
// A function capable of proxying an HTTP request.
type proxyFunc func(req *http.Request) (*url.URL, error)
// ContextAwareRequest is an interface implemented by http.Request starting
// from Go 1.8. It supports graceful cancellation using a context.
type ContextAwareRequest interface {
WithContext(ctx context.Context) *http.Request
}
// CheckTrustState checks whether the given client certificate is trusted
// (i.e. it has a valid time span and it belongs to the given list of trusted
// certificates).
func CheckTrustState(cert x509.Certificate, trustedCerts map[string]x509.Certificate, certInfo *shared.CertInfo, trustCACertificates bool) (bool, string) {
// Extra validity check (should have been caught by TLS stack)
if time.Now().Before(cert.NotBefore) || time.Now().After(cert.NotAfter) {
return false, ""
}
if certInfo != nil && trustCACertificates {
ca := certInfo.CA()
if ca != nil && cert.CheckSignatureFrom(ca) == nil {
trusted := true
// Check whether the certificate has been revoked.
crl := certInfo.CRL()
if crl != nil {
for _, revoked := range crl.TBSCertList.RevokedCertificates {
if cert.SerialNumber.Cmp(revoked.SerialNumber) == 0 {
// Instead of returning false, we set trusted to false, allowing the client
// to authenticate using the trust password.
trusted = false
break
}
}
}
if trusted {
return true, shared.CertFingerprint(&cert)
}
}
}
for k, v := range trustedCerts {
if bytes.Compare(cert.Raw, v.Raw) == 0 {
logger.Debug("Found cert", log.Ctx{"name": k})
return true, k
}
}
return false, ""
}
// IsRecursionRequest checks whether the given HTTP request is marked with the
// "recursion" flag in its form values.
func IsRecursionRequest(r *http.Request) bool {
recursionStr := r.FormValue("recursion")
recursion, err := strconv.Atoi(recursionStr)
if err != nil {
return false
}
return recursion != 0
}
// ListenAddresses returns a list of host:port combinations at which
// this machine can be reached
func ListenAddresses(value string) ([]string, error) {
addresses := make([]string, 0)
if value == "" {
return addresses, nil
}
localHost, localPort, err := net.SplitHostPort(value)
if err != nil {
localHost = value
localPort = shared.DefaultPort
}
if localHost == "0.0.0.0" || localHost == "::" || localHost == "[::]" {
ifaces, err := net.Interfaces()
if err != nil {
return addresses, err
}
for _, i := range ifaces {
addrs, err := i.Addrs()
if err != nil {
continue
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if !ip.IsGlobalUnicast() {
continue
}
if ip.To4() == nil {
if localHost == "0.0.0.0" {
continue
}
addresses = append(addresses, fmt.Sprintf("[%s]:%s", ip, localPort))
} else {
addresses = append(addresses, fmt.Sprintf("%s:%s", ip, localPort))
}
}
}
} else {
if strings.Contains(localHost, ":") {
addresses = append(addresses, fmt.Sprintf("[%s]:%s", localHost, localPort))
} else {
addresses = append(addresses, fmt.Sprintf("%s:%s", localHost, localPort))
}
}
return addresses, nil
}
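// Illustrative behaviour sketch (added for clarity; not part of the original
// file); the resulting addresses depend on the host's interfaces:
//
//	ListenAddresses("0.0.0.0:8443") // every global unicast IPv4 address, e.g. "192.0.2.10:8443"
//	ListenAddresses("[::]:8443")    // IPv4 and IPv6 addresses, IPv6 ones in brackets
//	ListenAddresses("10.0.0.1")     // ["10.0.0.1:" + shared.DefaultPort]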
// GetListeners returns the socket-activated network listeners, if any.
//
// The 'start' parameter must be SystemdListenFDsStart, except in unit tests,
// see the docstring of SystemdListenFDsStart below.
func GetListeners(start int) []net.Listener {
defer func() {
os.Unsetenv("LISTEN_PID")
os.Unsetenv("LISTEN_FDS")
}()
pid, err := strconv.Atoi(os.Getenv("LISTEN_PID"))
if err != nil {
return nil
}
if pid != os.Getpid() {
return nil
}
fds, err := strconv.Atoi(os.Getenv("LISTEN_FDS"))
if err != nil {
return nil
}
listeners := []net.Listener{}
for i := start; i < start+fds; i++ {
unix.CloseOnExec(i)
file := os.NewFile(uintptr(i), fmt.Sprintf("inherited-fd%d", i))
listener, err := net.FileListener(file)
if err != nil {
continue
}
listeners = append(listeners, listener)
}
return listeners
}
// SystemdListenFDsStart is the number of the first file descriptor that might
// have been opened by systemd when socket activation is enabled. It's always 3
// in real-world usage (i.e. the first file descriptor opened after stdin,
// stdout and stderr), so this constant should always be the value passed to
// GetListeners, except for unit tests.
const SystemdListenFDsStart = 3
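// Illustrative usage sketch (added for clarity; not part of the original
// file): a daemon prefers inherited systemd sockets and only opens its own
// listener when none were passed in.
//
//	listeners := GetListeners(SystemdListenFDsStart)
//	if len(listeners) == 0 {
//		// not socket-activated; fall back to net.Listen(...)
//	}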
// IsJSONRequest returns true if the content type of the HTTP request is JSON.
func IsJSONRequest(r *http.Request) bool {
for k, vs := range r.Header {
if strings.ToLower(k) == "content-type" &&
len(vs) == 1 && strings.ToLower(vs[0]) == "application/json" {
return true
}
}
return false
}
|
[
"\"LISTEN_PID\"",
"\"LISTEN_FDS\""
] |
[] |
[
"LISTEN_PID",
"LISTEN_FDS"
] |
[]
|
["LISTEN_PID", "LISTEN_FDS"]
|
go
| 2 | 0 | |
shell.py
|
import sys
from tinyshell.colors import *
from tinyshell.commands import *
from tinyshell import interactive
def tests():
PASS = "[ {}PASS{} ]".format(GREEN, RESET)
FAIL = "[ {}FAIL{} ]".format(RED, RESET)
def cmd_manager():
# The following should all be equivalent.
mgr0 = CommandManager()
mgr0.register(Command("gcc", {"-o": ArgType.FILENAME}))
mgr0.register(Command("test"))
mgr0.register(Command("cat", {"-n": ArgType.NATURAL}))
#
mgr1 = CommandManager()
mgr1["gcc"] = [{"-o": ArgType.FILENAME}]
mgr1["test"] = []
mgr1["cat"] = [{"-n": ArgType.NATURAL}]
#
mgr2 = CommandManager(
("gcc", [{"-o": ArgType.FILENAME}]),
("test", []),
("cat", [{"-n": ArgType.NATURAL}])
)
# Check that the shorthands work.
assert(mgr0.cmds == mgr1.cmds == mgr2.cmds)
class TestError(Exception):
"""Raised when a test fails."""
def __init__(self, msg):
pass
def should_not_throw(fn, *args):
name = fn.__name__
try:
fn(*args)
except:
print(FAIL, name)
raise TestError("Test \"" + name + "\" did not succeed.")
print(PASS, name)
should_not_throw(cmd_manager)
def create_cmd_mgr():
return CommandManager(
("g++", [{"-o": ArgType.FILENAME}]),
("cat", [{"-n": ArgType.NATURAL}])
)
def run():
bar = "--------------------------------------------"
print(CYAN + "\nSelf-checking:" + RESET + \
" running routine unit tests...\n", bar, sep = "")
try:
tests()
print(GREEN + "\nNo error reported!" + RESET + \
" Starting shell...")
except Exception as e:
print(RED + "\nError during testing:" + RESET, e)
sys.exit(-1)
print(bar, end = "\n\n")
interactive.begin(sys.stdin, sys.stdout, create_cmd_mgr())
if __name__ == "__main__":
run()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
worknow/worknow/wsgi.py
|
"""
WSGI config for worknow project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "worknow.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
android/scripts/common.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import re
import sys
import shlex
import subprocess
import multiprocessing
import string
try:
import threading
except ImportError:
import dummy_threading as threading
class NativeLib:
def __init__ (self, apiVersion, abiVersion, prebuiltDir):
self.apiVersion = apiVersion
self.abiVersion = abiVersion
self.prebuiltDir = prebuiltDir
def __str__ (self):
return "(API: %s, ABI: %s)" % (self.apiVersion, self.abiVersion)
def __repr__ (self):
return "(API: %s, ABI: %s)" % (self.apiVersion, self.abiVersion)
def getPlatform ():
if sys.platform.startswith('linux'):
return 'linux'
else:
return sys.platform
def selectByOS (variants):
platform = getPlatform()
if platform in variants:
return variants[platform]
elif 'other' in variants:
return variants['other']
else:
raise Exception("No configuration for '%s'" % platform)
def isExecutable (path):
return os.path.isfile(path) and os.access(path, os.X_OK)
def which (binName):
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
fullPath = os.path.join(path, binName)
if isExecutable(fullPath):
return fullPath
return None
def isBinaryInPath (binName):
return which(binName) != None
def selectFirstExistingBinary (filenames):
for filename in filenames:
if filename != None and isExecutable(filename):
return filename
return None
def selectFirstExistingDir (paths):
for path in paths:
if path != None and os.path.isdir(path):
return path
return None
def die (msg):
print msg
exit(-1)
def shellquote(s):
return '"%s"' % s.replace('\\', '\\\\').replace('"', '\"').replace('$', '\$').replace('`', '\`')
def execute (commandLine):
args = shlex.split(commandLine)
retcode = subprocess.call(args)
if retcode != 0:
raise Exception("Failed to execute '%s', got %d" % (commandLine, retcode))
def execArgs (args):
# Make sure previous stdout prints have been written out.
sys.stdout.flush()
retcode = subprocess.call(args)
if retcode != 0:
raise Exception("Failed to execute '%s', got %d" % (str(args), retcode))
def execArgsInDirectory (args, cwd, linePrefix=""):
def readApplyPrefixAndPrint (source, prefix, sink):
while True:
line = source.readline()
if len(line) == 0: # EOF
break;
sink.write(prefix + line)
process = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutJob = threading.Thread(target=readApplyPrefixAndPrint, args=(process.stdout, linePrefix, sys.stdout))
stderrJob = threading.Thread(target=readApplyPrefixAndPrint, args=(process.stderr, linePrefix, sys.stderr))
stdoutJob.start()
stderrJob.start()
retcode = process.wait()
if retcode != 0:
raise Exception("Failed to execute '%s', got %d" % (str(args), retcode))
def serialApply(f, argsList):
for args in argsList:
f(*args)
def parallelApply(f, argsList):
class ErrorCode:
def __init__ (self):
self.error = None;
def applyAndCaptureError (func, args, errorCode):
try:
func(*args)
except:
errorCode.error = sys.exc_info()
errorCode = ErrorCode()
jobs = []
for args in argsList:
job = threading.Thread(target=applyAndCaptureError, args=(f, args, errorCode))
job.start()
jobs.append(job)
for job in jobs:
job.join()
if errorCode.error:
raise errorCode.error[0], errorCode.error[1], errorCode.error[2]
class Device:
def __init__(self, serial, product, model, device):
self.serial = serial
self.product = product
self.model = model
self.device = device
def __str__ (self):
return "%s: {product: %s, model: %s, device: %s}" % (self.serial, self.product, self.model, self.device)
def getDevices (adb):
proc = subprocess.Popen([adb, 'devices', '-l'], stdout=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
raise Exception("adb devices -l failed, got %d" % proc.returncode)
ptrn = re.compile(r'^([a-zA-Z0-9]+)\s+.*product:([^\s]+)\s+model:([^\s]+)\s+device:([^\s]+)')
devices = []
for line in stdout.splitlines()[1:]:
if len(line.strip()) == 0:
continue
m = ptrn.match(line)
if m == None:
print "WARNING: Failed to parse device info '%s'" % line
continue
devices.append(Device(m.group(1), m.group(2), m.group(3), m.group(4)))
return devices
def getWin32Generator ():
if which("jom.exe") != None:
return "NMake Makefiles JOM"
else:
return "NMake Makefiles"
def isNinjaSupported ():
return which("ninja") != None
def getUnixGenerator ():
if isNinjaSupported():
return "Ninja"
else:
return "Unix Makefiles"
def getExtraBuildArgs (generator):
if generator == "Unix Makefiles":
return ["--", "-j%d" % multiprocessing.cpu_count()]
else:
return []
NDK_HOST_OS_NAMES = [
"windows",
"windows-x86_64",
"darwin-x86",
"darwin-x86_64",
"linux-x86",
"linux-x86_64"
]
def getNDKHostOsName (ndkPath):
for name in NDK_HOST_OS_NAMES:
if os.path.exists(os.path.join(ndkPath, "prebuilt", name)):
return name
raise Exception("Couldn't determine NDK host OS")
# deqp/android path
ANDROID_DIR = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
# Build configuration
NATIVE_LIBS = [
# API ABI prebuiltsDir
NativeLib(13, "armeabi-v7a", 'android-arm'), # ARM v7a ABI
NativeLib(13, "x86", 'android-x86'), # x86
NativeLib(21, "arm64-v8a", 'android-arm64'), # ARM64 v8a ABI
NativeLib(21, "x86_64", 'android-x86_64'), # x86_64
]
ANDROID_JAVA_API = "android-22"
NATIVE_LIB_NAME = "libdeqp.so"
def selectNDKPath ():
candidates = [
os.path.expanduser("~/android-ndk-r11"),
"C:/android/android-ndk-r11",
os.environ.get("ANDROID_NDK_PATH", None), # If not defined, return None
]
ndkPath = selectFirstExistingDir(candidates)
if ndkPath == None:
raise Exception("None of NDK directory candidates exist: %s. Check ANDROID_NDK_PATH in common.py" % candidates)
return ndkPath
def noneSafePathJoin (*components):
if None in components:
return None
return os.path.join(*components)
# NDK paths
ANDROID_NDK_PATH = selectNDKPath()
ANDROID_NDK_HOST_OS = getNDKHostOsName(ANDROID_NDK_PATH)
ANDROID_NDK_TOOLCHAIN_VERSION = "r11" # Toolchain file is selected based on this
# Native code build settings
CMAKE_GENERATOR = selectByOS({
'win32': getWin32Generator(),
'other': getUnixGenerator()
})
EXTRA_BUILD_ARGS = getExtraBuildArgs(CMAKE_GENERATOR)
# SDK paths
ANDROID_SDK_PATH = selectFirstExistingDir([
os.environ.get("ANDROID_SDK_PATH", None),
os.path.expanduser("~/android-sdk-linux"),
os.path.expanduser("~/android-sdk-mac_x86"),
"C:/android/android-sdk-windows",
])
ANDROID_BIN = selectFirstExistingBinary([
noneSafePathJoin(ANDROID_SDK_PATH, "tools", "android"),
noneSafePathJoin(ANDROID_SDK_PATH, "tools", "android.bat"),
which('android'),
])
ADB_BIN = selectFirstExistingBinary([
which('adb'), # \note Prefer adb in path to avoid version issues on dev machines
noneSafePathJoin(ANDROID_SDK_PATH, "platform-tools", "adb"),
noneSafePathJoin(ANDROID_SDK_PATH, "platform-tools", "adb.exe"),
])
ZIPALIGN_BIN = selectFirstExistingBinary([
noneSafePathJoin(ANDROID_SDK_PATH, "tools", "zipalign"),
noneSafePathJoin(ANDROID_SDK_PATH, "tools", "zipalign.exe"),
which('zipalign'),
])
JARSIGNER_BIN = which('jarsigner')
# Apache ant
ANT_BIN = selectFirstExistingBinary([
which('ant'),
"C:/android/apache-ant-1.8.4/bin/ant.bat",
"C:/android/apache-ant-1.9.2/bin/ant.bat",
"C:/android/apache-ant-1.9.3/bin/ant.bat",
"C:/android/apache-ant-1.9.4/bin/ant.bat",
])
def makeNameValueTuple (name):
return (name, str(eval(name)))
CONFIG_VAR_NAMES = [
"ANDROID_DIR",
"NATIVE_LIBS",
"ANDROID_JAVA_API",
"NATIVE_LIB_NAME",
"ANDROID_NDK_PATH",
"ANDROID_NDK_HOST_OS",
"ANDROID_NDK_TOOLCHAIN_VERSION",
"CMAKE_GENERATOR",
"EXTRA_BUILD_ARGS",
"ANDROID_SDK_PATH",
"ANDROID_BIN",
"ADB_BIN",
"ZIPALIGN_BIN",
"JARSIGNER_BIN",
"ANT_BIN",
]
CONFIG_STRINGS = [makeNameValueTuple(x) for x in CONFIG_VAR_NAMES]
|
[] |
[] |
[
"ANDROID_SDK_PATH",
"PATH",
"ANDROID_NDK_PATH"
] |
[]
|
["ANDROID_SDK_PATH", "PATH", "ANDROID_NDK_PATH"]
|
python
| 3 | 0 | |
pkg/release/push.go
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package release
import (
"context"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"cloud.google.com/go/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/release/pkg/gcp/gcs"
"k8s.io/release/pkg/tar"
"k8s.io/release/pkg/util"
"k8s.io/utils/pointer"
)
// PushBuild is the main structure for pushing builds.
type PushBuild struct {
opts *PushBuildOptions
}
// PushBuildOptions are the main options to pass to `PushBuild`.
type PushBuildOptions struct {
// Specify an alternate bucket for pushes (normally 'devel' or 'ci').
Bucket string
// Specify an alternate build directory. Will be automatically determined
// if not set.
BuildDir string
// If set, push docker images to specified registry/project.
DockerRegistry string
// Comma separated list which can be used to upload additional version
	// files to GCS. The path is relative and is appended to a GCS path. (--ci
// only).
ExtraVersionMarkers string
// Specify a suffix to append to the upload destination on GCS.
GCSSuffix string
// Version to be used. Usually automatically discovered, but it can be
// used to overwrite this behavior.
Version string
// Append suffix to version name if set.
VersionSuffix string
// Do not exit error if the build already exists on the gcs path.
AllowDup bool
// Used when called from Jenkins (for ci runs).
CI bool
// Do not update the latest file.
NoUpdateLatest bool
// Do not mark published bits on GCS as publicly readable.
PrivateBucket bool
// Specifies a fast build (linux amd64 only).
Fast bool
	// Validate that the remote image digests exist; needs `skopeo` in
// `$PATH`.
ValidateRemoteImageDigests bool
}
type stageFile struct {
srcPath string
dstPath string
required bool
}
const extraDir = "extra"
var gcpStageFiles = []stageFile{
{
srcPath: filepath.Join(GCEPath, "configure-vm.sh"),
dstPath: extraDir + "/gce/configure-vm.sh",
required: false,
},
{
srcPath: filepath.Join(GCIPath, "node.yaml"),
dstPath: extraDir + "/gce/node.yaml",
required: true,
},
{
srcPath: filepath.Join(GCIPath, "master.yaml"),
dstPath: extraDir + "/gce/master.yaml",
required: true,
},
{
srcPath: filepath.Join(GCIPath, "configure.sh"),
dstPath: extraDir + "/gce/configure.sh",
required: true,
},
{
srcPath: filepath.Join(GCIPath, "shutdown.sh"),
dstPath: extraDir + "/gce/shutdown.sh",
required: false,
},
}
var windowsStageFiles = []stageFile{
{
srcPath: filepath.Join(WindowsLocalPath, "configure.ps1"),
dstPath: extraDir + "/gce/windows/configure.ps1",
required: true,
},
{
srcPath: filepath.Join(WindowsLocalPath, "common.psm1"),
dstPath: extraDir + "/gce/windows/common.psm1",
required: true,
},
{
srcPath: filepath.Join(WindowsLocalPath, "k8s-node-setup.psm1"),
dstPath: extraDir + "/gce/windows/k8s-node-setup.psm1",
required: true,
},
{
srcPath: filepath.Join(WindowsLocalPath, "testonly/install-ssh.psm1"),
dstPath: extraDir + "/gce/windows/install-ssh.psm1",
required: true,
},
{
srcPath: filepath.Join(WindowsLocalPath, "testonly/user-profile.psm1"),
dstPath: extraDir + "/gce/windows/user-profile.psm1",
required: true,
},
}
// NewPushBuild can be used to create a new PushBuild instance.
func NewPushBuild(opts *PushBuildOptions) *PushBuild {
return &PushBuild{opts}
}
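// Illustrative usage sketch (added for clarity; not part of the original
// file); the option values below are placeholders:
//
//	push := NewPushBuild(&PushBuildOptions{
//		Bucket:         "my-release-bucket",
//		DockerRegistry: "gcr.io/my-project",
//		CI:             true,
//	})
//	if err := push.Push(); err != nil {
//		logrus.Fatal(err)
//	}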
// Push pushes the build by taking the internal options into account.
func (p *PushBuild) Push() error {
version, err := p.findLatestVersion()
if err != nil {
return errors.Wrap(err, "find latest version")
}
logrus.Infof("Latest version is %s", version)
if err := p.CheckReleaseBucket(); err != nil {
return errors.Wrap(err, "check release bucket access")
}
if err := p.StageLocalArtifacts(); err != nil {
return errors.Wrap(err, "staging local artifacts")
}
gcsDest := "devel"
if p.opts.CI {
gcsDest = "ci"
}
gcsDest += p.opts.GCSSuffix
if p.opts.Fast {
gcsDest = filepath.Join(gcsDest, "fast")
}
gcsDest = filepath.Join(gcsDest, version)
logrus.Infof("GCS destination is %s", gcsDest)
if err := p.PushReleaseArtifacts(
filepath.Join(p.opts.BuildDir, GCSStagePath, version),
gcsDest,
); err != nil {
return errors.Wrap(err, "push release artifacts")
}
if err := p.PushContainerImages(); err != nil {
return errors.Wrap(err, "push container images")
}
if !p.opts.CI {
logrus.Info("No CI flag set, we're done")
return nil
}
if p.opts.NoUpdateLatest {
logrus.Info("Not updating version markers")
return nil
}
// Publish release to GCS
versionMarkers := strings.Split(p.opts.ExtraVersionMarkers, ",")
if err := NewPublisher().PublishVersion(
gcsDest, version, p.opts.BuildDir, p.opts.Bucket, versionMarkers,
p.opts.PrivateBucket, p.opts.Fast,
); err != nil {
return errors.Wrap(err, "publish release")
}
return nil
}
func (p *PushBuild) findLatestVersion() (latestVersion string, err error) {
// Check if latest build uses bazel
dir, err := os.Getwd()
if err != nil {
return "", errors.Wrap(err, "get working directory")
}
isBazel, err := BuiltWithBazel(dir)
if err != nil {
return "", errors.Wrap(err, "identify if release built with Bazel")
}
latestVersion = p.opts.Version
if p.opts.Version == "" {
if isBazel {
logrus.Info("Using Bazel build version")
version, err := ReadBazelVersion(dir)
if err != nil {
return "", errors.Wrap(err, "read Bazel build version")
}
latestVersion = version
} else {
logrus.Info("Using Dockerized build version")
version, err := ReadDockerizedVersion(dir)
if err != nil {
return "", errors.Wrap(err, "read Dockerized build version")
}
latestVersion = version
}
}
logrus.Infof("Using build version: %s", latestVersion)
valid, err := IsValidReleaseBuild(latestVersion)
if err != nil {
return "", errors.Wrap(
err, "determine if release build version is valid",
)
}
if !valid {
return "", errors.Errorf(
"build version %s is not valid for release", latestVersion,
)
}
if p.opts.CI && IsDirtyBuild(latestVersion) {
return "", errors.Errorf(
"refusing to push dirty build %s with --ci flag given",
latestVersion,
)
}
if p.opts.VersionSuffix != "" {
latestVersion += "-" + p.opts.VersionSuffix
}
if p.opts.BuildDir == "" {
logrus.Info("BuildDir is not set, setting it automatically")
if isBazel {
logrus.Infof(
"Release is build by bazel, setting BuildDir to %s",
BazelBuildDir,
)
p.opts.BuildDir = BazelBuildDir
} else {
logrus.Infof(
"Release is build in a container, setting BuildDir to %s",
BuildDir,
)
p.opts.BuildDir = BuildDir
}
}
return strings.TrimSpace(latestVersion), nil
}
// CheckReleaseBucket verifies that a release bucket exists and the current
// authenticated GCP user has write permissions to it.
// was: releaselib.sh: release::gcs::check_release_bucket
func (p *PushBuild) CheckReleaseBucket() error {
logrus.Infof("Checking bucket %s for write permissions", p.opts.Bucket)
client, err := storage.NewClient(context.Background())
if err != nil {
return errors.Wrap(err,
"fetching gcloud credentials, try running "+
`"gcloud auth application-default login"`,
)
}
bucket := client.Bucket(p.opts.Bucket)
if bucket == nil {
return errors.Errorf(
"identify specified bucket for artifacts: %s", p.opts.Bucket,
)
}
// Check if bucket exists and user has permissions
requiredGCSPerms := []string{"storage.objects.create"}
perms, err := bucket.IAM().TestPermissions(
context.Background(), requiredGCSPerms,
)
if err != nil {
return errors.Wrap(err, "find release artifact bucket")
}
if len(perms) != 1 {
return errors.Errorf(
"GCP user must have at least %s permissions on bucket %s",
requiredGCSPerms, p.opts.Bucket,
)
}
return nil
}
// StageLocalArtifacts locally stages the release artifacts
// was releaselib.sh: release::gcs::locally_stage_release_artifacts
func (p *PushBuild) StageLocalArtifacts() error {
logrus.Info("Staging local artifacts")
stageDir := filepath.Join(p.opts.BuildDir, GCSStagePath, p.opts.Version)
logrus.Infof("Cleaning staging dir %s", stageDir)
if err := util.RemoveAndReplaceDir(stageDir); err != nil {
return errors.Wrap(err, "remove and replace GCS staging directory")
}
// Copy release tarballs to local GCS staging directory for push
logrus.Info("Copying release tarballs")
if err := util.CopyDirContentsLocal(
filepath.Join(p.opts.BuildDir, ReleaseTarsPath), stageDir,
); err != nil {
return errors.Wrap(err, "copy source directory into destination")
}
extraPath := filepath.Join(stageDir, extraDir)
if util.Exists(extraPath) {
// Copy helpful GCP scripts to local GCS staging directory for push
logrus.Info("Copying extra GCP stage files")
if err := p.copyStageFiles(stageDir, gcpStageFiles); err != nil {
return errors.Wrapf(err, "copy GCP stage files")
}
// Copy helpful Windows scripts to local GCS staging directory for push
logrus.Info("Copying extra Windows stage files")
if err := p.copyStageFiles(stageDir, windowsStageFiles); err != nil {
return errors.Wrapf(err, "copy Windows stage files")
}
} else {
logrus.Infof("Skipping not existing extra dir %s", extraPath)
}
// Copy the plain binaries to GCS. This is useful for install scripts that
// download the binaries directly and don't need tars.
plainBinariesPath := filepath.Join(p.opts.BuildDir, ReleaseStagePath)
if util.Exists(plainBinariesPath) {
logrus.Info("Copying plain binaries")
if err := CopyBinaries(
filepath.Join(p.opts.BuildDir, ReleaseStagePath),
stageDir,
); err != nil {
return errors.Wrap(err, "stage binaries")
}
} else {
logrus.Infof(
"Skipping not existing plain binaries dir %s", plainBinariesPath,
)
}
// Write the release checksums
logrus.Info("Writing checksums")
if err := WriteChecksums(stageDir); err != nil {
return errors.Wrap(err, "write checksums")
}
return nil
}
// copyStageFiles takes the staging dir and copies each file of `files` into
// it. It also ensures that the base dir exists before copying the file (if the
// file is `required`).
func (p *PushBuild) copyStageFiles(stageDir string, files []stageFile) error {
for _, file := range files {
dstPath := filepath.Join(stageDir, file.dstPath)
if file.required {
if err := os.MkdirAll(
filepath.Dir(dstPath), os.FileMode(0o755),
); err != nil {
return errors.Wrapf(
err, "create destination path %s", file.dstPath,
)
}
}
if err := util.CopyFileLocal(
filepath.Join(p.opts.BuildDir, file.srcPath),
dstPath, file.required,
); err != nil {
return errors.Wrapf(err, "copy stage file")
}
}
return nil
}
// PushReleaseArtifacts can be used to push local artifacts from the `srcPath`
// to the remote `gcsPath`. The Bucket has to be set via the `Bucket` option.
func (p *PushBuild) PushReleaseArtifacts(srcPath, gcsPath string) error {
dstPath := gcs.NormalizeGCSPath(filepath.Join(p.opts.Bucket, gcsPath))
logrus.Infof("Pushing release artifacts from %s to %s", srcPath, dstPath)
return errors.Wrap(
gcs.RsyncRecursive(srcPath, dstPath), "rsync artifacts to GCS",
)
}
// PushContainerImages will publish container images into the set
// `DockerRegistry`. It also validates that the remote manifests are correct,
// which can be turned off by setting `ValidateRemoteImageDigests` to `false`.
func (p *PushBuild) PushContainerImages() error {
if p.opts.DockerRegistry == "" {
logrus.Info("Registry is not set, will not publish container images")
return nil
}
images := NewImages()
logrus.Infof("Publishing container images for %s", p.opts.Version)
if err := images.Publish(
p.opts.DockerRegistry, p.opts.Version, p.opts.BuildDir,
); err != nil {
return errors.Wrap(err, "publish container images")
}
if !p.opts.ValidateRemoteImageDigests {
logrus.Info("Will not validate remote image digests")
return nil
}
if err := images.Validate(
p.opts.DockerRegistry, p.opts.Version, p.opts.BuildDir,
); err != nil {
return errors.Wrap(err, "validate container images")
}
return nil
}
// CopyStagedFromGCS copies artifacts from GCS and between buckets as needed.
// was: anago:copy_staged_from_gcs
func (p *PushBuild) CopyStagedFromGCS(stagedBucket, buildVersion string) error {
logrus.Info("Copy staged release artifacts from GCS")
copyOpts := gcs.DefaultGCSCopyOptions
copyOpts.NoClobber = pointer.BoolPtr(p.opts.AllowDup)
copyOpts.AllowMissing = pointer.BoolPtr(false)
gsStageRoot := filepath.Join(p.opts.Bucket, stagePath, buildVersion, p.opts.Version)
gsReleaseRoot := filepath.Join(p.opts.Bucket, "release", p.opts.Version)
src := filepath.Join(gsStageRoot, GCSStagePath, p.opts.Version)
dst := gsReleaseRoot
logrus.Infof("Bucket to bucket copy from %s to %s", src, dst)
if err := gcs.CopyBucketToBucket(src, dst, copyOpts); err != nil {
return errors.Wrap(err, "copy stage to release bucket")
}
src = filepath.Join(src, kubernetesTar)
dst = filepath.Join(p.opts.BuildDir, GCSStagePath, p.opts.Version, kubernetesTar)
logrus.Infof("Copy kubernetes tarball %s to %s", src, dst)
if err := gcs.CopyToLocal(src, dst, copyOpts); err != nil {
return errors.Wrapf(err, "copy to local")
}
src = filepath.Join(gsStageRoot, ImagesPath)
if err := os.MkdirAll(p.opts.BuildDir, os.FileMode(0o755)); err != nil {
return errors.Wrap(err, "create dst dir")
}
logrus.Infof("Copy container images %s to %s", src, p.opts.BuildDir)
if err := gcs.CopyToLocal(src, p.opts.BuildDir, copyOpts); err != nil {
return errors.Wrapf(err, "copy to local")
}
return nil
}
// StageLocalSourceTree creates a src.tar.gz from the Kubernetes sources and
// uploads it to GCS.
func (p *PushBuild) StageLocalSourceTree(buildVersion string) error {
workDir := os.Getenv("GOPATH")
if workDir == "" {
return errors.New("GOPATH is not set")
}
tarballPath := filepath.Join(workDir, sourcesTar)
logrus.Infof("Creating source tree tarball in %s", workDir)
exclude, err := regexp.Compile(fmt.Sprintf(`.*/%s-.*`, BuildDir))
if err != nil {
return errors.Wrap(err, "compile tarball exclude regex")
}
if err := tar.Compress(
tarballPath, filepath.Join(workDir, "src"), exclude,
); err != nil {
return errors.Wrap(err, "create tarball")
}
logrus.Infof("Uploading source tree tarball to GCS")
copyOpts := gcs.DefaultGCSCopyOptions
copyOpts.AllowMissing = pointer.BoolPtr(false)
if err := gcs.CopyToGCS(
tarballPath,
filepath.Join(p.opts.Bucket, stagePath, buildVersion, sourcesTar),
copyOpts,
); err != nil {
return errors.Wrap(err, "copy tarball to GCS")
}
logrus.Infof("Removing local source tree tarball")
return errors.Wrap(os.RemoveAll(tarballPath), "remove local source tarball")
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
libioc/Jail.py
|
# Copyright (c) 2017-2019, Stefan Grönke
# Copyright (c) 2014-2018, iocage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""iocage Jail module."""
import typing
import os
import random
import shlex
import shutil
import libzfs
import freebsd_sysctl
import libioc.Types
import libioc.errors
import libioc.events
import libioc.helpers
import libioc.helpers_object
import libioc.JailState
import libioc.DevfsRules
import libioc.Host
import libioc.Config.Jail.JailConfig
import libioc.Network
import libioc.Release
import libioc.Storage
import libioc.Storage.NullFSBasejail
import libioc.Storage.Standalone
import libioc.Storage.ZFSBasejail
import libioc.ZFSShareStorage
import libioc.LaunchableResource
import libioc.VersionedResource
import libioc.Config.Jail.Properties.ResourceLimit
import libioc.ResourceSelector
import libioc.Config.Jail.File.Fstab
class JailResource(
libioc.LaunchableResource.LaunchableResource,
libioc.VersionedResource.VersionedResource
):
"""Resource that represents a jail."""
_jail: 'JailGenerator'
_fstab: 'libioc.Config.Jail.File.Fstab.Fstab'
host: 'libioc.Host.HostGenerator'
root_datasets_name: typing.Optional[str]
def __init__(
self,
jail: 'JailGenerator',
dataset: typing.Optional[libzfs.ZFSDataset]=None,
dataset_name: typing.Optional[str]=None,
config_type: str="auto",
config_file: typing.Optional[str]=None,
logger: typing.Optional['libioc.Logger.Logger']=None,
zfs: typing.Optional[libioc.ZFS.ZFS]=None,
host: typing.Optional['libioc.Host.HostGenerator']=None,
fstab: typing.Optional['libioc.Config.Jail.File.Fstab.Fstab']=None,
root_datasets_name: typing.Optional[str]=None,
) -> None:
self.host = libioc.helpers_object.init_host(self, host)
self.root_datasets_name = root_datasets_name
if fstab is not None:
self._fstab = fstab
if jail is not None:
self._jail = jail
libioc.LaunchableResource.LaunchableResource.__init__(
self,
dataset=dataset,
dataset_name=dataset_name,
config_type=config_type,
config_file=config_file,
logger=logger,
zfs=zfs
)
@property
def jail(self) -> 'JailGenerator':
"""
Jail instance that belongs to the resource.
        Usually the resource is inherited from the jail itself.
        It can still be linked to a foreign jail by passing the jail as a
        named attribute to the __init__ function.
"""
try:
return self._jail
except AttributeError:
pass
# is instance of Jail itself
if isinstance(self, JailGenerator):
jail = self # type: JailGenerator
return jail
raise Exception("This resource is not a jail or not linked to one")
@property
def fstab(self) -> 'libioc.Config.Jail.File.Fstab.Fstab':
"""
Memoized fstab wrapper of a Jail.
        The fstab file is stored in the top level of a jail's dataset.
"""
try:
return self._fstab
except AttributeError:
pass
try:
release = self.release
except AttributeError:
release = None
jail = self.jail
fstab = libioc.Config.Jail.File.Fstab.Fstab(
jail=jail,
release=release,
logger=self.logger,
host=jail.host
)
self._fstab = fstab
return fstab
@property
def dataset_name(self) -> str:
"""
Name of the jail base ZFS dataset.
If the resource has no dataset or dataset_name assigned yet,
        the jail id is used to determine the dataset name.
"""
try:
return str(self._assigned_dataset_name)
except AttributeError:
pass
try:
return str(self._dataset.name)
except AttributeError:
pass
return self._dataset_name_from_jail_name
@dataset_name.setter
def dataset_name(self, value: str) -> None:
"""
Override a jail's dataset name.
This will cause Jail.dataset to point to this specific dataset instead
of an auto-detected one to enable referencing jails from datasets
that are not managed by iocage
"""
self._dataset_name = value
def autoset_dataset_name(self) -> None:
"""
Automatically determine and set the dataset_name.
When a jail was created with the new attribute enabled, the dataset
        might not exist, so a dataset_name lookup would fail. Calling this
        method sets the jail's dataset_name to a child dataset of the host's
        jails dataset, named after the jail.
"""
if self.root_datasets_name is None:
base_name = self.host.datasets.main.jails.name
else:
base_name = self.host.datasets.__getitem__(
self.root_datasets_name
).jails.name
self.dataset_name = f"{base_name}/{self.name}"
@property
def _dataset_name_from_jail_name(self) -> str:
jail_id = str(self.jail.config["id"])
if jail_id is None:
raise libioc.errors.JailUnknownIdentifier()
if self.root_datasets_name is None:
base_name = self.host.datasets.main.jails.name
else:
try:
base_name = self.host.datasets.__getitem__(
self.root_datasets_name
).jails.name
except KeyError:
raise libioc.errors.SourceNotFound(logger=self.logger)
return f"{base_name}/{jail_id}"
@property
def source(self) -> str:
"""Return the name of the jails source root datasets."""
return str(
self.host.datasets.find_root_datasets_name(self.dataset_name)
)
def get(self, key: str) -> typing.Any:
"""Get a config value from the jail or defer to its resource."""
try:
return libioc.Resource.Resource.get(self, key)
except AttributeError:
pass
return self.jail.config[key]
class JailGenerator(JailResource):
"""
iocage unit orchestrates a jail's configuration and manages state.
Jails are represented as a zfs dataset ``zpool/iocage/jails/<NAME>``
Directory Structure:
zpool/iocage/jails/<NAME>:
            The jail's dataset containing its configuration and root dataset.
            iocage-legacy used to store a jail's configuration as ZFS
            properties on this dataset, although the modern JSON config
            mechanism is preferred.
zpool/iocage/jails/<NAME>/root:
            This directory is the dataset used as the jail's root when starting a
jail. Usually the clone source of a root dataset is a snapshot of
the release's root dataset.
zpool/iocage/jails/<NAME>/config.json:
Jails configured with the latest configuration style store their
information in a JSON file. When this file is found in the jail's
dataset, libiocage assumes the jail to be a JSON-style jail and
ignores other configuration mechanisms.
zpool/iocage/jails/<NAME>/config:
            Another compatible configuration mechanism is a UCL file. Its
            content is only taken into account if no JSON or ZFS configuration
was found.
Jail Types:
Standalone:
The /root dataset gets cloned from a release at creation time. It
            is not affected by changes to the Release and persists all data
within the jail.
NullFS Basejail:
            The fastest method to spawn a basejail: read-only directories
            from the release's root dataset are mounted from a snapshot
            of the release created on each boot of the jail. When a release is
updated, the jail is updated as well on the next reboot. This type
is the one used by the Python implementation of libioc.
ZFS Basejail: Legacy basejails used to clone individual datasets from a
release (stored in ``zpool/iocage/base/<RELEASE>``).
"""
_class_storage = libioc.Storage.Storage
_state: typing.Optional['libioc.JailState.JailState']
_relative_hook_script_dir: str
_provisioner: 'libioc.Provisioning.Prototype'
def __init__(
self,
data: typing.Union[str, typing.Dict[str, typing.Any]]={},
dataset: typing.Optional[libzfs.ZFSDataset]=None,
dataset_name: typing.Optional[str]=None,
config_type: str="auto",
config_file: typing.Optional[str]=None,
logger: typing.Optional['libioc.Logger.Logger']=None,
zfs: typing.Optional['libioc.ZFS.ZFS']=None,
host: typing.Optional['libioc.Host.Host']=None,
fstab: typing.Optional['libioc.Config.Jail.File.Fstab.Fstab']=None,
root_datasets_name: typing.Optional[str]=None,
new: bool=False
) -> None:
"""
Initialize a Jail.
Args:
data (string|dict):
Jail configuration dict or jail name as string identifier.
zfs (libzfs.ZFS): (optional)
Inherit an existing libzfs.ZFS() instance from ancestor classes
host (libioc.Host): (optional)
Inherit an existing Host instance from ancestor classes
logger (libioc.Logger): (optional)
Inherit an existing Logger instance from ancestor classes
"""
self.logger = libioc.helpers_object.init_logger(self, logger)
self.zfs = libioc.helpers_object.init_zfs(self, zfs)
self.host = libioc.helpers_object.init_host(self, host)
self._relative_hook_script_dir = "/.iocage"
if isinstance(data, str):
data = dict(id=data)
if "id" in data.keys():
data["id"] = self._resolve_name(data["id"])
JailResource.__init__(
self,
jail=self,
dataset=dataset,
dataset_name=dataset_name,
config_type=config_type,
config_file=config_file,
logger=self.logger,
zfs=self.zfs,
host=self.host,
fstab=fstab,
root_datasets_name=root_datasets_name
)
if not new and (("id" not in data) or (data["id"] is None)):
try:
                # try to get the Jail name from its dataset_name
data["id"] = self.dataset_name.split("/").pop()
except libioc.errors.JailUnknownIdentifier:
pass
self.config = libioc.Config.Jail.JailConfig.JailConfig(
host=self.host,
jail=self,
logger=self.logger
)
self.config.clone(data)
self.storage = self._class_storage(
safe_mode=False,
jail=self,
logger=self.logger,
zfs=self.zfs
)
if new is False:
self.config.read(data=self.read_config(), skip_on_error=True)
if self.config["id"] is None:
self.config["id"] = self.dataset_name.split("/").pop()
@property
def state(self) -> 'libioc.JailState.JailState':
"""
Memoized JailState.
This object holds information about the jail state. The information
        is memoized on first access because the lookup is expensive. Remember
        to update the object when executing operations that potentially
        change a jail's state.
"""
if "_state" not in object.__dir__(self):
return self._init_state()
elif object.__getattribute__(self, "_state") is None:
return self._init_state()
return object.__getattribute__(self, "_state")
@state.setter
def state(self, value: 'libioc.JailState.JailState') -> None:
"""
        Set the jail's JailState object.
        A public interface to set a jail's state. This behavior is part of a
        performance optimization when dealing with large numbers of jails.
"""
object.__setattr__(self, '_state', value)
@property
def provisioner(self) -> 'libioc.Provisioning.prototype.Provisioner':
"""
        Return the jail's Provisioner instance.
        The provisioner interprets the jail's configuration dynamically, so
        the Provisioner instance can be memoized.
"""
try:
return self._provisioner
except AttributeError:
pass
import libioc.Provisioning
self._provisioner = libioc.Provisioning.Provisioner(jail=self)
return self._provisioner
def _init_state(self) -> 'libioc.JailState.JailState':
state = libioc.JailState.JailState(
self.identifier,
logger=self.logger
)
self.state = state
state.query()
return state
def start(
self,
quick: bool=False,
passthru: bool=False,
single_command: typing.Optional[str]=None,
event_scope: typing.Optional['libioc.events.Scope']=None,
dependant_jails_seen: typing.List['JailGenerator']=[],
start_dependant_jails: bool=True
) -> typing.Generator['libioc.events.IocEvent', None, None]:
"""
Start the jail.
Args:
quick (bool):
Skip several operations that are not required when a jail
was unchanged since its last start (for example when restarting
it).
passthru (bool):
Execute commands in an interactive shell.
single_command (str):
                When set, the jail is launched non-persistent. The startup cycle
                reduces to the `prestart`, `command` and `poststop` hooks, with
                the single_command being executed in a /bin/sh context.
event_scope (libioc.lib.events.Scope): (default=None)
Provide an existing libiocage event scope or automatically
create a new one instead.
dependant_jails_seen (list[libioc.JailGenerator]):
Jail depends can have circular dependencies. By passing a list
of already started jails to the start command, iocage does not
need to query their state, because they are known to be running
                already. This argument is used internally when starting a jail's
                dependants recursively.
start_dependant_jails (bool):
When disabled, no dependant jails will be started.
"""
self.require_jail_existing()
self.require_jail_stopped()
self.require_jail_match_hostid()
try:
yield from self.config["resolver"].apply(
jail=self,
event_scope=event_scope
)
except Exception as e:
raise e
events: typing.Any = libioc.events
jailLaunchEvent = events.JailLaunch(jail=self, scope=event_scope)
dependant_jails_started: typing.List[JailGenerator] = []
if start_dependant_jails is True:
dependant_jails_seen.append(self)
DependantsStartEvent = libioc.events.JailDependantsStart
for event in self._start_dependant_jails(
self.config["depends"],
event_scope=event_scope,
dependant_jails_seen=dependant_jails_seen
):
if isinstance(event, DependantsStartEvent) is True:
if event.done and (event.error is None):
dependant_jails_started.extend(event.started_jails)
yield event
self._ensure_script_dir()
jail_start_script_dir = "".join([
self.root_dataset.mountpoint,
self._relative_hook_script_dir
])
if os.path.isdir(jail_start_script_dir) is False:
os.makedirs(jail_start_script_dir, 0o755)
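        # Build the command lists for the prestart/created/start/poststart
        # hooks; they are written below as shell scripts into the jail's
        # launch-scripts directory and referenced from the jail(8) parameters.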
exec_prestart: typing.List[str] = self._get_resource_limits_commands()
exec_start: typing.List[str] = [
f". {self._relative_hook_script_dir}/.env"
]
exec_created: typing.List[str] = [
f"echo \"export IOC_JID=$IOC_JID\" > {self.script_env_path}",
"set -eu",
]
exec_poststart: typing.List[str] = []
if self.config["vnet"]:
_created, _start = self._start_vimage_network()
exec_created += _created
exec_start += _start
exec_start += self._configure_localhost_commands()
exec_start += self._configure_routes_commands()
if self.host.ipfw_enabled is True:
exec_start.append("service ipfw onestop")
if self.config["jail_zfs"] is True:
share_storage = self._zfs_share_storage
share_storage.mount_zfs_shares()
exec_start += share_storage.read_commands("jail")
exec_created += share_storage.read_commands()
if self.config["exec_prestart"] is not None:
exec_prestart += [self.config["exec_prestart"]]
if self.config["exec_created"] is not None:
exec_created += [self.config["exec_created"]]
if self.config["exec_start"] is not None and (single_command is None):
exec_start += [self.config["exec_start"]]
if self.config["exec_poststart"] is not None:
exec_poststart += [self.config["exec_poststart"]]
self._write_hook_script(
"prestart",
self._wrap_hook_script_command_string(
exec_prestart,
ignore_errors=False
)
)
self._write_hook_script(
"created",
self._wrap_hook_script_command_string(
exec_created,
)
)
self._write_hook_script(
"start",
self._wrap_hook_script_command_string(
exec_start,
jailed=True,
ignore_errors=False
)
)
self._write_hook_script(
"poststart",
self._wrap_hook_script_command_string([
"set -eu",
"/bin/echo running exec.created hook on the host",
f"/bin/sh {self.get_hook_script_path('created')} 2>&1",
"/bin/echo running exec.start hook in the jail",
(
f"/usr/sbin/jexec {self.identifier} "
f"{self._relative_hook_script_dir}/start.sh"
),
"/bin/echo running exec.poststart hook on the host",
] + exec_poststart)
)
yield jailLaunchEvent.begin()
def _stop_failed_jail(
) -> typing.Generator['libioc.events.IocEvent', None, None]:
jails_to_stop = [self]
if start_dependant_jails is True:
jails_to_stop.extend(list(reversed(dependant_jails_started)))
for jail_to_stop in jails_to_stop:
yield from jail_to_stop.stop(
force=True,
event_scope=jailLaunchEvent.scope
)
jailLaunchEvent.add_rollback_step(_stop_failed_jail)
if self.is_basejail is True:
self.storage_backend.apply(self.storage, self.release)
if quick is False:
unknown_config_parameters = list(
self.config.unknown_config_parameters
)
if len(unknown_config_parameters) > 0:
_unused_parameters = str(", ".join(unknown_config_parameters))
self.logger.warn(
f"Unused JailConfig parameters: {_unused_parameters}"
)
self._save_autoconfig()
try:
self._prepare_stop()
if single_command is None:
stdout, stderr, returncode = self._launch_persistent_jail(
passthru=passthru
)
else:
stdout, stderr, returncode = self._launch_single_command_jail(
single_command,
passthru=passthru
)
if returncode != 0:
raise libioc.errors.JailLaunchFailed(
jail=self,
logger=self.logger
)
except libioc.errors.IocException as e:
yield from jailLaunchEvent.fail_generator(e)
raise e
yield jailLaunchEvent.end(stdout=stdout)
@property
def _zfs_share_storage(
self
) -> libioc.ZFSShareStorage.QueuingZFSShareStorage:
return libioc.ZFSShareStorage.QueuingZFSShareStorage(
jail=self,
logger=self.logger
)
def _start_dependant_jails(
self,
terms: libioc.Filter.Terms,
dependant_jails_seen: typing.List['JailGenerator'],
event_scope: typing.Optional['libioc.events.Scope']=None
) -> typing.Generator['libioc.events.IocEvent', None, None]:
jailDependantsStartEvent = libioc.events.JailDependantsStart(
jail=self,
scope=event_scope
)
started_jails: typing.List[JailGenerator] = []
yield jailDependantsStartEvent.begin()
_depends = self.config["depends"]
if len(_depends) == 0:
yield jailDependantsStartEvent.skip("No dependant jails")
return
dependant_jails = sorted(
libioc.Jails.JailsGenerator(
filters=_depends,
host=self.host,
logger=self.logger,
zfs=self.zfs
),
key=lambda x: x.config["priority"]
)
for dependant_jail in dependant_jails:
if dependant_jail == self:
self.logger.warn(f"The jail {self.name} depends on itself")
continue
if dependant_jail in dependant_jails_seen:
self.logger.spam(
f"Circular dependency {dependant_jail.name} - skipping"
)
continue
dependant_jails_seen.append(dependant_jail)
jailDependantStartEvent = libioc.events.JailDependantStart(
jail=dependant_jail,
scope=jailDependantsStartEvent.scope
)
yield jailDependantStartEvent.begin()
dependant_jail.state.query()
if dependant_jail.running is True:
yield jailDependantStartEvent.skip("already running")
continue
try:
yield from dependant_jail.start(
event_scope=jailDependantStartEvent.scope,
dependant_jails_seen=dependant_jails_seen
)
except libioc.errors.IocException as err:
yield jailDependantStartEvent.fail(err)
yield from jailDependantsStartEvent.fail_generator(err)
raise err
yield jailDependantStartEvent.end()
started_jails.append(dependant_jail)
# revert start of previously started dependants after failure
def _revert_start(
jail: JailGenerator
) -> typing.Callable[
[],
typing.Generator['libioc.events.IocEvent', None, None]
]:
def revert_method() -> typing.Generator[
'libioc.events.IocEvent',
None,
None
]:
yield from jail.stop(force=True)
return revert_method
jailDependantsStartEvent.add_rollback_step(
_revert_start(dependant_jail)
)
yield jailDependantsStartEvent.end(
started_jails=started_jails
)
def _run_poststop_hook_manually(self) -> None:
self.logger.debug("Running poststop hook manually")
libioc.helpers.exec(self.get_hook_script_path("poststop"))
def _wrap_jail_command(
self,
commands: typing.Optional[typing.List[str]]
) -> typing.List[str]:
"""Wrap a jail hook command for a host hook script."""
if commands is None:
return []
EOF_IDENTIFIER = f"EOF{random.getrandbits(64)}"
output: typing.List[str] = [
"set -eu",
"echo 'Executing jail start scripts'",
"jexec -j {self.identifier} /bin/sh <<{EOF_IDENTIFIER}"
] + commands + [
EOF_IDENTIFIER,
"set +e"
]
return output
def _wrap_hook_script_command(
self,
commands: typing.Optional[typing.Union[str, typing.List[str]]],
ignore_errors: bool=True,
jailed: bool=False, # ToDo: remove unused argument
write_env: bool=True
) -> typing.List[str]:
if isinstance(commands, str):
return [commands]
elif commands is None:
return []
else:
return commands
def _wrap_hook_script_command_string(
self,
commands: typing.Optional[typing.Union[str, typing.List[str]]],
ignore_errors: bool=True,
jailed: bool=False,
write_env: bool=True
) -> str:
return "\n".join(self._wrap_hook_script_command(
commands=commands,
ignore_errors=ignore_errors,
jailed=jailed,
write_env=write_env
))
def fork_exec(
self,
command: str,
passthru: bool=False,
event_scope: typing.Optional['libioc.events.Scope']=None,
start_dependant_jails: bool=True,
dependant_jails_seen: typing.List['JailGenerator']=[],
**temporary_config_override: typing.Any
) -> typing.Generator['libioc.events.IocEvent', None, None]:
"""
Start a jail, run a command and shut it down immediately.
Args:
command (string):
The command to execute in the jail.
passthru (bool):
Execute commands in an interactive shell.
event_scope (libioc.lib.events.Scope): (default=None)
Provide an existing libiocage event scope or automatically
create a new one instead.
dependant_jails_seen (list[libioc.JailGenerator]):
Jail depends can have circular dependencies. By passing a list
of already started jails to the start command, iocage does not
need to query their state, because they are known to be running
                already. This argument is used internally when starting a jail's
                dependants recursively.
start_dependant_jails (bool):
When disabled, no dependant jails will be started.
**temporary_config_override (dict(str, any)):
                Other named arguments temporarily override JailConfig properties.
For example:
jail = libioc.JailGenerator("myjail")
events = jail.fork_exec("ifconfig", vnet=False)
print(list(events))
"""
self.require_jail_existing()
self.require_jail_stopped()
original_config = self.config
config_data = original_config.data
for key, value in temporary_config_override.items():
config_data[key] = value
self.config = libioc.Config.Jail.JailConfig.JailConfig(
host=self.host,
jail=self,
logger=self.logger
)
self.config.clone(original_config.data)
try:
fork_exec_events = JailGenerator.start(
self,
single_command=command,
passthru=passthru,
event_scope=event_scope,
dependant_jails_seen=dependant_jails_seen,
start_dependant_jails=start_dependant_jails
)
for event in fork_exec_events:
yield event
finally:
self.config = original_config
def _run_hook(self, hook_name: str) -> typing.Optional[
libioc.helpers.CommandOutput
]:
"""
Execute a jail hook.
Hooks are executed during the start and stop process of the jail.
"""
key = f"exec_{hook_name}"
value = str(self.config.get(key, "/usr/bin/true"))
if value == "/usr/bin/true":
return None
self.logger.verbose(
f"Running {hook_name} hook for {self.humanreadable_name}"
)
lex = shlex.shlex(value) # noqa: T484
lex.whitespace_split = True
command = list(lex)
if (hook_name == "start") or (hook_name == "stop"):
return self.exec(
command,
passthru=False
)
# ToDo: Deprecate and remove this method
raise NotImplementedError("_run_hook only supports start/stop")
def _ensure_script_dir(self) -> None:
"""Ensure that the launch scripts dir exists."""
realpath = os.path.realpath(self.launch_script_dir)
if realpath.startswith(self.dataset.mountpoint) is False:
raise libioc.errors.SecurityViolationConfigJailEscape(
file=realpath
)
if os.path.isdir(realpath) is False:
os.makedirs(realpath, 0o755)
def _prepare_stop(self) -> None:
self._ensure_script_dir()
exec_prestop = []
exec_stop = []
exec_poststop = self._teardown_mounts() + self._clear_resource_limits()
# ToDo: self.config.get("exec_prestop", "")
if self.config["exec_prestop"] is not None:
exec_prestop.append(self.config["exec_prestop"])
if self.config["exec_stop"] is not None:
exec_stop.append(self.config["exec_stop"])
exec_poststop = self._stop_network() + exec_poststop
if self.config["exec_poststop"] is not None:
exec_poststop.append(self.config["exec_poststop"])
if self.config["jail_zfs"] is True:
share_storage = libioc.ZFSShareStorage.QueuingZFSShareStorage(
jail=self,
logger=self.logger
)
share_storage.umount_zfs_shares()
exec_stop += share_storage.read_commands("jail")
exec_poststop += share_storage.read_commands()
if self.running and (os.path.isfile(self.script_env_path) is False):
# when a jail was started from other iocage variants
self._write_temporary_script_env()
exec_poststop.append(f"rm \"{shlex.quote(self.script_env_path)}\"")
self._write_hook_script(
"prestop",
self._wrap_hook_script_command_string(exec_prestop)
)
self._write_hook_script(
"stop",
self._wrap_hook_script_command_string(
exec_stop,
jailed=True,
ignore_errors=True
)
)
self._write_hook_script(
"poststop",
self._wrap_hook_script_command_string(
exec_poststop,
write_env=False,
ignore_errors=True
)
)
def stop(
self,
force: bool=False,
event_scope: typing.Optional['libioc.events.Scope']=None,
log_errors: bool=True
) -> typing.Generator['libioc.events.IocEvent', None, None]:
"""
Stop a jail.
Args:
force (bool): (default=False)
Ignores failures and enforces teardown if True.
event_scope (libioc.lib.events.Scope): (default=None)
Provide an existing libiocage event scope or automatically
create a new one instead.
log_errors (bool): (default=True)
                When disabled, errors are not passed to the logger. This is
                useful in scripted contexts when the stop operation is
                executed to enforce a defined jail state.
"""
if force is False:
self.require_jail_existing(log_errors=log_errors)
self.require_jail_running(log_errors=log_errors)
events: typing.Any = libioc.events
jailDestroyEvent = events.JailDestroy(self, scope=event_scope)
self._prepare_stop()
yield jailDestroyEvent.begin()
try:
self._write_jail_conf(force=force)
self._destroy_jail(log_errors=log_errors)
except Exception as e:
if force is True:
yield jailDestroyEvent.skip()
self.logger.debug(
"Manually executing prestop and poststop hooks"
)
try:
for hook_name in ["prestop", "poststop"]:
libioc.helpers.exec(
command=[self.get_hook_script_path(hook_name)]
)
except Exception as e:
self.logger.warn(str(e))
else:
yield jailDestroyEvent.fail(e)
raise e
yield jailDestroyEvent.end()
try:
self.state.query()
except Exception as e:
if force is True:
self.logger.warn(str(e))
else:
raise e
def _write_temporary_script_env(self) -> None:
self.logger.debug(
f"Writing the hook script .env file {self.script_env_path}"
f" for JID {self.jid}"
)
self._ensure_script_dir()
with open(self.script_env_path, "w") as f:
f.write(f"export IOC_JID={self.jid}")
def _write_jail_conf(self, force: bool=False) -> None:
if force is True:
stop_command = "/usr/bin/true"
else:
stop_command = (
f"[ -f \"{self._relative_hook_script_dir}/stop.sh\" ]"
" || exit 0; "
f". {self._relative_hook_script_dir}/stop.sh"
)
content = "\n".join([
self.identifier + " {",
(
"exec.prestop = "
f"\"/bin/sh {self.get_hook_script_path('prestop')}\";"
), (
"exec.poststop = "
f"\"/bin/sh {self.get_hook_script_path('poststop')}\";"
), (
f"exec.stop = \"{stop_command}\";"
), (
f"exec.jail_user = {self._get_value('exec_jail_user')};"
),
"}"
])
self.logger.debug(f"Writing jail.conf file to {self._jail_conf_file}")
with open(self._jail_conf_file, "w") as f:
f.write(content)
@property
def _jail_conf_file(self) -> str:
return f"{self.launch_script_dir}/jail.conf"
def restart(
self,
shutdown: bool=False,
force: bool=False,
event_scope: typing.Optional['libioc.events.Scope']=None
) -> typing.Generator['libioc.events.IocEvent', None, None]:
"""Restart the jail."""
failed: bool = False
jailRestartEvent = libioc.events.JailRestart(
jail=self,
scope=event_scope
)
jailShutdownEvent = libioc.events.JailShutdown(
jail=self,
scope=jailRestartEvent.scope
)
JailSoftShutdownEvent = libioc.events.JailSoftShutdown(
jail=self,
scope=jailRestartEvent.scope
)
jailStartEvent = libioc.events.JailStart(
jail=self,
scope=jailRestartEvent.scope
)
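        # A soft restart (shutdown=False) only runs the stop/start hooks in
        # the running jail; a full shutdown tears the jail down and starts it
        # again.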
yield jailRestartEvent.begin()
if shutdown is False:
# soft stop
yield JailSoftShutdownEvent.begin()
try:
self._run_hook("stop")
yield JailSoftShutdownEvent.end()
except libioc.errors.IocException:
yield JailSoftShutdownEvent.fail(exception=False)
# service start
yield jailStartEvent.begin()
try:
self._run_hook("start")
yield jailStartEvent.end()
except libioc.errors.IocException:
yield jailStartEvent.fail(exception=False)
else:
# full shutdown
yield jailShutdownEvent.begin()
try:
for event in self.stop():
yield event
yield jailShutdownEvent.end()
except libioc.errors.IocException:
failed = True
yield jailShutdownEvent.fail(exception=False)
if force is False:
# only continue when force is enabled
yield jailRestartEvent.fail(exception=False)
return
# start
yield jailStartEvent.begin()
try:
for event in self.start():
yield event
yield jailStartEvent.end()
except libioc.errors.IocException:
failed = True
yield jailStartEvent.fail(exception=False)
# respond to failure
if failed is True:
yield jailRestartEvent.fail(exception=False)
return
yield jailRestartEvent.end()
def destroy(
self,
force: bool=False,
force_stop: bool=False,
event_scope: typing.Optional['libioc.events.Scope']=None
) -> typing.Generator['libioc.events.IocEvent', None, None]:
"""
        Destroy a Jail and its datasets.
Args:
force (bool): (default=False)
                This flag controls whether an existing jail should be shut down
before destroying the dataset. By default destroying a jail
requires it to be stopped.
force_stop (bool): (default=False)
A jail is force stopped when either the force_stop argument was
set or the force option was enabled and the jail is running.
                When enabled, the argument invokes a full stop before
destroying the jail.
"""
self.state.query()
if event_scope is None:
event_scope = libioc.events.Scope()
_stop_jail = force_stop
if force is False:
self.require_jail_stopped()
else:
_stop_jail = (self.running is True)
if _stop_jail is True:
try:
stop_events = JailGenerator.stop(
self,
force=True,
event_scope=event_scope,
log_errors=(force_stop is False)
)
for event in stop_events:
yield event
except libioc.lib.errors.JailDestructionFailed:
pass
zfsDatasetDestroyEvent = libioc.events.ZFSDatasetDestroy(
dataset=self.dataset,
scope=event_scope
)
yield zfsDatasetDestroyEvent.begin()
try:
self.zfs.delete_dataset_recursive(self.dataset)
except Exception as e:
zfsDatasetDestroyEvent.fail(e)
raise e
yield zfsDatasetDestroyEvent.end()
def rename(
self,
new_name: str,
event_scope: typing.Optional['libioc.events.Scope']=None
) -> typing.Generator['libioc.events.IocEvent', None, None]:
"""
Change the name of a jail.
Args:
new_name (str):
                The new name of a jail. It must not be used by another Jail
and must differ from the current name.
"""
self.require_jail_existing()
self.require_jail_stopped()
self.require_storage_backend()
if libioc.helpers.validate_name(new_name) is False:
raise libioc.errors.InvalidJailName(
name=new_name,
logger=self.logger
)
current_id = self.config["id"]
current_mountpoint = self.dataset.mountpoint
jailRenameEvent = libioc.events.JailRename(
jail=self,
current_name=current_id,
new_name=new_name,
scope=event_scope
)
self.config["id"] = new_name # validates new_name
yield jailRenameEvent.begin()
self.logger.debug(f"Renaming jail {current_id} to {new_name}")
def revert_id_change() -> None:
self.config["id"] = current_id
self.logger.debug(f"Jail id reverted to {current_id}")
jailRenameEvent.add_rollback_step(revert_id_change)
try:
events = self.storage_backend.rename(
self.storage,
new_name=new_name,
event_scope=jailRenameEvent.scope
)
for event in events:
yield jailRenameEvent.child_event(event)
if event.error is not None:
raise event.error
except BaseException as e:
yield jailRenameEvent.fail(e)
raise e
# Update fstab to the new dataset
fstab_path_events = self._update_fstab_paths(
current_mountpoint,
event_scope=jailRenameEvent.scope
)
for event in fstab_path_events:
yield event
yield jailRenameEvent.end()
def _update_fstab_paths(
self,
old_path_prefix: str,
new_path_prefix: typing.Optional[str]=None,
event_scope: typing.Optional['libioc.events.Scope']=None
) -> typing.Generator['libioc.events.IocEvent', None, None]:
"""
Update a path in the whole fstab file.
        When no new_path_prefix is provided, the jail's dataset mountpoint is used.
"""
if new_path_prefix is None:
_new_path_prefix = self.dataset.mountpoint
else:
_new_path_prefix = new_path_prefix
jailFstabUpdateEvent = libioc.events.JailFstabUpdate(
jail=self,
scope=event_scope
)
yield jailFstabUpdateEvent.begin()
try:
self.fstab.read_file()
self.fstab.replace_path(
old_path_prefix,
_new_path_prefix
)
self.fstab.save()
yield jailFstabUpdateEvent.end()
except BaseException as e:
yield jailFstabUpdateEvent.fail(e)
raise e
def create(
self,
resource: typing.Optional[typing.Union[
'JailGenerator',
'libioc.Release.ReleaseGenerator',
str
]]=None
) -> None:
"""
Create a Jail from a given Resource.
Args:
resource (Jail or Release):
The (new) jail is created from this resource.
                If no resource is specified, an empty dataset will be created.
"""
if isinstance(resource, str):
resource = libioc.Release(resource)
if isinstance(resource, JailGenerator):
self.create_from_template(template=resource)
elif isinstance(resource, libioc.Release.ReleaseGenerator):
self.create_from_release(release=resource)
else:
self.create_from_scratch()
self._ensure_script_dir()
def create_from_scratch(
self
) -> None:
"""Create a new jail without any root dataset content."""
self._create_skeleton()
def create_from_release(
self,
release: 'libioc.Release.ReleaseGenerator'
) -> None:
"""
Create a Jail from a Release.
Args:
            release (Release):
                The jail is created from the provided release.
"""
if release.fetched is False:
raise libioc.errors.ReleaseNotFetched(
name=release.name,
logger=self.logger
)
self.config["release"] = release.full_name
self._create_from_resource(release)
def create_from_template(
self,
template: 'JailGenerator'
) -> None:
"""Create a Jail from a template Jail."""
template.require_jail_is_template()
existing_config_keys = list(self.config.keys())
for key in template.config.keys():
if key in (["id", "name", "template"] + existing_config_keys):
continue
self.config[key] = template.config[key]
self.config['release'] = template.release.full_name
self.config['basejail'] = template.config['basejail']
self.config['basejail_type'] = template.config['basejail_type']
self._create_from_resource(template)
def promote(self) -> None:
"""Promote all datasets of the jail."""
self.zfs.promote_dataset(self.dataset, logger=self.logger)
def clone_from_jail(
self,
source_jail: 'JailGenerator',
event_scope: typing.Optional['libioc.events.Scope']=None
) -> typing.Generator['libioc.events.IocEvent', None, None]:
"""Create a Jail from another Jail."""
self.autoset_dataset_name()
if event_scope is None:
event_scope = libioc.events.Scope()
yield from source_jail.clone_to_dataset(
self.dataset_name,
event_scope=event_scope
)
self.config.clone(source_jail.config.data, skip_on_error=True)
self.save()
fstab_update_generator = self._update_fstab_paths(
source_jail.root_dataset.mountpoint,
event_scope=event_scope
)
for event in fstab_update_generator:
yield event
def clone_to_dataset(
self,
destination_dataset_name: str,
delete_existing: bool=False,
event_scope: typing.Optional['libioc.events.Scope']=None
) -> typing.Generator['libioc.events.IocEvent', None, None]:
"""Clones the jails dataset to another dataset with the given name."""
jailCloneEvent = libioc.events.JailClone(
jail=self,
scope=event_scope
)
yield jailCloneEvent.begin()
try:
self.zfs.clone_dataset(
source=self.dataset,
target=destination_dataset_name,
delete_existing=delete_existing
)
except Exception as e:
err = libioc.errors.ZFSException(
*e.args,
logger=self.logger
)
yield jailCloneEvent.fail(err)
raise err
yield jailCloneEvent.end()
def _create_skeleton(self) -> None:
if self.config["id"] is None:
self.config["id"] = str(libioc.helpers.get_random_uuid())
self.require_jail_not_existing()
self.logger.verbose(
f"Creating jail '{self.config['id']}'"
)
for key, value in self.config.data.items():
msg = f"{key} = {value}"
self.logger.spam(msg, indent=1)
self.create_resource()
def _create_from_resource(
self,
resource: 'libioc.Resource.Resource'
) -> None:
self._create_skeleton()
backend = self.storage_backend
if backend is not None:
backend.setup(self.storage, resource)
self.config["hostid"] = self.host.id
self._update_fstab()
self.save()
@property
def is_basejail(self) -> bool:
"""
Return True if a Jail is a basejail.
        If this is the case, parts of the jail's dataset will be mounted
        from its release or upstream Jail (for example a Template).
"""
return self.config.get("basejail", False) is True
@property
def storage_backend(self) -> libioc.Storage.Storage:
"""
Return the jail storage abstraction class.
Returns the class that represents the jails storage backend according
to its configuration.
"""
if not self.is_basejail:
return libioc.Storage.Standalone.StandaloneJailStorage
if self.config["basejail_type"] == "nullfs":
return libioc.Storage.NullFSBasejail.NullFSBasejailStorage
if self.config["basejail_type"] == "zfs":
return libioc.Storage.ZFSBasejail.ZFSBasejailStorage
def save(self) -> None:
"""Permanently save a jail's configuration."""
self._write_config(self.config.data)
self._save_autoconfig()
def _save_autoconfig(self) -> None:
"""Save auto-generated files."""
self.rc_conf.save()
self._update_fstab()
def _update_fstab(self) -> None:
if self.config["basejail_type"] == "nullfs":
self.fstab.release = self.release
else:
self.fstab.release = None
self.fstab.read_file()
self.fstab.save()
def exec(
self,
command: typing.List[str],
env: typing.Dict[str, str]={},
passthru: bool=False,
**kwargs: typing.Any
) -> libioc.helpers.CommandOutput:
"""
Execute a command in a running jail.
command (list):
            A list of the command and its arguments
Example: ["/usr/bin/whoami"]
env (dict):
The dictionary may contain env variables that will be forwarded to
the executed jail command.
passthru (bool): (default=False)
            When enabled, the command's stdout and stderr are directly forwarded
to the attached terminal. The results will not be included in the
CommandOutput, so that (None, None, <returncode>) is returned.
"""
command = ["/usr/sbin/jexec", str(self.jid)] + command
command_env = self.env
for env_key, env_value in env.items():
command_env[env_key] = env_value
stdout, stderr, returncode = self._exec_host_command(
command,
env=command_env,
passthru=passthru
)
return stdout, stderr, returncode
def passthru(
self,
command: typing.List[str],
env: typing.Optional[typing.Dict[str, str]]=None
) -> libioc.helpers.CommandOutput:
"""
Execute a command in a started jail and passthrough STDIN and STDOUT.
command (list):
            A list of the command and its arguments
Example: ["/bin/sh"]
"""
if isinstance(command, str):
command = [command]
return self._exec_host_command(
command=[
"/usr/sbin/jexec",
str(self.jid)
] + command,
passthru=True,
env=env
)
def exec_console(
self
) -> libioc.helpers.CommandOutput:
"""Shortcut to drop into a shell of a started jail."""
self.require_jail_running()
return self.passthru(
["/usr/bin/login"] + self.config["login_flags"]
)
def _destroy_jail(self, log_errors: bool=True) -> None:
stdout, stderr, returncode = self._exec_host_command(
[
"/usr/sbin/jail",
"-v",
"-r",
"-f",
self._jail_conf_file,
self.identifier
],
passthru=False,
env=self.env
)
if returncode > 0:
raise libioc.errors.JailDestructionFailed(
jail=self,
logger=(self.logger if log_errors else None)
)
@property
def _dhcp_enabled(self) -> bool:
"""Return True if any ip4_addr uses DHCP."""
if self.config["ip4_addr"] is None:
return False
return ("dhcp" in self.config["ip4_addr"].networks) is True
@property
def devfs_ruleset(self) -> libioc.DevfsRules.DevfsRuleset:
"""
Return the number of the jail's devfs ruleset.
When a new combination of the base ruleset specified in
jail.config["devfs_ruleset"] and rules automatically added by iocage
        appears, the corresponding ruleset is automatically created and added
        to the /etc/devfs.rules file on the host.
        Users may reference a ruleset by numeric identifier or name. These
        numbers are automatically selected, so it's advisable to use names.
"""
try:
configured_devfs_ruleset = self.host.devfs.find_by_number(
int(self.config["devfs_ruleset"])
)
except ValueError:
configured_devfs_ruleset = self.host.devfs.find_by_name(
self.config["devfs_ruleset"]
)
devfs_ruleset = libioc.DevfsRules.DevfsRuleset()
devfs_ruleset.clone(configured_devfs_ruleset)
if self._dhcp_enabled is True:
devfs_ruleset.append("add path 'bpf*' unhide")
if self._allow_mount_zfs == "1":
devfs_ruleset.append("add path zfs unhide")
if self.config["jail_zfs"] is True:
unhidden_parents: typing.Set[str] = set()
shared_datasets = self._zfs_share_storage.get_zfs_datasets()
if len(shared_datasets) > 0:
devfs_ruleset.append("add path zvol unhide")
for shared_dataset in shared_datasets:
current_dataset_name = "zvol"
for fragment in shared_dataset.name.split("/"):
current_dataset_name += f"/{fragment}"
if current_dataset_name in unhidden_parents:
continue
unhidden_parents.add(current_dataset_name)
devfs_ruleset.append(
f"add path {current_dataset_name} unhide"
)
devfs_ruleset.append(
f"add path {current_dataset_name}/* unhide"
)
if self.config["allow_vmm"] is True:
devfs_ruleset.append("add path vmm unhide")
devfs_ruleset.append("add path vmm/* unhide")
devfs_ruleset.append("add path nmdm* unhide")
# create if the final rule combination does not exist as ruleset
if devfs_ruleset not in self.host.devfs:
self.logger.verbose("New devfs ruleset combination")
# note: name and number of devfs_ruleset are both None
new_ruleset_number = self.host.devfs.new_ruleset(devfs_ruleset)
self.host.devfs.save()
return new_ruleset_number
else:
ruleset_line_position = self.host.devfs.index(devfs_ruleset)
return self.host.devfs[ruleset_line_position].number
@staticmethod
def __get_launch_command(jail_args: typing.List[str]) -> typing.List[str]:
return ["/usr/sbin/jail", "-c"] + jail_args
@property
def _launch_args(self) -> typing.List[str]:
config = self.config
vnet = (config["vnet"] is True)
value: str
jail_param_args: typing.List[str] = []
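        # Walk the kernel's security.jail.param.* sysctl tree and translate
        # each parameter into a jail(8) argument, taking the value from the
        # matching iocage config property where one exists.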
for sysctl_name, sysctl in libioc.JailParams.JailParams().items():
if sysctl.ctl_type == freebsd_sysctl.types.NODE:
# skip NODE
continue
if sysctl_name == "security.jail.param.devfs_ruleset":
value = str(self.devfs_ruleset)
elif sysctl_name == "security.jail.param.path":
value = self.root_dataset.mountpoint
elif sysctl_name == "security.jail.param.name":
value = self.identifier
elif sysctl_name == "security.jail.param.allow.mount.zfs":
value = str(self._allow_mount_zfs)
elif sysctl_name == "security.jail.param.vnet":
if vnet is False:
# vnet is only used when explicitly enabled
# (friendly to Kernels without VIMAGE support)
continue
value = "vnet"
elif vnet and sysctl_name.startswith("security.jail.param.ip"):
continue
else:
config_property_name = sysctl.iocage_name
if self.config._is_known_property(config_property_name):
value = config[config_property_name]
else:
continue
sysctl.value = value
jail_param_args.append(str(sysctl))
jail_args = [
f"exec.timeout={self._get_value('exec_timeout')}",
f"stop.timeout={self._get_value('stop_timeout')}",
f"exec.prestart=\"{self.get_hook_script_path('prestart')}\"",
f"exec.prestop=\"{self.get_hook_script_path('prestop')}\"",
f"exec.poststop=\"{self.get_hook_script_path('poststop')}\"",
f"exec.jail_user={self._get_value('exec_jail_user')}",
f"mount.fstab={self.fstab.path}",
f"mount.devfs={self._get_value('mount_devfs')}",
"allow.dying"
]
return jail_param_args + jail_args
def _launch_persistent_jail(
self,
passthru: bool
) -> libioc.helpers.CommandOutput:
command = self.__get_launch_command(self._launch_args + [
"persist",
f"exec.poststart=\"{self.get_hook_script_path('poststart')}\""
])
stdout, stderr, returncode = self._exec_host_command(
command=command,
passthru=passthru,
env=self.env
)
if returncode > 0:
self.logger.verbose(
f"Jail '{self.humanreadable_name}' was not started"
)
return stdout, stderr, returncode
self.state.query()
self.logger.verbose(
f"Jail '{self.humanreadable_name}' started with JID {self.jid}"
)
return stdout, stderr, returncode
def _exec_host_command(
self,
command: typing.List[str],
passthru: bool,
env: typing.Optional[typing.Dict[str, str]]=None
) -> libioc.helpers.CommandOutput:
try:
if passthru is True:
return libioc.helpers.exec_passthru(
command,
logger=self.logger,
env=env
)
else:
exec_events = libioc.helpers.exec_generator(
command,
logger=self.logger,
env=env
)
try:
while True:
self.logger.spam(
next(exec_events).decode("UTF-8"),
indent=1
)
except StopIteration as return_statement:
output: libioc.helpers.CommandOutput
output = return_statement.value # noqa: T484
return output
except (KeyboardInterrupt, SystemExit):
raise libioc.errors.JailExecutionAborted(
jail=self,
logger=None
)
def _launch_single_command_jail(
self,
jail_command: str,
passthru: bool
) -> libioc.helpers.CommandOutput:
command = self.__get_launch_command(self._launch_args + [
"nopersist",
f"exec.poststart=\"{self.get_hook_script_path('host_command')}\"",
"command=/usr/bin/true"
])
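        # Non-persistent launch: jail(8) starts with a no-op command, and the
        # "host_command" poststart hook then runs the created hook, executes
        # the actual command inside the jail and finally triggers poststop.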
_identifier = str(shlex.quote(self.identifier))
_jls_command = f"/usr/sbin/jls -j {_identifier} jid"
self._write_hook_script("host_command", "\n".join(
[
f"IOC_JID=$({_jls_command} 2>&1 || echo -1)",
"set -e",
f"/bin/sh {self.get_hook_script_path('created')}",
(
f"/usr/sbin/jexec {self.identifier} "
f"{self._relative_hook_script_dir}/command.sh"
" 2>&1"
),
f"/bin/sh {self.get_hook_script_path('poststop')}"
]
))
_ipfw_enabled = self.host.ipfw_enabled
self._write_hook_script("command", "\n".join(
(["set +e", "service ipfw onestop"] if _ipfw_enabled else []) + [
"set -e",
f". {self._relative_hook_script_dir}/start.sh",
jail_command,
]
))
stdout, stderr, returncode = self._exec_host_command(
command=command,
passthru=passthru,
env=self.env
)
if returncode > 0:
message = f"Jail {self.humanreadable_name} command failed."
else:
message = f"Jail {self.humanreadable_name} command finished."
self.logger.verbose(message)
return stdout, stderr, returncode
def _get_value(self, key: str) -> str:
"""Return jail command consumable config value string."""
return str(libioc.helpers.to_string(
self.config[key],
true="1",
false="0",
none=""
))
@property
def networks(self) -> typing.List[libioc.Network.Network]:
"""Return the list of a jails configured networks."""
networks = []
nics = self.config["interfaces"]
if nics is None:
return []
for nic in nics:
bridge = nics[nic]
try:
ipv4_addresses = self.config["ip4_addr"][nic]
except (KeyError, TypeError):
ipv4_addresses = []
try:
ipv6_addresses = self.config["ip6_addr"][nic]
except (KeyError, TypeError):
ipv6_addresses = []
net = libioc.Network.Network(
jail=self,
nic=nic,
ipv4_addresses=ipv4_addresses,
ipv6_addresses=ipv6_addresses,
bridge=bridge,
logger=self.logger
)
networks.append(net)
return networks
def _write_hook_script(self, hook_name: str, command_string: str) -> None:
file = self.get_hook_script_path(hook_name)
existed = os.path.isfile(file)
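        # Hooks that run on the host while the jail exists resolve the JID via
        # jls and export it as IOC_JID for the script body written below.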
if hook_name in ["created", "poststart", "prestop"]:
_identifier = str(shlex.quote(self.identifier))
_jls_command = f"/usr/sbin/jls -j {_identifier} jid"
command_string = (
"IOC_JID="
f"$({_jls_command} 2>&1 || echo -1)"
"\n" + command_string
)
if hook_name == "poststop":
command_string = (
"[ -f \"$(dirname $0)/.env\" ] && "
". \"$(dirname $0)/.env\""
"\n"
) + command_string
with open(file, "w") as f:
f.write("\n".join([
"#!/bin/sh",
command_string
]))
if existed is False:
shutil.chown(file, "root", "wheel")
os.chmod(file, 0o755) # nosec: executable script
@property
def launch_script_dir(self) -> str:
"""Return the launch-scripts directory path of the jail."""
return f"{self.jail.dataset.mountpoint}/launch-scripts"
@property
def script_env_path(self) -> str:
"""Return the absolute path to the jail script env file."""
return f"{self.launch_script_dir}/.env"
def get_hook_script_path(self, hook_name: str) -> str:
"""Return the absolute path to the hook script file."""
return f"{self.jail.launch_script_dir}/{hook_name}.sh"
def _start_vimage_network(self) -> typing.Tuple[
'libioc.Network.CreatedCommandList',
'libioc.Network.JailCommandList'
]:
self.logger.debug("Starting VNET/VIMAGE")
created: typing.List[str] = []
start: typing.List[str] = []
for network in self.networks:
_created, _start = network.setup()
created += _created
start += _start
return created, start
def _stop_network(self) -> typing.List[str]:
if self.config["vnet"]:
return self._stop_vimage_network()
else:
return self._stop_non_vimage_network()
def _stop_non_vimage_network(self) -> typing.List[str]:
commands: typing.List[str] = []
for protocol in (4, 6,):
config_value = self.config[f"ip{protocol}_addr"]
if config_value is None:
return commands
for nic, addresses in config_value.items():
if addresses is None:
continue
for address in addresses:
if isinstance(address, str):
# skip DHCP and ACCEPT_RTADV
continue
inet = "inet" if (protocol == 4) else "inet6"
commands.append(
f"/sbin/ifconfig {nic} {inet} {address} remove"
)
return commands
def _stop_vimage_network(self) -> typing.List[str]:
commands: typing.List[str] = []
for network in self.networks:
commands += network.teardown()
return commands
def _configure_localhost_commands(self) -> typing.List[str]:
return ["/sbin/ifconfig lo0 localhost"]
def _get_resource_limits_commands(self) -> typing.List[str]:
commands: typing.List[str] = []
if self.config['rlimits'] is False:
self.logger.verbose("Resource limits disabled")
return commands
for key in libioc.Config.Jail.Properties.ResourceLimit.properties:
try:
rlimit_prop = self.config[key]
if rlimit_prop.is_unset is True:
continue
except (KeyError, AttributeError):
continue
commands.append(" ".join([
"/usr/bin/rctl",
"-a",
f"jail:{self.identifier}:{key}:{rlimit_prop.limit_string}"
]))
return commands
def _clear_resource_limits(self) -> typing.List[str]:
if self.config['rlimits'] is False:
return []
self.logger.verbose("Clearing resource limits")
return [f"/usr/bin/rctl -r jail:{self.identifier} 2>/dev/null || true"]
@property
def _allow_mount(self) -> int:
if self._allow_mount_zfs == 1:
return 1
return int(self._get_value("allow_mount"))
@property
def _allow_mount_zfs(self) -> int:
if self.config["jail_zfs"] is True:
return 1
return int(self._get_value("allow_mount_zfs"))
def _configure_routes_commands(self) -> typing.List[str]:
defaultrouter = self.config["defaultrouter"]
defaultrouter6 = self.config["defaultrouter6"]
commands: typing.List[str] = []
if defaultrouter is not None:
commands += list(defaultrouter.apply(jail=self))
if defaultrouter6 is not None:
commands += list(defaultrouter6.apply(jail=self))
if len(commands) == 0:
self.logger.spam("no static routes configured")
return commands
def require_jail_is_template(self, log_errors: bool=True) -> None:
"""Raise JailIsTemplate exception if the jail is a template."""
if self.config['template'] is False:
raise libioc.errors.JailNotTemplate(
jail=self,
logger=(self.logger if log_errors else None)
)
def require_jail_match_hostid(self, log_errors: bool=True) -> None:
"""Raise JailIsTemplate exception if the jail is a template."""
if self.hostid_check_ok is False:
raise libioc.errors.JailHostIdMismatch(
jail=self,
host_hostid=self.host.id,
logger=(self.logger if log_errors else None)
)
@property
def hostid_check_ok(self) -> bool:
"""Return true if the hostid check passes."""
if self.config["hostid_strict_check"] is False:
self.logger.spam("hostid_strict_check is disabled")
return True
jail_hostid = self.config["hostid"]
if (jail_hostid is None) or (jail_hostid == self.host.id):
return True
return False
def require_storage_backend(self, log_errors: bool=True) -> None:
"""Raise if the jail was not initialized with a storage backend."""
if self.storage_backend is None:
raise Exception("The jail has no storage backend.")
def require_jail_not_template(self, log_errors: bool=True) -> None:
"""Raise JailIsTemplate exception if the jail is a template."""
if self.config['template'] is True:
raise libioc.errors.JailIsTemplate(
jail=self,
logger=(self.logger if log_errors else None)
)
def require_jail_not_existing(self, log_errors: bool=True) -> None:
"""Raise JailAlreadyExists exception if the jail already exists."""
if self.exists:
raise libioc.errors.JailAlreadyExists(
jail=self,
logger=(self.logger if log_errors else None)
)
def require_jail_existing(self, log_errors: bool=True) -> None:
"""Raise JailDoesNotExist exception if the jail does not exist."""
if not self.exists:
raise libioc.errors.JailDoesNotExist(
jail=self,
logger=(self.logger if log_errors else None)
)
def require_jail_stopped(self, log_errors: bool=True) -> None:
"""Raise JailAlreadyRunning exception if the jail is running."""
if self.running is not False:
raise libioc.errors.JailAlreadyRunning(
jail=self,
logger=(self.logger if log_errors else None)
)
def require_jail_running(self, log_errors: bool=True) -> None:
"""Raise JailNotRunning exception if the jail is stopped."""
if not self.running:
raise libioc.errors.JailNotRunning(
jail=self,
logger=(self.logger if log_errors else None)
)
def _teardown_mounts(self) -> typing.List[str]:
commands: typing.List[str] = []
fstab_destinations = [line["destination"] for line in self.fstab]
system_mountpoints = list(filter(
os.path.isdir,
map(
self._get_absolute_path_from_jail_asset,
[
"/dev/fd",
"/dev",
"/proc",
"/root/compat/linux/proc",
"/root/etcupdate",
"/root/usr/ports",
"/root/usr/src",
"/tmp" # nosec: B108
]
)
))
mountpoints = fstab_destinations + system_mountpoints
commands.append(" ".join(libioc.helpers.umount_command(
mountpoints,
force=True,
ignore_error=True
)))
commands.append(" ".join(libioc.helpers.umount_command(
["-a", "-F", self.fstab.path],
force=True,
ignore_error=True
)))
if self.config.legacy is True:
commands.append(" | ".join([
"mount -t nullfs",
"sed -r 's/(.+) on (.+) \\(nullfs, .+\\)$/\\2/'",
f"grep '^{self.root_dataset.mountpoint}/'",
"xargs umount"
]))
return commands
def _get_absolute_path_from_jail_asset(
self,
value: str
) -> libioc.Types.AbsolutePath:
return libioc.Types.AbsolutePath(f"{self.root_path}{value}")
def _resolve_name(self, text: str) -> str:
if (text is None) or (len(text) == 0):
raise libioc.errors.JailNotSupplied(logger=self.logger)
resource_selector = libioc.ResourceSelector.ResourceSelector(
name=text,
logger=self.logger
)
root_datasets = resource_selector.filter_datasets(self.host.datasets)
for datasets_key, datasets in root_datasets.items():
for dataset in list(datasets.jails.children):
dataset_name = str(
dataset.name[(len(datasets.jails.name) + 1):]
)
humanreadable_name = libioc.helpers.to_humanreadable_name(
dataset_name
)
possible_names = [dataset_name, humanreadable_name]
if resource_selector.name in possible_names:
return dataset_name
raise libioc.errors.JailNotFound(text, logger=self.logger)
@property
def name(self) -> str:
"""Return the configured jail id."""
return str(self.config["id"])
@property
def full_name(self) -> str:
"""
Return the full identifier of a jail.
When more than one root dataset is managed by iocage, the full source
and name are returned. Otherwise just the name.
For example `mydataset/jailname` or just `jailname`.
"""
if len(self.host.datasets) > 1:
return f"{self.source}/{self.name}"
else:
return self.name
@property
def humanreadable_name(self) -> str:
"""
Return the human-readable identifier to print in logs and CLI output.
Whenever a Jail is found to have a UUID as identifier,
a shortened string of the first 8 characters is returned
"""
try:
return str(libioc.helpers.to_humanreadable_name(self.name))
except KeyError:
raise libioc.errors.JailUnknownIdentifier(
logger=self.logger
)
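    # Illustrative note (editor's sketch, not part of the original source):
    # assuming a jail whose id is the UUID "2a55b2dc-6a39-11e8-a299-0cc47a3c70ca",
    # the human-readable name would be the shortened "2a55b2dc".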
@property
def stopped(self) -> bool:
"""Return True if a jail is stopped."""
return self.running is not True
@property
def running(self) -> bool:
"""Return True if a jail is running."""
return self.jid is not None
@property
def jid(self) -> typing.Optional[int]:
"""Return a jails JID if it is running or None."""
if "_state" not in object.__dir__(self):
# force state init when jid was requested
self._init_state()
try:
return int(self.state["jid"])
except (KeyError, TypeError):
return None
@property
def env(self) -> typing.Dict[str, str]:
"""Return the environment variables for hook scripts."""
jail_env: typing.Dict[str, str]
if self.config["exec_clean"] is False:
jail_env = os.environ.copy()
else:
jail_env = {}
for prop in self.config.all_properties:
prop_name = f"IOC_{prop.replace('.', '_').upper()}"
jail_env[prop_name] = str(self.config[prop])
jail_env["IOC_JAIL_PATH"] = self.root_dataset.mountpoint
jail_env["IOC_JID"] = str(self.jid)
jail_env["PATH"] = ":".join((
"/sbin",
"/bin",
"/usr/sbin",
"/usr/bin",
"/usr/local/sbin",
"/usr/local/bin",
))
return jail_env
@property
def identifier(self) -> str:
"""Return the jail id used in snapshots, jls, etc."""
config = object.__getattribute__(self, 'config')
return f"{self.source}-{config['id']}"
@property
def release(self) -> 'libioc.Release.ReleaseGenerator':
"""Return the libioc.Release instance linked with the jail."""
return libioc.Release.ReleaseGenerator(
name=self.config["release"],
root_datasets_name=self.root_datasets_name,
logger=self.logger,
host=self.host,
zfs=self.zfs
)
@property
def release_snapshot(self) -> libzfs.ZFSSnapshot:
"""Return the matching release verion snaphsot."""
snapshot: libzfs.ZFSSnapshot = self.release.current_snapshot
return snapshot
def __getattribute__(self, key: str) -> typing.Any:
"""Get an attribute from the jail, state or configuration."""
try:
return object.__getattribute__(self, key)
except AttributeError:
pass
if "_state" in object.__dir__(self):
try:
return object.__getattribute__(self, "state")[key]
except (AttributeError, KeyError):
pass
raise AttributeError(f"Jail property {key} not found")
def __dir__(self) -> typing.List[str]:
"""Get all accessible properties of a jail."""
properties = set()
for prop in dict.__dir__(self):
if not prop.startswith("_"):
properties.add(prop)
return list(properties)
def __eq__(self, other: typing.Any) -> bool:
"""
Compare two Jails by their name.
The jail is identified by its full name, including the iocage root
dataset name in case there is more than one enabled on the host.
"""
        if isinstance(other, JailGenerator) is False:
return False
return (self.full_name == other.full_name) is True
class Jail(JailGenerator):
"""Synchronous wrapper of JailGenerator."""
def start( # noqa: T484
self,
*args,
**kwargs
) -> typing.List['libioc.events.IocEvent']:
"""Start the jail."""
return list(JailGenerator.start(self, *args, **kwargs))
def stop( # noqa: T484
self,
*args,
**kwargs
) -> typing.List['libioc.events.IocEvent']:
"""Stop the jail."""
return list(JailGenerator.stop(self, *args, **kwargs))
def rename( # noqa: T484
self,
*args,
**kwargs
) -> typing.List['libioc.events.IocEvent']:
"""Rename the jail."""
return list(JailGenerator.rename(self, *args, **kwargs))
def _update_fstab_paths( # noqa: T484
self,
*args,
**kwargs
) -> typing.List['libioc.events.IocEvent']:
"""Update a path in the whole fstab file."""
return list(JailGenerator._update_fstab_paths(self, *args, **kwargs))
def destroy( # noqa: T484
self,
force: bool=False
) -> typing.List['libioc.events.IocEvent']:
"""
        Destroy a Jail and its datasets.
Args:
force (bool): (default=False)
                This flag controls whether an existing jail should be shut down
before destroying the dataset. By default destroying a jail
requires it to be stopped.
"""
return list(JailGenerator.destroy(self, force=force))
def fork_exec( # noqa: T484
self,
command: str,
passthru: bool=False,
event_scope: typing.Optional['libioc.events.Scope']=None,
dependant_jails_seen: typing.List['JailGenerator']=[],
start_dependant_jails: bool=True,
**temporary_config_override
) -> str:
"""
Start a jail, run a command and shut it down immediately.
Args:
command (string):
The command to execute in the jail.
passthru (bool):
Execute commands in an interactive shell.
event_scope (libioc.lib.events.Scope): (default=None)
Provide an existing libiocage event scope or automatically
create a new one instead.
dependant_jails_seen (list[libioc.JailGenerator]):
Jail depends can have circular dependencies. By passing a list
of already started jails to the start command, iocage does not
need to query their state, because they are known to be running
                already. This argument is internally used when starting a jail's
                dependants recursively.
start_dependant_jails (bool):
When disabled, no dependant jails will be started.
**temporary_config_override (dict(str, any)):
                Other named arguments temporarily override JailConfig properties.
For example:
jail = libioc.JailGenerator("myjail")
events = jail.fork_exec("ifconfig", vnet=False)
print(list(events))
"""
events = JailGenerator.fork_exec(
self,
command=command,
passthru=passthru,
event_scope=event_scope,
dependant_jails_seen=dependant_jails_seen,
start_dependant_jails=start_dependant_jails,
**temporary_config_override
)
for event in events:
if isinstance(event, libioc.events.JailLaunch) and event.done:
return str(event.stdout)
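# Usage sketch (editor's assumption, not part of the original source); the
# jail name "myjail" and the command are hypothetical:
#
#   jail = libioc.Jail("myjail")
#   output = jail.fork_exec("ifconfig")  # start, run the command, stop again
#   print(output)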
| [] | [] | [] | [] | [] | python | 0 | 0 | |
grpc/cmd/cproto/main.go
|
// Copyright 2016 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"go.chromium.org/luci/common/flag/stringlistflag"
"go.chromium.org/luci/common/flag/stringmapflag"
"go.chromium.org/luci/common/logging"
"go.chromium.org/luci/common/logging/gologger"
"go.chromium.org/luci/common/system/exitcode"
)
var (
verbose = flag.Bool("verbose", false, "print debug messages to stderr")
protoImportPaths = stringlistflag.Flag{}
pathMap = stringmapflag.Value{}
withDiscovery = flag.Bool(
"discovery", true,
"generate pb.discovery.go file")
descFile = flag.String(
"desc",
"",
"Writes a FileDescriptorSet file containing all the the .proto files and their transitive dependencies",
)
)
// Well-known Google proto packages -> go packages they are implemented in.
var googlePackages = map[string]string{
"google/protobuf/any.proto": "github.com/golang/protobuf/ptypes/any",
"google/protobuf/descriptor.proto": "github.com/golang/protobuf/protoc-gen-go/descriptor",
"google/protobuf/duration.proto": "github.com/golang/protobuf/ptypes/duration",
"google/protobuf/empty.proto": "github.com/golang/protobuf/ptypes/empty",
"google/protobuf/struct.proto": "github.com/golang/protobuf/ptypes/struct",
"google/protobuf/timestamp.proto": "github.com/golang/protobuf/ptypes/timestamp",
"google/protobuf/wrappers.proto": "github.com/golang/protobuf/ptypes/wrappers",
"google/rpc/code.proto": "google.golang.org/genproto/googleapis/rpc/code",
"google/rpc/error_details.proto": "google.golang.org/genproto/googleapis/rpc/errdetails",
"google/rpc/status.proto": "google.golang.org/genproto/googleapis/rpc/status",
}
// compile runs protoc on protoFiles. protoFiles must be relative to dir.
func compile(c context.Context, gopath, importPaths, protoFiles []string, dir, descSetOut string) (outDir string, err error) {
// make it absolute to find in $GOPATH and because protoc wants paths
// to be under proto paths.
if dir, err = filepath.Abs(dir); err != nil {
return "", err
}
// By default place go files in CWD,
// unless proto files are under a $GOPATH/src.
goOut := "."
// Combine user-defined proto paths with $GOPATH/src.
allProtoPaths := make([]string, 0, len(importPaths)+len(gopath)+1)
for _, p := range importPaths {
if p, err = filepath.Abs(p); err != nil {
return "", err
}
allProtoPaths = append(allProtoPaths, p)
}
for _, p := range gopath {
path := filepath.Join(p, "src")
if info, err := os.Stat(path); os.IsNotExist(err) || !info.IsDir() {
continue
} else if err != nil {
return "", err
}
allProtoPaths = append(allProtoPaths, path)
// If the dir is under $GOPATH/src, generate .go files near .proto files.
if strings.HasPrefix(dir, path) {
goOut = path
}
// Include well-known protobuf types.
wellKnownProtoDir := filepath.Join(path, "go.chromium.org", "luci", "grpc", "proto")
if info, err := os.Stat(wellKnownProtoDir); err == nil && info.IsDir() {
allProtoPaths = append(allProtoPaths, wellKnownProtoDir)
}
}
// Find where Go files will be generated.
for _, p := range allProtoPaths {
if strings.HasPrefix(dir, p) {
outDir = filepath.Join(goOut, dir[len(p):])
break
}
}
if outDir == "" {
return "", fmt.Errorf("proto files are neither under $GOPATH/src nor -proto-path")
}
args := []string{
"--descriptor_set_out=" + descSetOut,
"--include_imports",
"--include_source_info",
}
for _, p := range allProtoPaths {
args = append(args, "--proto_path="+p)
}
var params []string
for k, v := range pathMap {
params = append(params, fmt.Sprintf("M%s=%s", k, v))
}
params = append(params, "plugins=grpc")
args = append(args, fmt.Sprintf("--go_out=%s:%s", strings.Join(params, ","), goOut))
for _, f := range protoFiles {
		// We must prepend a Go-style absolute path to the filename, otherwise
		// protoc will complain that the files we specify here are not found
		// in any of the proto paths.
//
// We cannot specify --proto-path=. because of the following scenario:
// we have file structure
// - A
// - x.proto, imports "y.proto"
// - y.proto
// - B
// - z.proto, imports "github.com/user/repo/A/x.proto"
// If cproto is executed in B, proto path does not include A, so y.proto
// is not found.
// The solution is to always use absolute paths.
args = append(args, path.Join(dir, f))
}
logging.Infof(c, "protoc %s", strings.Join(args, " "))
protoc := exec.Command("protoc", args...)
protoc.Stdout = os.Stdout
protoc.Stderr = os.Stderr
return outDir, protoc.Run()
}
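// Illustrative note (editor's sketch, not from the original source): for a
// hypothetical file "service.proto" in $GOPATH/src/example.com/api, the
// assembled invocation resembles:
//
//   protoc --descriptor_set_out=<desc> --include_imports --include_source_info \
//     --proto_path=$GOPATH/src \
//     --go_out=Mgoogle/rpc/status.proto=google.golang.org/genproto/googleapis/rpc/status,plugins=grpc:$GOPATH/src \
//     $GOPATH/src/example.com/api/service.proto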
func run(c context.Context, goPath []string, dir string) error {
if s, err := os.Stat(dir); os.IsNotExist(err) {
return fmt.Errorf("%s does not exist", dir)
} else if err != nil {
return err
} else if !s.IsDir() {
return fmt.Errorf("%s is not a directory", dir)
}
// Find .proto files
protoFiles, err := findProtoFiles(dir)
if err != nil {
return err
}
if len(protoFiles) == 0 {
return fmt.Errorf(".proto files not found")
}
// Compile all .proto files.
descPath := *descFile
if descPath == "" {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
return err
}
defer os.RemoveAll(tmpDir)
descPath = filepath.Join(tmpDir, "package.desc")
}
outDir, err := compile(c, goPath, protoImportPaths, protoFiles, dir, descPath)
if err != nil {
return err
}
// Transform .go files
var goPkg, protoPkg string
for _, p := range protoFiles {
goFile := filepath.Join(outDir, strings.TrimSuffix(p, ".proto")+".pb.go")
var t transformer
if err := t.transformGoFile(goFile); err != nil {
return fmt.Errorf("could not transform %s: %s", goFile, err)
}
if protoPkg == "" && len(t.services) > 0 {
protoPkg = t.services[0].protoPackageName
}
if goPkg == "" {
goPkg = t.PackageName
}
if strings.HasSuffix(p, "_test.proto") {
newName := strings.TrimSuffix(goFile, ".go") + "_test.go"
if err := os.Rename(goFile, newName); err != nil {
return err
}
}
}
if *withDiscovery && goPkg != "" && protoPkg != "" {
// Generate pb.prpc.go
discoveryFile := "pb.discovery.go"
if err := genDiscoveryFile(c, filepath.Join(outDir, discoveryFile), descPath, protoPkg, goPkg); err != nil {
return err
}
}
return nil
}
func setupLogging(c context.Context) context.Context {
lvl := logging.Warning
if *verbose {
lvl = logging.Debug
}
return logging.SetLevel(gologger.StdConfig.Use(context.Background()), lvl)
}
func usage() {
fmt.Fprintln(os.Stderr,
`Compiles all .proto files in a directory to .go with grpc+prpc support.
usage: cproto [flags] [dir]
If the dir is not under $GOPATH/src, places generated Go files relative to $CWD.
Flags:`)
flag.PrintDefaults()
}
func main() {
flag.Var(
&protoImportPaths,
"proto-path",
"additional proto import paths besides $GOPATH/src; "+
"May be relative to CWD; "+
"May be specified multiple times.")
flag.Var(
&pathMap,
"map-package",
"Maps a proto path to a go package name. "+
"May be specified multiple times.")
flag.Usage = usage
flag.Parse()
for k, v := range googlePackages {
if _, ok := pathMap[k]; !ok {
pathMap[k] = v
}
}
if flag.NArg() > 1 {
flag.Usage()
os.Exit(1)
}
dir := "."
if flag.NArg() == 1 {
dir = flag.Arg(0)
}
c := setupLogging(context.Background())
goPath := strings.Split(os.Getenv("GOPATH"), string(filepath.ListSeparator))
if err := run(c, goPath, dir); err != nil {
exitCode := 1
if rc, ok := exitcode.Get(err); ok {
exitCode = rc
} else {
fmt.Fprintln(os.Stderr, err.Error())
}
os.Exit(exitCode)
}
}
// findProtoFiles returns .proto files in dir. The returned file paths
// are relative to dir.
func findProtoFiles(dir string) ([]string, error) {
files, err := filepath.Glob(filepath.Join(dir, "*.proto"))
if err != nil {
return nil, err
}
for i, f := range files {
files[i] = filepath.Base(f)
}
return files, err
}
// isInPackage returns true if the filename is a part of the package.
func isInPackage(fileName string, pkg string) (bool, error) {
dir, err := filepath.Abs(filepath.Dir(fileName))
if err != nil {
return false, err
}
dir = path.Clean(dir)
pkg = path.Clean(pkg)
if !strings.HasSuffix(dir, pkg) {
return false, nil
}
src := strings.TrimSuffix(dir, pkg)
src = path.Clean(src)
goPaths := strings.Split(os.Getenv("GOPATH"), string(filepath.ListSeparator))
for _, goPath := range goPaths {
if filepath.Join(goPath, "src") == src {
return true, nil
}
}
return false, nil
}
| ["\"GOPATH\"", "\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
cryptocurrency/asgi.py
|
"""
ASGI config for cryptocurrency project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cryptocurrency.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
vendor/github.com/containers/common/pkg/auth/auth.go
|
package auth
import (
"bufio"
"context"
"fmt"
"os"
"strings"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh/terminal"
)
// GetDefaultAuthFile returns env value REGISTRY_AUTH_FILE as default --authfile path
// used in multiple --authfile flag definitions
func GetDefaultAuthFile() string {
return os.Getenv("REGISTRY_AUTH_FILE")
}
// CheckAuthFile validates filepath given by --authfile
// used by commands that have an --authfile flag
func CheckAuthFile(authfile string) error {
if authfile == "" {
return nil
}
if _, err := os.Stat(authfile); err != nil {
return errors.Wrapf(err, "error checking authfile path %s", authfile)
}
return nil
}
// systemContextWithOptions returns a version of sys
// updated with authFile and certDir values (if they are not "").
// NOTE: this is a shallow copy that can be used and updated, but may share
// data with the original parameter.
func systemContextWithOptions(sys *types.SystemContext, authFile, certDir string) *types.SystemContext {
if sys != nil {
copy := *sys
		sys = &copy
} else {
sys = &types.SystemContext{}
}
if authFile != "" {
sys.AuthFilePath = authFile
}
if certDir != "" {
sys.DockerCertPath = certDir
}
return sys
}
// Login implements a “log in” command with the provided opts and args
// reading the password from opts.Stdin or the options in opts.
func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginOptions, args []string) error {
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, opts.CertDir)
var (
server string
err error
)
if len(args) > 1 {
return errors.Errorf("login accepts only one registry to login to")
}
if len(args) == 0 {
if !opts.AcceptUnspecifiedRegistry {
return errors.Errorf("please provide a registry to login to")
}
if server, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
return err
}
logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", server)
} else {
server = getRegistryName(args[0])
}
authConfig, err := config.GetCredentials(systemContext, server)
if err != nil {
return errors.Wrapf(err, "error reading auth file")
}
if opts.GetLoginSet {
if authConfig.Username == "" {
return errors.Errorf("not logged into %s", server)
}
fmt.Fprintf(opts.Stdout, "%s\n", authConfig.Username)
return nil
}
if authConfig.IdentityToken != "" {
return errors.Errorf("currently logged in, auth file contains an Identity token")
}
password := opts.Password
if opts.StdinPassword {
var stdinPasswordStrBuilder strings.Builder
if opts.Password != "" {
return errors.Errorf("Can't specify both --password-stdin and --password")
}
if opts.Username == "" {
return errors.Errorf("Must provide --username with --password-stdin")
}
scanner := bufio.NewScanner(opts.Stdin)
for scanner.Scan() {
fmt.Fprint(&stdinPasswordStrBuilder, scanner.Text())
}
password = stdinPasswordStrBuilder.String()
}
// If no username and no password is specified, try to use existing ones.
if opts.Username == "" && password == "" && authConfig.Username != "" && authConfig.Password != "" {
fmt.Println("Authenticating with existing credentials...")
if err := docker.CheckAuth(ctx, systemContext, authConfig.Username, authConfig.Password, server); err == nil {
fmt.Fprintln(opts.Stdout, "Existing credentials are valid. Already logged in to", server)
return nil
}
fmt.Fprintln(opts.Stdout, "Existing credentials are invalid, please enter valid username and password")
}
username, password, err := getUserAndPass(opts, password, authConfig.Username)
if err != nil {
return errors.Wrapf(err, "error getting username and password")
}
if err = docker.CheckAuth(ctx, systemContext, username, password, server); err == nil {
// Write the new credentials to the authfile
if err = config.SetAuthentication(systemContext, server, username, password); err != nil {
return err
}
}
if err == nil {
fmt.Fprintln(opts.Stdout, "Login Succeeded!")
return nil
}
if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
logrus.Debugf("error logging into %q: %v", server, unauthorized)
return errors.Errorf("error logging into %q: invalid username/password", server)
}
return errors.Wrapf(err, "error authenticating creds for %q", server)
}
// getRegistryName scrubs and parses the input to get the server name
func getRegistryName(server string) string {
// removes 'http://' or 'https://' from the front of the
// server/registry string if either is there. This will be mostly used
// for user input from 'Buildah login' and 'Buildah logout'.
server = strings.TrimPrefix(strings.TrimPrefix(server, "https://"), "http://")
// gets the registry from the input. If the input is of the form
// quay.io/myuser/myimage, it will parse it and just return quay.io
split := strings.Split(server, "/")
if len(split) > 1 {
return split[0]
}
return split[0]
}
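// Illustrative note (editor's sketch, not from the original source):
// getRegistryName("https://quay.io/myuser/myimage") returns "quay.io", and
// getRegistryName("docker.io") returns "docker.io" unchanged.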
// getUserAndPass gets the username and password from STDIN if not given
// using the -u and -p flags. If the username prompt is left empty, the
// displayed userFromAuthFile will be used instead.
func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (string, string, error) {
var err error
reader := bufio.NewReader(opts.Stdin)
username := opts.Username
if username == "" {
if userFromAuthFile != "" {
fmt.Fprintf(opts.Stdout, "Username (%s): ", userFromAuthFile)
} else {
fmt.Fprint(opts.Stdout, "Username: ")
}
username, err = reader.ReadString('\n')
if err != nil {
return "", "", errors.Wrapf(err, "error reading username")
}
		// If the user just hit enter, use the displayed user from
		// the authentication file. This allows a lazy
		// `$ buildah login -p $NEW_PASSWORD` without specifying the
		// user.
if strings.TrimSpace(username) == "" {
username = userFromAuthFile
}
}
if password == "" {
fmt.Fprint(opts.Stdout, "Password: ")
pass, err := terminal.ReadPassword(0)
if err != nil {
return "", "", errors.Wrapf(err, "error reading password")
}
password = string(pass)
fmt.Fprintln(opts.Stdout)
}
return strings.TrimSpace(username), password, err
}
// Logout implements a “log out” command with the provided opts and args
func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []string) error {
if err := CheckAuthFile(opts.AuthFile); err != nil {
return err
}
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, "")
var (
server string
err error
)
if len(args) > 1 {
return errors.Errorf("logout accepts only one registry to logout from")
}
if len(args) == 0 && !opts.All {
if !opts.AcceptUnspecifiedRegistry {
return errors.Errorf("please provide a registry to logout from")
}
if server, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
return err
}
logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", server)
}
if len(args) != 0 {
if opts.All {
return errors.Errorf("--all takes no arguments")
}
server = getRegistryName(args[0])
}
if opts.All {
if err := config.RemoveAllAuthentication(systemContext); err != nil {
return err
}
fmt.Fprintln(opts.Stdout, "Removed login credentials for all registries")
return nil
}
err = config.RemoveAuthentication(systemContext, server)
switch err {
case nil:
fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", server)
return nil
case config.ErrNotLoggedIn:
return errors.Errorf("Not logged into %s\n", server)
default:
return errors.Wrapf(err, "error logging out of %q", server)
}
}
// defaultRegistryWhenUnspecified returns first registry from search list of registry.conf
// used by login/logout when registry argument is not specified
func defaultRegistryWhenUnspecified(systemContext *types.SystemContext) (string, error) {
registriesFromFile, err := sysregistriesv2.UnqualifiedSearchRegistries(systemContext)
if err != nil {
return "", errors.Wrapf(err, "error getting registry from registry.conf, please specify a registry")
}
if len(registriesFromFile) == 0 {
return "", errors.Errorf("no registries found in registries.conf, a registry must be provided")
}
return registriesFromFile[0], nil
}
| ["\"REGISTRY_AUTH_FILE\""] | [] | ["REGISTRY_AUTH_FILE"] | [] | ["REGISTRY_AUTH_FILE"] | go | 1 | 0 | |
exporter/sentinels_test.go
|
package exporter
import (
"fmt"
"os"
"strings"
"testing"
"github.com/gomodule/redigo/redis"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
func TestExtractInfoMetricsSentinel(t *testing.T) {
if os.Getenv("TEST_REDIS_SENTINEL_URI") == "" {
t.Skipf("TEST_REDIS_SENTINEL_URI not set - skipping")
}
addr := os.Getenv("TEST_REDIS_SENTINEL_URI")
e, _ := NewRedisExporter(
addr,
Options{Namespace: "test"},
)
c, err := redis.DialURL(addr)
if err != nil {
t.Fatalf("Couldn't connect to %#v: %#v", addr, err)
}
infoAll, err := redis.String(doRedisCmd(c, "INFO", "ALL"))
if err != nil {
t.Logf("Redis INFO ALL err: %s", err)
infoAll, err = redis.String(doRedisCmd(c, "INFO"))
if err != nil {
t.Fatalf("Redis INFO err: %s", err)
}
}
chM := make(chan prometheus.Metric)
go func() {
e.extractInfoMetrics(chM, infoAll, 0)
close(chM)
}()
want := map[string]bool{
"sentinel_tilt": false,
"sentinel_running_scripts": false,
"sentinel_scripts_queue_length": false,
"sentinel_simulate_failure_flags": false,
"sentinel_masters": false,
"sentinel_master_status": false,
"sentinel_master_slaves": false,
"sentinel_master_sentinels": false,
}
for m := range chM {
for k := range want {
if strings.Contains(m.Desc().String(), k) {
want[k] = true
}
}
}
for k, found := range want {
if !found {
t.Errorf("didn't find %s", k)
}
}
}
type sentinelData struct {
k, v string
name, status, address string
slaves, sentinels float64
ok bool
}
func TestParseSentinelMasterString(t *testing.T) {
tsts := []sentinelData{
{k: "master0", v: "name=user03,status=sdown,address=192.169.2.52:6381,slaves=1,sentinels=5", name: "user03", status: "sdown", address: "192.169.2.52:6381", slaves: 1, sentinels: 5, ok: true},
{k: "master1", v: "name=master,status=ok,address=127.0.0.1:6379,slaves=999,sentinels=500", name: "master", status: "ok", address: "127.0.0.1:6379", slaves: 999, sentinels: 500, ok: true},
{k: "master", v: "name=user03", ok: false},
{k: "masterA", v: "status=ko", ok: false},
{k: "master0", v: "slaves=abc,sentinels=0", ok: false},
{k: "master0", v: "slaves=0,sentinels=abc", ok: false},
}
for _, tst := range tsts {
name := fmt.Sprintf("%s---%s", tst.k, tst.v)
t.Run(name, func(t *testing.T) {
if masterName, masterStatus, masterAddress, masterSlaves, masterSentinels, ok := parseSentinelMasterString(tst.k, tst.v); true {
if ok != tst.ok {
t.Errorf("failed for: master:%s data:%s", tst.k, tst.v)
return
}
if masterName != tst.name || masterStatus != tst.status || masterAddress != tst.address || masterSlaves != tst.slaves || masterSentinels != tst.sentinels {
t.Errorf("values not matching:\nstring:%s\ngot:%s %s %s %f %f", tst.v, masterName, masterStatus, masterAddress, masterSlaves, masterSentinels)
}
}
})
}
}
func TestExtractSentinelMetricsForRedis(t *testing.T) {
if os.Getenv("TEST_REDIS_URI") == "" {
t.Skipf("TEST_REDIS_URI not set - skipping")
}
addr := os.Getenv("TEST_REDIS_URI")
e, _ := NewRedisExporter(
addr,
Options{Namespace: "test"},
)
c, err := redis.DialURL(addr)
if err != nil {
t.Fatalf("Couldn't connect to %#v: %#v", addr, err)
}
defer c.Close()
chM := make(chan prometheus.Metric)
go func() {
e.extractSentinelMetrics(chM, c)
close(chM)
}()
want := map[string]bool{
"sentinel_master_ok_sentinels": false,
"sentinel_master_ok_slaves": false,
}
for m := range chM {
for k := range want {
if strings.Contains(m.Desc().String(), k) {
want[k] = true
}
}
}
for k, found := range want {
if found {
t.Errorf("Found sentinel metric %s for redis instance", k)
}
}
}
func TestExtractSentinelMetricsForSentinel(t *testing.T) {
if os.Getenv("TEST_REDIS_SENTINEL_URI") == "" {
t.Skipf("TEST_REDIS_SENTINEL_URI not set - skipping")
}
addr := os.Getenv("TEST_REDIS_SENTINEL_URI")
e, _ := NewRedisExporter(
addr,
Options{Namespace: "test"},
)
c, err := redis.DialURL(addr)
if err != nil {
t.Fatalf("Couldn't connect to %#v: %#v", addr, err)
}
defer c.Close()
infoAll, err := redis.String(doRedisCmd(c, "INFO", "ALL"))
if err != nil {
t.Logf("Redis INFO ALL err: %s", err)
infoAll, err = redis.String(doRedisCmd(c, "INFO"))
if err != nil {
t.Fatalf("Redis INFO err: %s", err)
}
}
chM := make(chan prometheus.Metric)
if strings.Contains(infoAll, "# Sentinel") {
go func() {
e.extractSentinelMetrics(chM, c)
close(chM)
}()
} else {
t.Fatalf("Couldn't find sentinel section in Redis INFO: %s", infoAll)
}
want := map[string]bool{
"sentinel_master_ok_sentinels": false,
"sentinel_master_ok_slaves": false,
}
for m := range chM {
for k := range want {
if strings.Contains(m.Desc().String(), k) {
want[k] = true
}
}
}
for k, found := range want {
if !found {
t.Errorf("didn't find metric %s", k)
}
}
}
type sentinelSentinelsData struct {
name string
sentinelDetails []interface{}
labels []string
expectedMetricValue map[string]int
}
func TestProcessSentinelSentinels(t *testing.T) {
if os.Getenv("TEST_REDIS_SENTINEL_URI") == "" {
t.Skipf("TEST_REDIS_SENTINEL_URI not set - skipping")
}
addr := os.Getenv("TEST_REDIS_SENTINEL_URI")
e, _ := NewRedisExporter(
addr,
Options{Namespace: "test"},
)
oneOkSentinelExpectedMetricValue := map[string]int{
"sentinel_master_ok_sentinels": 1,
}
twoOkSentinelExpectedMetricValue := map[string]int{
"sentinel_master_ok_sentinels": 2,
}
tsts := []sentinelSentinelsData{
{"1/1 okay sentinel", []interface{}{[]interface{}{[]byte("")}}, []string{"mymaster", "172.17.0.7:26379"}, oneOkSentinelExpectedMetricValue},
{"1/3 okay sentinel", []interface{}{[]interface{}{[]byte("name"), []byte("284bc2ef46881bd71e81610152cb96031d211d28"), []byte("ip"), []byte("172.17.0.8"), []byte("port"), []byte("26379"), []byte("runid"), []byte("284bc2ef46881bd71e81610152cb96031d211d28"), []byte("flags"), []byte("o_down,s_down,sentinel"), []byte("link-pending-commands"), []byte("38"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("11828891"), []byte("last-ok-ping-reply"), []byte("11829539"), []byte("last-ping-reply"), []byte("11829539"), []byte("s-down-time"), []byte("11823816"), []byte("down-after-milliseconds"), []byte("5000"), []byte("last-hello-message"), []byte("11829434"), []byte("voted-leader"), []byte("?"), []byte("voted-leader-epoch"), []byte("0")}, []interface{}{[]byte("name"), []byte("c3ab3cdcaeb193bb49b16d4d3da88def984ab3bf"), []byte("ip"), []byte("172.17.0.7"), []byte("port"), []byte("26379"), []byte("runid"), []byte("c3ab3cdcaeb193bb49b16d4d3da88def984ab3bf"), []byte("flags"), []byte("s_down,sentinel"), []byte("link-pending-commands"), []byte("38"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("11828891"), []byte("last-ok-ping-reply"), []byte("11829539"), []byte("last-ping-reply"), []byte("11829539"), []byte("s-down-time"), []byte("11823815"), []byte("down-after-milliseconds"), []byte("5000"), []byte("last-hello-message"), []byte("11829434"), []byte("voted-leader"), []byte("?"), []byte("voted-leader-epoch"), []byte("0")}}, []string{"mymaster", "172.17.0.7:26379"}, oneOkSentinelExpectedMetricValue},
{"2/3 okay sentinel(string is not byte slice)", []interface{}{[]interface{}{[]byte("name"), []byte("284bc2ef46881bd71e81610152cb96031d211d28"), []byte("ip"), []byte("172.17.0.8"), []byte("port"), []byte("26379"), []byte("runid"), []byte("284bc2ef46881bd71e81610152cb96031d211d28"), []byte("flags"), []byte("sentinel"), []byte("link-pending-commands"), []byte("38"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("11828891"), []byte("last-ok-ping-reply"), []byte("11829539"), []byte("last-ping-reply"), []byte("11829539"), []byte("s-down-time"), []byte("11823816"), []byte("down-after-milliseconds"), []byte("5000"), []byte("last-hello-message"), []byte("11829434"), []byte("voted-leader"), []byte("?"), []byte("voted-leader-epoch"), []byte("0")}, []interface{}{[]byte("name"), []byte("c3ab3cdcaeb193bb49b16d4d3da88def984ab3bf"), []byte("ip"), []byte("172.17.0.7"), []byte("port"), []byte("26379"), []byte("runid"), []byte("c3ab3cdcaeb193bb49b16d4d3da88def984ab3bf"), []byte("flags"), "sentinel", []byte("link-pending-commands"), []byte("38"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("11828891"), []byte("last-ok-ping-reply"), []byte("11829539"), []byte("last-ping-reply"), []byte("11829539"), []byte("s-down-time"), []byte("11823815"), []byte("down-after-milliseconds"), []byte("5000"), []byte("last-hello-message"), []byte("11829434"), []byte("voted-leader"), []byte("?"), []byte("voted-leader-epoch"), []byte("0")}}, []string{"mymaster", "172.17.0.7:26379"}, twoOkSentinelExpectedMetricValue},
{"2/3 okay sentinel", []interface{}{[]interface{}{[]byte("name"), []byte("284bc2ef46881bd71e81610152cb96031d211d28"), []byte("ip"), []byte("172.17.0.8"), []byte("port"), []byte("26379"), []byte("runid"), []byte("284bc2ef46881bd71e81610152cb96031d211d28"), []byte("flags"), []byte("sentinel"), []byte("link-pending-commands"), []byte("38"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("11828891"), []byte("last-ok-ping-reply"), []byte("11829539"), []byte("last-ping-reply"), []byte("11829539"), []byte("s-down-time"), []byte("11823816"), []byte("down-after-milliseconds"), []byte("5000"), []byte("last-hello-message"), []byte("11829434"), []byte("voted-leader"), []byte("?"), []byte("voted-leader-epoch"), []byte("0")}, []interface{}{[]byte("name"), []byte("c3ab3cdcaeb193bb49b16d4d3da88def984ab3bf"), []byte("ip"), []byte("172.17.0.7"), []byte("port"), []byte("26379"), []byte("runid"), []byte("c3ab3cdcaeb193bb49b16d4d3da88def984ab3bf"), []byte("flags"), []byte("s_down,sentinel"), []byte("link-pending-commands"), []byte("38"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("11828891"), []byte("last-ok-ping-reply"), []byte("11829539"), []byte("last-ping-reply"), []byte("11829539"), []byte("s-down-time"), []byte("11823815"), []byte("down-after-milliseconds"), []byte("5000"), []byte("last-hello-message"), []byte("11829434"), []byte("voted-leader"), []byte("?"), []byte("voted-leader-epoch"), []byte("0")}}, []string{"mymaster", "172.17.0.7:26379"}, twoOkSentinelExpectedMetricValue},
{"2/3 okay sentinel(missing flags)", []interface{}{[]interface{}{[]byte("name"), []byte("284bc2ef46881bd71e81610152cb96031d211d28"), []byte("ip"), []byte("172.17.0.8"), []byte("port"), []byte("26379"), []byte("runid"), []byte("284bc2ef46881bd71e81610152cb96031d211d28"), []byte("flags"), []byte("sentinel"), []byte("link-pending-commands"), []byte("38"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("11828891"), []byte("last-ok-ping-reply"), []byte("11829539"), []byte("last-ping-reply"), []byte("11829539"), []byte("s-down-time"), []byte("11823816"), []byte("down-after-milliseconds"), []byte("5000"), []byte("last-hello-message"), []byte("11829434"), []byte("voted-leader"), []byte("?"), []byte("voted-leader-epoch"), []byte("0")}, []interface{}{[]byte("name"), []byte("c3ab3cdcaeb193bb49b16d4d3da88def984ab3bf"), []byte("ip"), []byte("172.17.0.7"), []byte("port"), []byte("26379"), []byte("runid"), []byte("c3ab3cdcaeb193bb49b16d4d3da88def984ab3bf"), []byte("link-pending-commands"), []byte("38"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("11828891"), []byte("last-ok-ping-reply"), []byte("11829539"), []byte("last-ping-reply"), []byte("11829539"), []byte("s-down-time"), []byte("11823815"), []byte("down-after-milliseconds"), []byte("5000"), []byte("last-hello-message"), []byte("11829434"), []byte("voted-leader"), []byte("?"), []byte("voted-leader-epoch"), []byte("0")}}, []string{"mymaster", "172.17.0.7:26379"}, twoOkSentinelExpectedMetricValue},
}
for _, tst := range tsts {
t.Run(tst.name, func(t *testing.T) {
chM := make(chan prometheus.Metric)
go func() {
e.processSentinelSentinels(chM, tst.sentinelDetails, tst.labels...)
close(chM)
}()
want := map[string]bool{
"sentinel_master_ok_sentinels": false,
}
for m := range chM {
for k := range want {
if strings.Contains(m.Desc().String(), k) {
want[k] = true
got := &dto.Metric{}
m.Write(got)
val := got.GetGauge().GetValue()
if int(val) != tst.expectedMetricValue[k] {
t.Errorf("Expected metric value %d didn't match to reported value %d for test %s", tst.expectedMetricValue[k], int(val), tst.name)
}
}
}
}
for k, found := range want {
if !found {
t.Errorf("didn't find metric %s", k)
}
}
})
}
}
type sentinelSlavesData struct {
name string
slaveDetails []interface{}
labels []string
expectedMetricValue map[string]int
}
func TestProcessSentinelSlaves(t *testing.T) {
if os.Getenv("TEST_REDIS_SENTINEL_URI") == "" {
t.Skipf("TEST_REDIS_SENTINEL_URI not set - skipping")
}
addr := os.Getenv("TEST_REDIS_SENTINEL_URI")
e, _ := NewRedisExporter(
addr,
Options{Namespace: "test"},
)
zeroOkSlaveExpectedMetricValue := map[string]int{
"sentinel_master_ok_slaves": 0,
}
oneOkSlaveExpectedMetricValue := map[string]int{
"sentinel_master_ok_slaves": 1,
}
twoOkSlaveExpectedMetricValue := map[string]int{
"sentinel_master_ok_slaves": 2,
}
tsts := []sentinelSlavesData{
{"0/1 okay slave(string is not byte slice)", []interface{}{[]interface{}{[]string{"name"}, []byte("172.17.0.3:6379"), []byte("ip"), []byte("172.17.0.3"), []byte("port"), []byte("6379"), []byte("runid"), []byte("42ebb784f2bd560903de9fb7d4533263d5db558a"), []byte("flags"), []byte("slave"), []byte("link-pending-commands"), []byte("0"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("0"), []byte("last-ok-ping-reply"), []byte("490"), []byte("last-ping-reply"), []byte("490"), []byte("down-after-milliseconds"), []byte("5000"), []byte("info-refresh"), []byte("2636"), []byte("role-reported"), []byte("slave"), []byte("role-reported-time"), []byte("48279581"), []byte("master-link-down-time"), []byte("0"), []byte("master-link-status"), []byte("ok"), []byte("master-host"), []byte("172.17.0.2"), []byte("master-port"), []byte("6379"), []byte("slave-priority"), []byte("100"), []byte("slave-repl-offset"), []byte("765829")}}, []string{"mymaster", "172.17.0.7:26379"}, zeroOkSlaveExpectedMetricValue},
{"1/1 okay slave", []interface{}{[]interface{}{[]byte("name"), []byte("172.17.0.3:6379"), []byte("ip"), []byte("172.17.0.3"), []byte("port"), []byte("6379"), []byte("runid"), []byte("42ebb784f2bd560903de9fb7d4533263d5db558a"), []byte("flags"), []byte("slave"), []byte("link-pending-commands"), []byte("0"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("0"), []byte("last-ok-ping-reply"), []byte("490"), []byte("last-ping-reply"), []byte("490"), []byte("down-after-milliseconds"), []byte("5000"), []byte("info-refresh"), []byte("2636"), []byte("role-reported"), []byte("slave"), []byte("role-reported-time"), []byte("48279581"), []byte("master-link-down-time"), []byte("0"), []byte("master-link-status"), []byte("ok"), []byte("master-host"), []byte("172.17.0.2"), []byte("master-port"), []byte("6379"), []byte("slave-priority"), []byte("100"), []byte("slave-repl-offset"), []byte("765829")}}, []string{"mymaster", "172.17.0.7:26379"}, oneOkSlaveExpectedMetricValue},
{"1/3 okay slave", []interface{}{[]interface{}{[]byte("name"), []byte("172.17.0.6:6379"), []byte("ip"), []byte("172.17.0.6"), []byte("port"), []byte("6379"), []byte("runid"), []byte("254576b435fcd73121a6497d3b03f3a464de9a10"), []byte("flags"), []byte("slave"), []byte("link-pending-commands"), []byte("0"), []byte("last-ok-ping-reply"), []byte("1021"), []byte("last-ping-reply"), []byte("1021"), []byte("down-after-milliseconds"), []byte("5000"), []byte("info-refresh"), []byte("6293"), []byte("role-reported"), []byte("slave"), []byte("role-reported-time"), []byte("36490"), []byte("master-link-down-time"), []byte("0"), []byte("master-link-status"), []byte("ok"), []byte("master-host"), []byte("172.17.0.2"), []byte("master-port"), []byte("6379"), []byte("slave-priority"), []byte("100"), []byte("slave-repl-offset"), []byte("1316759")}, []interface{}{[]byte("name"), []byte("172.17.0.3:6379"), []byte("ip"), []byte("172.17.0.3"), []byte("port"), []byte("6379"), []byte("runid"), []byte("42ebb784f2bd560903de9fb7d4533263d5db558a"), []byte("flags"), []byte("s_down,slave"), []byte("link-pending-commands"), []byte("0"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("0"), []byte("last-ok-ping-reply"), []byte("655"), []byte("last-ping-reply"), []byte("655"), []byte("down-after-milliseconds"), []byte("5000"), []byte("info-refresh"), []byte("6394"), []byte("role-reported"), []byte("slave"), []byte("role-reported-time"), []byte("56525539"), []byte("master-link-down-time"), []byte("0"), []byte("master-link-status"), []byte("ok"), []byte("master-host"), []byte("172.17.0.2"), []byte("master-port"), []byte("6379"), []byte("slave-priority"), []byte("100"), []byte("slave-repl-offset"), []byte("1316759")}, []interface{}{[]byte("name"), []byte("172.17.0.5:6379"), []byte("ip"), []byte("172.17.0.5"), []byte("port"), []byte("6379"), []byte("runid"), []byte("8f4b14e820fab7b38cad640208803dfb9fa225ca"), []byte("flags"), []byte("o_down,s_down,slave"), []byte("link-pending-commands"), []byte("100"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("23792"), []byte("last-ok-ping-reply"), []byte("23902"), []byte("last-ping-reply"), []byte("23902"), []byte("s-down-time"), []byte("18785"), []byte("down-after-milliseconds"), []byte("5000"), []byte("info-refresh"), []byte("26352"), []byte("role-reported"), []byte("slave"), []byte("role-reported-time"), []byte("36493"), []byte("master-link-down-time"), []byte("0"), []byte("master-link-status"), []byte("ok"), []byte("master-host"), []byte("redis-master"), []byte("master-port"), []byte("6379"), []byte("slave-priority"), []byte("100"), []byte("slave-repl-offset"), []byte("1315493")}}, []string{"mymaster", "172.17.0.7:26379"}, oneOkSlaveExpectedMetricValue},
{"2/3 okay slave", []interface{}{[]interface{}{[]byte("name"), []byte("172.17.0.6:6379"), []byte("ip"), []byte("172.17.0.6"), []byte("port"), []byte("6379"), []byte("runid"), []byte("254576b435fcd73121a6497d3b03f3a464de9a10"), []byte("flags"), []byte("slave"), []byte("link-pending-commands"), []byte("0"), []byte("last-ok-ping-reply"), []byte("1021"), []byte("last-ping-reply"), []byte("1021"), []byte("down-after-milliseconds"), []byte("5000"), []byte("info-refresh"), []byte("6293"), []byte("role-reported"), []byte("slave"), []byte("role-reported-time"), []byte("36490"), []byte("master-link-down-time"), []byte("0"), []byte("master-link-status"), []byte("ok"), []byte("master-host"), []byte("172.17.0.2"), []byte("master-port"), []byte("6379"), []byte("slave-priority"), []byte("100"), []byte("slave-repl-offset"), []byte("1316759")}, []interface{}{[]byte("name"), []byte("172.17.0.3:6379"), []byte("ip"), []byte("172.17.0.3"), []byte("port"), []byte("6379"), []byte("runid"), []byte("42ebb784f2bd560903de9fb7d4533263d5db558a"), []byte("flags"), []byte("slave"), []byte("link-pending-commands"), []byte("0"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("0"), []byte("last-ok-ping-reply"), []byte("655"), []byte("last-ping-reply"), []byte("655"), []byte("down-after-milliseconds"), []byte("5000"), []byte("info-refresh"), []byte("6394"), []byte("role-reported"), []byte("slave"), []byte("role-reported-time"), []byte("56525539"), []byte("master-link-down-time"), []byte("0"), []byte("master-link-status"), []byte("ok"), []byte("master-host"), []byte("172.17.0.2"), []byte("master-port"), []byte("6379"), []byte("slave-priority"), []byte("100"), []byte("slave-repl-offset"), []byte("1316759")}, []interface{}{[]byte("name"), []byte("172.17.0.5:6379"), []byte("ip"), []byte("172.17.0.5"), []byte("port"), []byte("6379"), []byte("runid"), []byte("8f4b14e820fab7b38cad640208803dfb9fa225ca"), []byte("flags"), []byte("s_down,slave"), []byte("link-pending-commands"), []byte("100"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("23792"), []byte("last-ok-ping-reply"), []byte("23902"), []byte("last-ping-reply"), []byte("23902"), []byte("s-down-time"), []byte("18785"), []byte("down-after-milliseconds"), []byte("5000"), []byte("info-refresh"), []byte("26352"), []byte("role-reported"), []byte("slave"), []byte("role-reported-time"), []byte("36493"), []byte("master-link-down-time"), []byte("0"), []byte("master-link-status"), []byte("ok"), []byte("master-host"), []byte("redis-master"), []byte("master-port"), []byte("6379"), []byte("slave-priority"), []byte("100"), []byte("slave-repl-offset"), []byte("1315493")}}, []string{"mymaster", "172.17.0.7:26379"}, twoOkSlaveExpectedMetricValue},
{"2/3 okay slave(missing flags)", []interface{}{[]interface{}{[]byte("name"), []byte("172.17.0.6:6379"), []byte("ip"), []byte("172.17.0.6"), []byte("port"), []byte("6379"), []byte("runid"), []byte("254576b435fcd73121a6497d3b03f3a464de9a10"), []byte("flags"), []byte("slave"), []byte("link-pending-commands"), []byte("0"), []byte("last-ok-ping-reply"), []byte("1021"), []byte("last-ping-reply"), []byte("1021"), []byte("down-after-milliseconds"), []byte("5000"), []byte("info-refresh"), []byte("6293"), []byte("role-reported"), []byte("slave"), []byte("role-reported-time"), []byte("36490"), []byte("master-link-down-time"), []byte("0"), []byte("master-link-status"), []byte("ok"), []byte("master-host"), []byte("172.17.0.2"), []byte("master-port"), []byte("6379"), []byte("slave-priority"), []byte("100"), []byte("slave-repl-offset"), []byte("1316759")}, []interface{}{[]byte("name"), []byte("172.17.0.3:6379"), []byte("ip"), []byte("172.17.0.3"), []byte("port"), []byte("6379"), []byte("runid"), []byte("42ebb784f2bd560903de9fb7d4533263d5db558a"), []byte("flags"), []byte("slave"), []byte("link-pending-commands"), []byte("0"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("0"), []byte("last-ok-ping-reply"), []byte("655"), []byte("last-ping-reply"), []byte("655"), []byte("down-after-milliseconds"), []byte("5000"), []byte("info-refresh"), []byte("6394"), []byte("role-reported"), []byte("slave"), []byte("role-reported-time"), []byte("56525539"), []byte("master-link-down-time"), []byte("0"), []byte("master-link-status"), []byte("ok"), []byte("master-host"), []byte("172.17.0.2"), []byte("master-port"), []byte("6379"), []byte("slave-priority"), []byte("100"), []byte("slave-repl-offset"), []byte("1316759")}, []interface{}{[]byte("name"), []byte("172.17.0.5:6379"), []byte("ip"), []byte("172.17.0.5"), []byte("port"), []byte("6379"), []byte("runid"), []byte("8f4b14e820fab7b38cad640208803dfb9fa225ca"), []byte("link-pending-commands"), []byte("100"), []byte("link-refcount"), []byte("1"), []byte("last-ping-sent"), []byte("23792"), []byte("last-ok-ping-reply"), []byte("23902"), []byte("last-ping-reply"), []byte("23902"), []byte("s-down-time"), []byte("18785"), []byte("down-after-milliseconds"), []byte("5000"), []byte("info-refresh"), []byte("26352"), []byte("role-reported"), []byte("slave"), []byte("role-reported-time"), []byte("36493"), []byte("master-link-down-time"), []byte("0"), []byte("master-link-status"), []byte("ok"), []byte("master-host"), []byte("redis-master"), []byte("master-port"), []byte("6379"), []byte("slave-priority"), []byte("100"), []byte("slave-repl-offset"), []byte("1315493")}}, []string{"mymaster", "172.17.0.7:26379"}, twoOkSlaveExpectedMetricValue},
}
for _, tst := range tsts {
t.Run(tst.name, func(t *testing.T) {
chM := make(chan prometheus.Metric)
go func() {
e.processSentinelSlaves(chM, tst.slaveDetails, tst.labels...)
close(chM)
}()
want := map[string]bool{
"sentinel_master_ok_slaves": false,
}
for m := range chM {
for k := range want {
if strings.Contains(m.Desc().String(), k) {
want[k] = true
got := &dto.Metric{}
m.Write(got)
val := got.GetGauge().GetValue()
if int(val) != tst.expectedMetricValue[k] {
t.Errorf("Expected metric value %d didn't match to reported value %d for test %s", tst.expectedMetricValue[k], int(val), tst.name)
}
}
}
}
for k, found := range want {
if !found {
t.Errorf("didn't find metric %s", k)
}
}
})
}
}
| ["\"TEST_REDIS_SENTINEL_URI\"", "\"TEST_REDIS_SENTINEL_URI\"", "\"TEST_REDIS_URI\"", "\"TEST_REDIS_URI\"", "\"TEST_REDIS_SENTINEL_URI\"", "\"TEST_REDIS_SENTINEL_URI\"", "\"TEST_REDIS_SENTINEL_URI\"", "\"TEST_REDIS_SENTINEL_URI\"", "\"TEST_REDIS_SENTINEL_URI\"", "\"TEST_REDIS_SENTINEL_URI\""] | [] | ["TEST_REDIS_URI", "TEST_REDIS_SENTINEL_URI"] | [] | ["TEST_REDIS_URI", "TEST_REDIS_SENTINEL_URI"] | go | 2 | 0 | |
contrib/devtools/symbol-check.py
|
#!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the executables produced by gitian only contain
certain symbols and are only linked against allowed libraries.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
import subprocess
import re
import sys
import os
from typing import List, Optional, Tuple
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
#
# - g++ version 4.9.2 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.19 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=libc6)
#
# Ubuntu 16.04 (Xenial) EOL: 2024. https://wiki.ubuntu.com/Releases
#
# - g++ version 5.3.1 (https://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=xenial§ion=all)
# - libc version 2.23.0 (https://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=xenial§ion=all)
#
# CentOS 7 EOL: 2024. https://wiki.centos.org/FAQ/General
#
# - g++ version 4.8.5 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
# - libc version 2.17 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.8.5: GCC_4.8.0
# (glibc) GLIBC_2_17
#
MAX_VERSIONS = {
'GCC': (4,8,0),
'GLIBC': (2,17),
'LIBATOMIC': (1,0)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
'environ', '_environ', '__environ',
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool')
# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
# ruvchaind and ruvchain-qt
'libgcc_s.so.1', # GCC base support
'libc.so.6', # C library
'libpthread.so.0', # threading
'libm.so.6', # math library
'librt.so.1', # real-time (clock)
'libatomic.so.1',
'ld-linux-x86-64.so.2', # 64-bit dynamic linker
'ld-linux.so.2', # 32-bit dynamic linker
'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker
'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker
'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker
# ruvchain-qt only
'libxcb.so.1', # part of X11
'libfontconfig.so.1', # font support
'libfreetype.so.6', # font parsing
'libdl.so.2' # programming interface to dynamic linker
}
ARCH_MIN_GLIBC_VER = {
'80386': (2,1),
'X86-64': (2,2,5),
'ARM': (2,4),
'AArch64':(2,17),
'RISC-V': (2,27)
}
MACHO_ALLOWED_LIBRARIES = {
# ruvchaind and ruvchain-qt
'libc++.1.dylib', # C++ Standard Library
'libSystem.B.dylib', # libc, libm, libpthread, libinfo
# ruvchain-qt only
'AppKit', # user interface
'ApplicationServices', # common application tasks.
'Carbon', # deprecated c back-compat API
'CoreFoundation', # low level func, data types
'CoreGraphics', # 2D rendering
'CoreServices', # operating system services
'CoreText', # interface for laying out text and handling fonts.
'Foundation', # base layer functionality for apps/frameworks
'ImageIO', # read and write image file formats.
'IOKit', # user-space access to hardware devices and drivers.
'libobjc.A.dylib', # Objective-C runtime library
}
PE_ALLOWED_LIBRARIES = {
'ADVAPI32.dll', # security & registry
'IPHLPAPI.DLL', # IP helper API
'KERNEL32.dll', # win32 base APIs
'msvcrt.dll', # C standard library for MSVC
'SHELL32.dll', # shell API
'USER32.dll', # user interface
'WS2_32.dll', # sockets
# ruvchain-qt only
'dwmapi.dll', # desktop window manager
'GDI32.dll', # graphics device interface
'IMM32.dll', # input method editor
'ole32.dll', # component object model
'OLEAUT32.dll', # OLE Automation API
'SHLWAPI.dll', # light weight shell API
'UxTheme.dll',
'VERSION.dll', # version checking
'WINMM.dll', # WinMM audio API
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
def __call__(self, mangled):
self.proc.stdin.write(mangled + '\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
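# Illustrative usage of CPPFilt (not part of the original script); the output shown is the
# typical c++filt demangling and may vary slightly between binutils versions:
#   cppfilt = CPPFilt()
#   cppfilt('_ZNSt6vectorIiSaIiEE9push_backERKi')
#   # -> 'std::vector<int, std::allocator<int> >::push_back(int const&)'
#   cppfilt.close()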
def read_symbols(executable, imports=True) -> List[Tuple[str, str, str]]:
'''
Parse an ELF executable and return a list of (symbol,version, arch) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', '-h', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for {}: {}'.format(executable, stderr.strip()))
syms = []
for line in stdout.splitlines():
line = line.split()
if 'Machine:' in line:
arch = line[-1]
if len(line)>7 and re.match('[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition('@')
is_import = line[6] == 'UND'
if version.startswith('@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version, arch))
return syms
def check_version(max_versions, version, arch) -> bool:
if '_' in version:
(lib, _, ver) = version.rpartition('_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split('.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib] or lib == 'GLIBC' and ver <= ARCH_MIN_GLIBC_VER[arch]
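# Worked examples for check_version (illustrative, not part of the original script):
#   check_version(MAX_VERSIONS, 'GLIBC_2.17', 'X86-64')   # True:  (2,17) <= MAX_VERSIONS['GLIBC']
#   check_version(MAX_VERSIONS, 'GLIBC_2.25', 'X86-64')   # False: (2,25) exceeds both limits
#   check_version(MAX_VERSIONS, 'GCC_4.8.0', 'AArch64')   # True:  matches MAX_VERSIONS['GCC'] exactly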
def elf_read_libraries(filename) -> List[str]:
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.splitlines():
tokens = line.split()
if len(tokens)>2 and tokens[1] == '(NEEDED)':
match = re.match(r'^Shared library: \[(.*)\]$', ' '.join(tokens[2:]))
if match:
libraries.append(match.group(1))
else:
raise ValueError('Unparseable (NEEDED) specification')
return libraries
def check_imported_symbols(filename) -> bool:
cppfilt = CPPFilt()
ok = True
for sym, version, arch in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version, arch):
print('{}: symbol {} from unsupported version {}'.format(filename, cppfilt(sym), version))
ok = False
return ok
def check_exported_symbols(filename) -> bool:
cppfilt = CPPFilt()
ok = True
for sym,version,arch in read_symbols(filename, False):
if arch == 'RISC-V' or sym in IGNORE_EXPORTS:
continue
print('{}: export of symbol {} not allowed'.format(filename, cppfilt(sym)))
ok = False
return ok
def check_ELF_libraries(filename) -> bool:
ok = True
for library_name in elf_read_libraries(filename):
if library_name not in ELF_ALLOWED_LIBRARIES:
print('{}: NEEDED library {} is not allowed'.format(filename, library_name))
ok = False
return ok
def macho_read_libraries(filename) -> List[str]:
p = subprocess.Popen([OTOOL_CMD, '-L', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.splitlines():
tokens = line.split()
if len(tokens) == 1: # skip executable name
continue
libraries.append(tokens[0].split('/')[-1])
return libraries
def check_MACHO_libraries(filename) -> bool:
ok = True
for dylib in macho_read_libraries(filename):
if dylib not in MACHO_ALLOWED_LIBRARIES:
print('{} is not in ALLOWED_LIBRARIES!'.format(dylib))
ok = False
return ok
def pe_read_libraries(filename) -> List[str]:
p = subprocess.Popen([OBJDUMP_CMD, '-x', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.splitlines():
if 'DLL Name:' in line:
tokens = line.split(': ')
libraries.append(tokens[1])
return libraries
def check_PE_libraries(filename) -> bool:
ok = True
for dylib in pe_read_libraries(filename):
if dylib not in PE_ALLOWED_LIBRARIES:
print('{} is not in ALLOWED_LIBRARIES!'.format(dylib))
ok = False
return ok
CHECKS = {
'ELF': [
('IMPORTED_SYMBOLS', check_imported_symbols),
('EXPORTED_SYMBOLS', check_exported_symbols),
('LIBRARY_DEPENDENCIES', check_ELF_libraries)
],
'MACHO': [
('DYNAMIC_LIBRARIES', check_MACHO_libraries)
],
'PE' : [
('DYNAMIC_LIBRARIES', check_PE_libraries)
]
}
def identify_executable(executable) -> Optional[str]:
    with open(executable, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
elif magic.startswith(b'\xcf\xfa'):
return 'MACHO'
return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('{}: unknown format'.format(filename))
retval = 1
continue
failed = []
for (name, func) in CHECKS[etype]:
if not func(filename):
failed.append(name)
if failed:
print('{}: failed {}'.format(filename, ' '.join(failed)))
retval = 1
except IOError:
print('{}: cannot open'.format(filename))
retval = 1
sys.exit(retval)
|
[] |
[] |
[
"OBJDUMP",
"READELF",
"OTOOL",
"CPPFILT"
] |
[]
|
["OBJDUMP", "READELF", "OTOOL", "CPPFILT"]
|
python
| 4 | 0 | |
tests/configmap/main.go
|
/*
Copyright 2018 Google, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"io/ioutil"
"os"
"github.com/golang/glog"
)
func main() {
configData := os.Getenv("TEST_DATA")
b, err := ioutil.ReadFile("/config/test.data")
if err != nil {
glog.Fatalf("Unexpected error reading /config/test.data: %v", err)
}
otherConfigData := string(b)
if configData != otherConfigData {
glog.Fatalf("Unexpected mismatch in config data, want %q, but got %q", configData, otherConfigData)
}
}
|
[
"\"TEST_DATA\""
] |
[] |
[
"TEST_DATA"
] |
[]
|
["TEST_DATA"]
|
go
| 1 | 0 | |
GetAppsBuildCode.py
|
#!/usr/bin/env python
##############################################
# Look for applications in the apps dir
# that end in cpp.
# Print lines that should go in the cmt
# requirements file to build them.
#
###############################################
import os, glob, sys
if not os.environ["GENIEXSECEXTRACTROOT"]:
sys.exit( "You must setup the package first." )
for f in glob.glob( "%s/apps/*cpp" % os.environ["GENIEXSECEXTRACTROOT"] ):
app, ext = os.path.splitext( os.path.basename(f) )
print "application %(app)s ${GENIEXSECEXTRACTROOT}/apps/%(app)s.cpp" % locals()
|
[] |
[] |
[
"GENIEXSECEXTRACTROOT"
] |
[]
|
["GENIEXSECEXTRACTROOT"]
|
python
| 1 | 0 | |
madgraph/interface/common_run_interface.py
|
###############################################################################
#
# Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
"""A user friendly command line interface to access MadGraph5_aMC@NLO features.
Uses the cmd package for command interpretation and tab completion.
"""
from __future__ import division
import ast
import logging
import os
import re
import shutil
import signal
import stat
import subprocess
import sys
import time
import traceback
import urllib
import glob
import StringIO
try:
import readline
GNU_SPLITTING = ('GNU' in readline.__doc__)
except:
GNU_SPLITTING = True
root_path = os.path.split(os.path.dirname(os.path.realpath( __file__ )))[0]
root_path = os.path.split(root_path)[0]
sys.path.insert(0, os.path.join(root_path,'bin'))
# useful shortcut
pjoin = os.path.join
# Special logger for the Cmd Interface
logger = logging.getLogger('madgraph.stdout') # -> stdout
logger_stderr = logging.getLogger('madgraph.stderr') # ->stderr
try:
import madgraph
except ImportError:
# import from madevent directory
import internal.extended_cmd as cmd
import internal.banner as banner_mod
import internal.shower_card as shower_card_mod
import internal.misc as misc
import internal.cluster as cluster
import internal.check_param_card as check_param_card
import internal.files as files
# import internal.histograms as histograms # imported later to not slow down the loading of the code
import internal.save_load_object as save_load_object
import internal.gen_crossxhtml as gen_crossxhtml
import internal.lhe_parser as lhe_parser
import internal.FO_analyse_card as FO_analyse_card
import internal.sum_html as sum_html
from internal import InvalidCmd, MadGraph5Error
MADEVENT=True
else:
# import from madgraph directory
import madgraph.interface.extended_cmd as cmd
import madgraph.various.banner as banner_mod
import madgraph.various.shower_card as shower_card_mod
import madgraph.various.misc as misc
import madgraph.iolibs.files as files
import madgraph.various.cluster as cluster
import madgraph.various.lhe_parser as lhe_parser
import madgraph.various.FO_analyse_card as FO_analyse_card
import madgraph.iolibs.save_load_object as save_load_object
import madgraph.madevent.gen_crossxhtml as gen_crossxhtml
import models.check_param_card as check_param_card
import madgraph.madevent.sum_html as sum_html
# import madgraph.various.histograms as histograms # imported later to not slow down the loading of the code
from madgraph import InvalidCmd, MadGraph5Error, MG5DIR
MADEVENT=False
#===============================================================================
# HelpToCmd
#===============================================================================
class HelpToCmd(object):
""" The Series of help routins in common between amcatnlo_run and
madevent interface"""
def help_treatcards(self):
logger.info("syntax: treatcards [param|run] [--output_dir=] [--param_card=] [--run_card=]")
logger.info("-- create the .inc files containing the cards information." )
def help_set(self):
logger.info("syntax: set %s argument" % "|".join(self._set_options))
logger.info("-- set options")
logger.info(" stdout_level DEBUG|INFO|WARNING|ERROR|CRITICAL")
logger.info(" change the default level for printed information")
logger.info(" timeout VALUE")
logger.info(" (default 20) Seconds allowed to answer questions.")
logger.info(" Note that pressing tab always stops the timer.")
logger.info(" cluster_temp_path PATH")
logger.info(" (default None) Allow to perform the run in PATH directory")
logger.info(" This allow to not run on the central disk. This is not used")
logger.info(" by condor cluster (since condor has it's own way to prevent it).")
def help_plot(self):
logger.info("syntax: plot [RUN] [%s] [-f]" % '|'.join(self._plot_mode))
logger.info("-- create the plot for the RUN (current run by default)")
logger.info(" at the different stage of the event generation")
logger.info(" Note than more than one mode can be specified in the same command.")
logger.info(" This requires to have MadAnalysis and td installed.")
logger.info(" -f options: answer all question by default.")
def help_compute_widths(self):
logger.info("syntax: compute_widths Particle [Particles] [OPTIONS]")
logger.info("-- Compute the widths for the particles specified.")
logger.info(" By default, this takes the current param_card and overwrites it.")
logger.info(" Precision allows to define when to include three/four/... body decays (LO).")
logger.info(" If this number is an integer then all N-body decay will be included.")
logger.info(" Various options:\n")
logger.info(" --body_decay=X: Parameter to control the precision of the computation")
logger.info(" if X is an integer, we compute all channels up to X-body decay.")
logger.info(" if X <1, then we stop when the estimated error is lower than X.")
logger.info(" if X >1 BUT not an integer, then we X = N + M, with M <1 and N an integer")
logger.info(" We then either stop at the N-body decay or when the estimated error is lower than M.")
logger.info(" default: 4.0025")
logger.info(" --min_br=X: All channel which are estimated below this value will not be integrated numerically.")
logger.info(" default: precision (decimal part of the body_decay options) divided by four")
logger.info(" --precision_channel=X: requested numerical precision for each channel")
logger.info(" default: 0.01")
logger.info(" --path=X: path for param_card")
logger.info(" default: take value from the model")
logger.info(" --output=X: path where to write the resulting card. ")
logger.info(" default: overwrite input file. If no input file, write it in the model directory")
logger.info(" --nlo: Compute NLO width [if the model support it]")
def help_shower(self):
logger.info("syntax: shower [shower_name] [shower_options]")
logger.info("-- This is equivalent to running '[shower_name] [shower_options]'")
def help_pgs(self):
logger.info("syntax: pgs [RUN] [--run_options]")
logger.info("-- run pgs on RUN (current one by default)")
        self.run_options_help([('-f','answer all questions by default'),
('--tag=', 'define the tag for the pgs run'),
                               ('--no_default', 'do not run if pgs_card not present')])
def help_delphes(self):
logger.info("syntax: delphes [RUN] [--run_options]")
logger.info("-- run delphes on RUN (current one by default)")
        self.run_options_help([('-f','answer all questions by default'),
('--tag=', 'define the tag for the delphes run'),
                               ('--no_default', 'do not run if delphes_card not present')])
def help_decay_events(self, skip_syntax=False):
if not skip_syntax:
logger.info("syntax: decay_events [RUN]")
logger.info("This functionality allows for the decay of resonances")
logger.info("in a .lhe file, keeping track of the spin correlation effets.")
logger.info("BE AWARE OF THE CURRENT LIMITATIONS:")
logger.info(" (1) Only a succession of 2 body decay are currently allowed")
class CheckValidForCmd(object):
""" The Series of check routines in common between amcatnlo_run and
madevent interface"""
def check_set(self, args):
""" check the validity of the line"""
if len(args) < 2:
if len(args)==1 and "=" in args[0]:
args[:] = args[0].split("=",1)
else:
self.help_set()
raise self.InvalidCmd('set needs an option and an argument')
if args[0] not in self._set_options + self.options.keys():
self.help_set()
raise self.InvalidCmd('Possible options for set are %s' % \
(self._set_options+self.options.keys()))
if args[0] in ['stdout_level']:
if args[1] not in ['DEBUG','INFO','WARNING','ERROR','CRITICAL'] \
and not args[1].isdigit():
raise self.InvalidCmd('output_level needs ' + \
'a valid level')
if args[0] in ['timeout']:
if not args[1].isdigit():
                raise self.InvalidCmd('timeout value should be an integer')
def check_compute_widths(self, args):
"""check that the model is loadable and check that the format is of the
type: PART PATH --output=PATH -f --precision=N
return the model.
"""
# Check that MG5 directory is present .
if MADEVENT and not self.options['mg5_path']:
            raise self.InvalidCmd, '''The automatic computation of widths requires that MG5 is installed on the system.
You can install it and set its path in ./Cards/me5_configuration.txt'''
elif MADEVENT:
sys.path.append(self.options['mg5_path'])
try:
import models.model_reader as model_reader
import models.import_ufo as import_ufo
except ImportError:
raise self.ConfigurationError, '''Can\'t load MG5.
The variable mg5_path is probably not correctly configured.'''
ufo_path = pjoin(self.me_dir,'bin','internal', 'ufomodel')
# Import model
if not MADEVENT:
modelname = self.find_model_name()
#restrict_file = None
#if os.path.exists(pjoin(ufo_path, 'restrict_default.dat')):
# restrict_file = pjoin(ufo_path, 'restrict_default.dat')
force_CMS = self.mother and self.mother.options['complex_mass_scheme']
model = import_ufo.import_model(modelname, decay=True,
restrict=True, complex_mass_scheme=force_CMS)
else:
force_CMS = self.proc_characteristics['complex_mass_scheme']
model = import_ufo.import_model(pjoin(self.me_dir,'bin','internal',
'ufomodel'), decay=True, complex_mass_scheme=force_CMS)
# if not hasattr(model.get('particles')[0], 'partial_widths'):
# raise self.InvalidCmd, 'The UFO model does not include partial widths information. Impossible to compute widths automatically'
# check if the name are passed to default MG5
if '-modelname' not in open(pjoin(self.me_dir,'Cards','proc_card_mg5.dat')).read():
model.pass_particles_name_in_mg_default()
model = model_reader.ModelReader(model)
particles_name = dict([(p.get('name'), p.get('pdg_code'))
for p in model.get('particles')])
particles_name.update(dict([(p.get('antiname'), p.get('pdg_code'))
for p in model.get('particles')]))
output = {'model': model, 'force': False, 'output': None,
'path':None, 'particles': set(), 'body_decay':4.0025,
'min_br':None, 'precision_channel':0.01}
for arg in args:
if arg.startswith('--output='):
output_path = arg.split('=',1)[1]
if not os.path.exists(output_path):
raise self.InvalidCmd, 'Invalid Path for the output. Please retry.'
if not os.path.isfile(output_path):
output_path = pjoin(output_path, 'param_card.dat')
output['output'] = output_path
elif arg == '-f':
output['force'] = True
elif os.path.isfile(arg):
ftype = self.detect_card_type(arg)
if ftype != 'param_card.dat':
raise self.InvalidCmd , '%s is not a valid param_card.' % arg
output['path'] = arg
elif arg.startswith('--path='):
arg = arg.split('=',1)[1]
ftype = self.detect_card_type(arg)
if ftype != 'param_card.dat':
raise self.InvalidCmd , '%s is not a valid param_card.' % arg
output['path'] = arg
elif arg.startswith('--'):
if "=" in arg:
name, value = arg.split('=',1)
try:
value = float(value)
except Exception:
raise self.InvalidCmd, '--%s requires integer or a float' % name
output[name[2:]] = float(value)
elif arg == "--nlo":
output["nlo"] = True
elif arg in particles_name:
# should be a particles
output['particles'].add(particles_name[arg])
elif arg.isdigit() and int(arg) in particles_name.values():
output['particles'].add(ast.literal_eval(arg))
elif arg == 'all':
output['particles'] = set(['all'])
else:
self.help_compute_widths()
raise self.InvalidCmd, '%s is not a valid argument for compute_widths' % arg
if self.force:
output['force'] = True
if not output['particles']:
            raise self.InvalidCmd, '''This routine requires at least one particle in order to compute
the related width'''
if output['output'] is None:
output['output'] = output['path']
return output
def check_delphes(self, arg, nodefault=False):
"""Check the argument for pythia command
syntax: delphes [NAME]
        Note that other options are already removed at this point
"""
        # If no delphes path is configured yet
if not self.options['delphes_path']:
logger.info('Retry to read configuration file to find delphes path')
self.set_configuration()
if not self.options['delphes_path']:
error_msg = 'No valid Delphes path set.\n'
error_msg += 'Please use the set command to define the path and retry.\n'
error_msg += 'You can also define it in the configuration file.\n'
raise self.InvalidCmd(error_msg)
tag = [a for a in arg if a.startswith('--tag=')]
if tag:
arg.remove(tag[0])
tag = tag[0][6:]
if len(arg) == 0 and not self.run_name:
if self.results.lastrun:
arg.insert(0, self.results.lastrun)
else:
                raise self.InvalidCmd('No run name currently defined. Please add this information.')
if len(arg) == 1 and self.run_name == arg[0]:
arg.pop(0)
filepath = None
if not len(arg):
prev_tag = self.set_run_name(self.run_name, tag, 'delphes')
paths = [pjoin(self.me_dir,'Events',self.run_name, '%(tag)s_pythia_events.hep.gz'),
pjoin(self.me_dir,'Events',self.run_name, '%(tag)s_pythia8_events.hepmc.gz'),
pjoin(self.me_dir,'Events',self.run_name, '%(tag)s_pythia_events.hep'),
pjoin(self.me_dir,'Events',self.run_name, '%(tag)s_pythia8_events.hepmc'),
pjoin(self.me_dir,'Events','pythia_events.hep'),
pjoin(self.me_dir,'Events','pythia_events.hepmc'),
pjoin(self.me_dir,'Events','pythia8_events.hep.gz'),
pjoin(self.me_dir,'Events','pythia8_events.hepmc.gz')
]
for p in paths:
if os.path.exists(p % {'tag': prev_tag}):
filepath = p % {'tag': prev_tag}
break
else:
a = raw_input("NO INPUT")
if nodefault:
return False
else:
self.help_pgs()
                    raise self.InvalidCmd('''No file pythia_events.* currently available.
Please specify a valid run_name''')
if len(arg) == 1:
prev_tag = self.set_run_name(arg[0], tag, 'delphes')
if os.path.exists(pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep.gz' % prev_tag)):
filepath = pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep.gz' % prev_tag)
elif os.path.exists(pjoin(self.me_dir,'Events',self.run_name, '%s_pythia8_events.hepmc.gz' % prev_tag)):
filepath = pjoin(self.me_dir,'Events',self.run_name, '%s_pythia8_events.hepmc.gz' % prev_tag)
elif os.path.exists(pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep' % prev_tag)):
                filepath = pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep' % prev_tag)
elif os.path.exists(pjoin(self.me_dir,'Events',self.run_name, '%s_pythia8_events.hepmc' % prev_tag)):
                filepath = pjoin(self.me_dir,'Events',self.run_name, '%s_pythia8_events.hepmc' % prev_tag)
else:
raise self.InvalidCmd('No events file corresponding to %s run with tag %s.:%s '\
% (self.run_name, prev_tag,
pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep.gz' % prev_tag)))
else:
if tag:
self.run_card['run_tag'] = tag
self.set_run_name(self.run_name, tag, 'delphes')
return filepath
def check_open(self, args):
""" check the validity of the line """
if len(args) != 1:
self.help_open()
raise self.InvalidCmd('OPEN command requires exactly one argument')
if args[0].startswith('./'):
if not os.path.isfile(args[0]):
                raise self.InvalidCmd('%s: no such file' % args[0])
return True
# if special : create the path.
if not self.me_dir:
if not os.path.isfile(args[0]):
self.help_open()
raise self.InvalidCmd('No MadEvent path defined. Unable to associate this name to a file')
else:
return True
path = self.me_dir
if os.path.isfile(os.path.join(path,args[0])):
args[0] = os.path.join(path,args[0])
elif os.path.isfile(os.path.join(path,'Cards',args[0])):
args[0] = os.path.join(path,'Cards',args[0])
elif os.path.isfile(os.path.join(path,'HTML',args[0])):
args[0] = os.path.join(path,'HTML',args[0])
# special for card with _default define: copy the default and open it
elif '_card.dat' in args[0]:
name = args[0].replace('_card.dat','_card_default.dat')
if os.path.isfile(os.path.join(path,'Cards', name)):
files.cp(os.path.join(path,'Cards', name), os.path.join(path,'Cards', args[0]))
args[0] = os.path.join(path,'Cards', args[0])
else:
raise self.InvalidCmd('No default path for this file')
elif not os.path.isfile(args[0]):
raise self.InvalidCmd('No default path for this file')
def check_treatcards(self, args):
"""check that treatcards arguments are valid
[param|run|all] [--output_dir=] [--param_card=] [--run_card=]
"""
opt = {'output_dir':pjoin(self.me_dir,'Source'),
'param_card':pjoin(self.me_dir,'Cards','param_card.dat'),
'run_card':pjoin(self.me_dir,'Cards','run_card.dat')}
mode = 'all'
for arg in args:
if arg.startswith('--') and '=' in arg:
key,value =arg[2:].split('=',1)
if not key in opt:
self.help_treatcards()
raise self.InvalidCmd('Invalid option for treatcards command:%s ' \
% key)
if key in ['param_card', 'run_card']:
if os.path.isfile(value):
card_name = self.detect_card_type(value)
if card_name != key:
raise self.InvalidCmd('Format for input file detected as %s while expecting %s'
% (card_name, key))
opt[key] = value
elif os.path.isfile(pjoin(self.me_dir,value)):
card_name = self.detect_card_type(pjoin(self.me_dir,value))
if card_name != key:
raise self.InvalidCmd('Format for input file detected as %s while expecting %s'
% (card_name, key))
opt[key] = value
else:
raise self.InvalidCmd('No such file: %s ' % value)
elif key in ['output_dir']:
if os.path.isdir(value):
opt[key] = value
elif os.path.isdir(pjoin(self.me_dir,value)):
opt[key] = pjoin(self.me_dir, value)
else:
raise self.InvalidCmd('No such directory: %s' % value)
elif arg in ['MadLoop','param','run','all']:
mode = arg
else:
self.help_treatcards()
                raise self.InvalidCmd('Invalid argument %s' % arg)
return mode, opt
def check_decay_events(self,args):
"""Check the argument for decay_events command
syntax is "decay_events [NAME]"
        Note that other options are already removed at this point
"""
opts = []
if '-from_cards' in args:
args.remove('-from_cards')
opts.append('-from_cards')
if len(args) == 0:
if self.run_name:
args.insert(0, self.run_name)
elif self.results.lastrun:
args.insert(0, self.results.lastrun)
else:
raise self.InvalidCmd('No run name currently defined. Please add this information.')
return
if args[0] != self.run_name:
self.set_run_name(args[0])
args[0] = self.get_events_path(args[0])
args += opts
def check_check_events(self,args):
"""Check the argument for decay_events command
syntax is "decay_events [NAME]"
Note that other option are already remove at this point
"""
if len(args) == 0:
if self.run_name:
args.insert(0, self.run_name)
elif self.results.lastrun:
args.insert(0, self.results.lastrun)
else:
raise self.InvalidCmd('No run name currently defined. Please add this information.')
return
if args[0] and os.path.isfile(args[0]):
pass
else:
if args[0] != self.run_name:
self.set_run_name(args[0], allow_new_tag=False)
args[0] = self.get_events_path(args[0])
def get_events_path(self, run_name):
"""return the path to the output events
"""
if self.mode == 'madevent':
possible_path = [
pjoin(self.me_dir,'Events', run_name, 'unweighted_events.lhe.gz'),
pjoin(self.me_dir,'Events', run_name, 'unweighted_events.lhe')]
else:
possible_path = [
pjoin(self.me_dir,'Events', run_name, 'events.lhe.gz'),
pjoin(self.me_dir,'Events', run_name, 'events.lhe')]
for path in possible_path:
if os.path.exists(path):
correct_path = path
break
else:
if os.path.exists(run_name):
correct_path = run_name
else:
raise self.InvalidCmd('No events file corresponding to %s run. ' % run_name)
return correct_path
class MadEventAlreadyRunning(InvalidCmd):
pass
class AlreadyRunning(MadEventAlreadyRunning):
pass
#===============================================================================
# CommonRunCmd
#===============================================================================
class CommonRunCmd(HelpToCmd, CheckValidForCmd, cmd.Cmd):
debug_output = 'ME5_debug'
helporder = ['Main Commands', 'Documented commands', 'Require MG5 directory',
'Advanced commands']
sleep_for_error = True
    # The three option categories are treated on a different footing when a
    # set/save configuration occurs. Current values are kept in self.options
options_configuration = {'pythia8_path': './pythia8',
'hwpp_path': './herwigPP',
'thepeg_path': './thepeg',
'hepmc_path': './hepmc',
'madanalysis_path': './MadAnalysis',
'madanalysis5_path': './HEPTools/madanalysis5',
'pythia-pgs_path':'./pythia-pgs',
'td_path':'./td',
'delphes_path':'./Delphes',
'exrootanalysis_path':'./ExRootAnalysis',
'syscalc_path': './SysCalc',
'lhapdf': 'lhapdf-config',
'timeout': 60,
'f2py_compiler':None,
'web_browser':None,
'eps_viewer':None,
'text_editor':None,
'fortran_compiler':None,
'cpp_compiler': None,
'auto_update':7,
'cluster_type': 'condor',
'cluster_status_update': (600, 30),
'cluster_nb_retry':1,
'cluster_local_path': None,
'cluster_retry_wait':300}
options_madgraph= {'stdout_level':None}
options_madevent = {'automatic_html_opening':True,
'notification_center':True,
'run_mode':2,
'cluster_queue':None,
'cluster_time':None,
'cluster_size':100,
'cluster_memory':None,
'nb_core': None,
'cluster_temp_path':None}
def __init__(self, me_dir, options, *args, **opts):
"""common"""
self.force_run = False # this flag force the run even if RunWeb is present
if 'force_run' in opts and opts['force_run']:
self.force_run = True
del opts['force_run']
cmd.Cmd.__init__(self, *args, **opts)
# Define current MadEvent directory
if me_dir is None and MADEVENT:
me_dir = root_path
if os.path.isabs(me_dir):
self.me_dir = me_dir
else:
self.me_dir = pjoin(os.getcwd(),me_dir)
self.options = options
        self.param_card_iterator = [] # a placeholder containing a generator of param_card for scanning
        # useful shortcut
self.status = pjoin(self.me_dir, 'status')
self.error = pjoin(self.me_dir, 'error')
self.dirbin = pjoin(self.me_dir, 'bin', 'internal')
        # Check that the directory is not currently running
if not self.force_run:
if os.path.exists(pjoin(me_dir,'RunWeb')):
message = '''Another instance of the program is currently running.
(for this exact same directory) Please wait until that instance is
closed. If no instance is running, you can delete the file
%s and try again.''' % pjoin(me_dir,'RunWeb')
raise AlreadyRunning, message
else:
pid = os.getpid()
fsock = open(pjoin(me_dir,'RunWeb'),'w')
fsock.write(`pid`)
fsock.close()
self.gen_card_html()
self.to_store = []
self.run_name = None
self.run_tag = None
self.banner = None
# Load the configuration file
self.set_configuration()
self.configure_run_mode(self.options['run_mode'])
# Define self.proc_characteristics
self.get_characteristics()
if not self.proc_characteristics['ninitial']:
# Get number of initial states
nexternal = open(pjoin(self.me_dir,'Source','nexternal.inc')).read()
found = re.search("PARAMETER\s*\(NINCOMING=(\d)\)", nexternal)
self.ninitial = int(found.group(1))
else:
self.ninitial = self.proc_characteristics['ninitial']
def make_make_all_html_results(self, folder_names = [], jobs=[]):
return sum_html.make_all_html_results(self, folder_names, jobs)
############################################################################
def split_arg(self, line, error=False):
"""split argument and remove run_options"""
args = cmd.Cmd.split_arg(line)
for arg in args[:]:
if not arg.startswith('-'):
continue
elif arg == '-c':
self.configure_run_mode(1)
elif arg == '-m':
self.configure_run_mode(2)
elif arg == '-f':
self.force = True
elif not arg.startswith('--'):
if error:
raise self.InvalidCmd('%s argument cannot start with - symbol' % arg)
else:
continue
elif arg.startswith('--cluster'):
self.configure_run_mode(1)
elif arg.startswith('--multicore'):
self.configure_run_mode(2)
elif arg.startswith('--nb_core'):
self.options['nb_core'] = int(arg.split('=',1)[1])
self.configure_run_mode(2)
elif arg.startswith('--web'):
self.pass_in_web_mode()
self.configure_run_mode(1)
else:
continue
args.remove(arg)
return args
@misc.multiple_try(nb_try=5, sleep=2)
def load_results_db(self):
"""load the current results status"""
# load the current status of the directory
if os.path.exists(pjoin(self.me_dir,'HTML','results.pkl')):
try:
self.results = save_load_object.load_from_file(pjoin(self.me_dir,'HTML','results.pkl'))
except Exception:
                # the pickle failed -> need to recreate the library
model = self.find_model_name()
process = self.process # define in find_model_name
self.results = gen_crossxhtml.AllResults(model, process, self.me_dir)
self.results.resetall(self.me_dir)
else:
try:
self.results.resetall(self.me_dir)
except Exception, error:
logger.debug(error)
# Maybe the format was updated -> try fresh
model = self.find_model_name()
process = self.process # define in find_model_name
self.results = gen_crossxhtml.AllResults(model, process, self.me_dir)
self.results.resetall(self.me_dir)
self.last_mode = ''
try:
self.last_mode = self.results[self.results.lastrun][-1]['run_mode']
except:
self.results.resetall(self.me_dir)
self.last_mode = ''
else:
model = self.find_model_name()
process = self.process # define in find_model_name
self.results = gen_crossxhtml.AllResults(model, process, self.me_dir)
self.results.resetall(self.me_dir)
self.last_mode=''
return self.results
############################################################################
def do_treatcards(self, line, amcatnlo=False):
"""Advanced commands: create .inc files from param_card.dat/run_card.dat"""
#ensure that the cluster/card are consistent
if hasattr(self, 'run_card'):
self.cluster.modify_interface(self)
else:
try:
self.cluster.modify_interface(self)
except Exception, error:
misc.sprint(str(error))
keepwidth = False
if '--keepwidth' in line:
keepwidth = True
line = line.replace('--keepwidth', '')
args = self.split_arg(line)
mode, opt = self.check_treatcards(args)
if mode in ['run', 'all']:
if not hasattr(self, 'run_card'):
run_card = banner_mod.RunCard(opt['run_card'])
else:
run_card = self.run_card
# add the conversion from the lhaid to the pdf set names
if amcatnlo and run_card['pdlabel']=='lhapdf':
pdfsetsdir=self.get_lhapdf_pdfsetsdir()
pdfsets=self.get_lhapdf_pdfsets_list(pdfsetsdir)
lhapdfsetname=[]
for lhaid in run_card['lhaid']:
if lhaid in pdfsets:
lhapdfsetname.append(pdfsets[lhaid]['filename'])
else:
raise MadGraph5Error("lhaid %s is not a valid PDF identification number. This can be due to the use of an outdated version of LHAPDF, or %s is not a LHAGlue number corresponding to a central PDF set (but rather one of the error sets)." % (lhaid,lhaid))
run_card['lhapdfsetname']=lhapdfsetname
run_card.write_include_file(opt['output_dir'])
if mode in ['MadLoop', 'all']:
if os.path.exists(pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat')):
self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir,
'Cards', 'MadLoopParams.dat'))
# write the output file
self.MadLoopparam.write(pjoin(self.me_dir,"SubProcesses",
"MadLoopParams.dat"))
if mode in ['param', 'all']:
if os.path.exists(pjoin(self.me_dir, 'Source', 'MODEL', 'mp_coupl.inc')):
param_card = check_param_card.ParamCardMP(opt['param_card'])
else:
param_card = check_param_card.ParamCard(opt['param_card'])
outfile = pjoin(opt['output_dir'], 'param_card.inc')
ident_card = pjoin(self.me_dir,'Cards','ident_card.dat')
if os.path.isfile(pjoin(self.me_dir,'bin','internal','ufomodel','restrict_default.dat')):
default = pjoin(self.me_dir,'bin','internal','ufomodel','restrict_default.dat')
elif os.path.isfile(pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat')):
default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat')
elif not os.path.exists(pjoin(self.me_dir,'bin','internal','ufomodel')):
fsock = open(pjoin(self.me_dir,'Source','param_card.inc'),'w')
fsock.write(' ')
fsock.close()
return
else:
subprocess.call(['python', 'write_param_card.py'],
cwd=pjoin(self.me_dir,'bin','internal','ufomodel'))
default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat')
if amcatnlo and not keepwidth:
# force particle in final states to have zero width
pids = self.get_pid_final_initial_states()
# check those which are charged under qcd
if not MADEVENT and pjoin(self.me_dir,'bin','internal') not in sys.path:
sys.path.insert(0,pjoin(self.me_dir,'bin','internal'))
#Ensure that the model that we are going to load is the current
#one.
to_del = [name for name in sys.modules.keys()
if name.startswith('internal.ufomodel')
or name.startswith('ufomodel')]
for name in to_del:
del(sys.modules[name])
import ufomodel as ufomodel
zero = ufomodel.parameters.ZERO
no_width = [p for p in ufomodel.all_particles
if (str(p.pdg_code) in pids or str(-p.pdg_code) in pids)
and p.color != 1 and p.width != zero]
done = []
for part in no_width:
if abs(part.pdg_code) in done:
continue
done.append(abs(part.pdg_code))
param = param_card['decay'].get((part.pdg_code,))
if param.value != 0:
logger.info('''For gauge cancellation, the width of \'%s\' has been set to zero.'''\
% part.name,'$MG:color:BLACK')
param.value = 0
param_card.write_inc_file(outfile, ident_card, default)
def get_model(self):
"""return the model related to this process"""
if self.options['mg5_path']:
sys.path.append(self.options['mg5_path'])
import models.import_ufo as import_ufo
complexmass = self.proc_characteristics['complex_mass_scheme']
with misc.MuteLogger(['madgraph.model'],[50]):
out= import_ufo.import_model(pjoin(self.me_dir,'bin','internal','ufomodel'),
complex_mass_scheme=complexmass)
return out
#elif self.mother:
# misc.sprint('Hum this is dangerous....')
# return self.mother._curr_model
else:
return None
def ask_edit_cards(self, cards, mode='fixed', plot=True, first_cmd=None):
""" """
if not self.options['madanalysis_path']:
plot = False
self.ask_edit_card_static(cards, mode, plot, self.options['timeout'],
self.ask, first_cmd=first_cmd)
@staticmethod
def ask_edit_card_static(cards, mode='fixed', plot=True,
timeout=0, ask=None, **opt):
if not ask:
ask = CommonRunCmd.ask
def path2name(path):
if '_card' in path:
return path.split('_card')[0]
elif path == 'delphes_trigger.dat':
return 'trigger'
elif path == 'input.lhco':
return 'lhco'
elif path == 'MadLoopParams.dat':
return 'MadLoopParams'
else:
                raise Exception, 'Unknown card name %s' % path
# Ask the user if he wants to edit any of the files
#First create the asking text
question = """Do you want to edit a card (press enter to bypass editing)?\n"""
possible_answer = ['0', 'done']
card = {0:'done'}
indent = max(len(path2name(card_name)) for card_name in cards)
question += '/'+'-'*60+'\\\n'
for i, card_name in enumerate(cards):
imode = path2name(card_name)
possible_answer.append(i+1)
possible_answer.append(imode)
question += '| %-77s|\n'%((' \x1b[31m%%s\x1b[0m. %%-%ds : \x1b[32m%%s\x1b[0m'%indent)%(i+1, imode, card_name))
card[i+1] = imode
if plot and not 'plot_card.dat' in cards:
question += '| %-77s|\n'%((' \x1b[31m9\x1b[0m. %%-%ds : \x1b[32mplot_card.dat\x1b[0m'%indent) % 'plot')
possible_answer.append(9)
possible_answer.append('plot')
card[9] = 'plot'
question += '\\'+'-'*60+'/\n'
if 'param_card.dat' in cards:
# Add the path options
question += ' you can also\n'
question += ' - enter the path to a valid card or banner.\n'
question += ' - use the \'set\' command to modify a parameter directly.\n'
question += ' The set option works only for param_card and run_card.\n'
question += ' Type \'help set\' for more information on this command.\n'
question += ' - call an external program (ASperGE/MadWidth/...).\n'
question += ' Type \'help\' for the list of available command\n'
else:
question += ' you can also\n'
question += ' - enter the path to a valid card.\n'
if 'transfer_card.dat' in cards:
            question += '   - use the \'change_tf\' command to set a transfer function.\n'
out = 'to_run'
while out not in ['0', 'done']:
out = ask(question, '0', possible_answer, timeout=int(1.5*timeout),
path_msg='enter path', ask_class = AskforEditCard,
cards=cards, mode=mode, **opt)
@staticmethod
def detect_card_type(path):
"""detect the type of the card. Return value are
banner
param_card.dat
run_card.dat
pythia_card.dat
pythia8_card.dat
plot_card.dat
pgs_card.dat
delphes_card.dat
delphes_trigger.dat
shower_card.dat [aMCatNLO]
FO_analyse_card.dat [aMCatNLO]
madspin_card.dat [MS]
transfer_card.dat [MW]
madweight_card.dat [MW]
madanalysis5_hadron_card.dat
madanalysis5_parton_card.dat
Please update the unit-test: test_card_type_recognition when adding
cards.
"""
fulltext = open(path).read(50000)
if fulltext == '':
logger.warning('File %s is empty' % path)
return 'unknown'
to_search = ['<MGVersion>', # banner
                     '<mg5proccard>',
'ParticlePropagator', # Delphes
'ExecutionPath',
'Treewriter',
'CEN_max_tracker',
'#TRIGGER CARD', # delphes_trigger.dat
'parameter set name', # pgs_card
'muon eta coverage',
'req_acc_FO',
'MSTP',
'b_stable',
'FO_ANALYSIS_FORMAT',
'MSTU',
'Begin Minpts',
'gridpack',
'ebeam1',
'block\s+mw_run',
'BLOCK',
'DECAY',
'launch',
'madspin',
'transfer_card\.dat',
'set',
'main:numberofevents', # pythia8,
'@MG5aMC skip_analysis', #MA5 --both--
'@MG5aMC\s*inputs\s*=\s*\*\.(?:hepmc|lhe)', #MA5 --both--
'@MG5aMC\s*reconstruction_name', # MA5 hadronique
'@MG5aMC' # MA5 hadronique
]
text = re.findall('(%s)' % '|'.join(to_search), fulltext, re.I)
text = [t.lower() for t in text]
if '<mgversion>' in text or '<mg5proccard>' in text:
return 'banner'
elif 'particlepropagator' in text or 'executionpath' in text or 'treewriter' in text:
return 'delphes_card.dat'
elif 'cen_max_tracker' in text:
return 'delphes_card.dat'
elif '@mg5amc' in text:
ma5_flag = [f[7:].strip() for f in text if f.startswith('@mg5amc')]
if any(f.startswith('reconstruction_name') for f in ma5_flag):
return 'madanalysis5_hadron_card.dat'
ma5_flag = [f.split('*.')[1] for f in ma5_flag if '*.' in f]
if any(f.startswith('lhe') for f in ma5_flag):
return 'madanalysis5_parton_card.dat'
if any(f.startswith(('hepmc','hep','stdhep','lhco')) for f in ma5_flag):
return 'madanalysis5_hadron_card.dat'
else:
return 'unknown'
elif '#trigger card' in text:
return 'delphes_trigger.dat'
elif 'parameter set name' in text:
return 'pgs_card.dat'
elif 'muon eta coverage' in text:
return 'pgs_card.dat'
elif 'mstp' in text and not 'b_stable' in text:
return 'pythia_card.dat'
elif 'begin minpts' in text:
return 'plot_card.dat'
elif ('gridpack' in text and 'ebeam1' in text) or \
('req_acc_fo' in text and 'ebeam1' in text):
return 'run_card.dat'
elif any(t.endswith('mw_run') for t in text):
return 'madweight_card.dat'
elif 'transfer_card.dat' in text:
return 'transfer_card.dat'
elif 'block' in text and 'decay' in text:
return 'param_card.dat'
elif 'b_stable' in text:
return 'shower_card.dat'
elif 'fo_analysis_format' in text:
return 'FO_analyse_card.dat'
elif 'main:numberofevents' in text:
return 'pythia8_card.dat'
elif 'launch' in text:
# need to separate madspin/reweight.
# decay/set can be in both...
if 'madspin' in text:
return 'madspin_card.dat'
if 'decay' in text:
                # need to check if this is a line like "decay w+" or "set decay"
if re.search("(^|;)\s*decay", fulltext):
return 'madspin_card.dat'
else:
return 'reweight_card.dat'
else:
return 'reweight_card.dat'
else:
return 'unknown'
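    # Illustrative classification examples (not part of the original code): a file
    # containing 'BLOCK' and 'DECAY' entries (and none of the earlier keywords) is
    # reported as 'param_card.dat', while one containing 'gridpack' and 'ebeam1' is
    # reported as 'run_card.dat'.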
############################################################################
def get_available_tag(self):
"""create automatically a tag"""
used_tags = [r['tag'] for r in self.results[self.run_name]]
i=0
while 1:
i+=1
if 'tag_%s' %i not in used_tags:
return 'tag_%s' % i
############################################################################
@misc.mute_logger(names=['madgraph.various.histograms',
'internal.histograms'],levels=[20,20])
def generate_Pythia8_HwU_plots(self, plot_root_path,
merging_scale_name, observable_name,
data_path):
"""Generated the HwU plots from Pythia8 driver output for a specific
observable."""
try:
import madgraph
except ImportError:
import internal.histograms as histograms
else:
import madgraph.various.histograms as histograms
# Make sure that the file is present
if not os.path.isfile(data_path):
return False
# Load the HwU file.
histos = histograms.HwUList(data_path, consider_reweights='ALL',run_id=0)
if len(histos)==0:
return False
# Now also plot the max vs min merging scale
merging_scales_available = [label[1] for label in \
histos[0].bins.weight_labels if
histograms.HwU.get_HwU_wgt_label_type(label)=='merging_scale']
if len(merging_scales_available)>=2:
min_merging_scale = min(merging_scales_available)
max_merging_scale = max(merging_scales_available)
else:
min_merging_scale = None
max_merging_scale = None
# jet_samples_to_keep = None means that all jet_samples are kept
histo_output_options = {
'format':'gnuplot',
'uncertainties':['scale','pdf','statistical',
'merging_scale','alpsfact'],
'ratio_correlations':True,
'arg_string':'Automatic plotting from MG5aMC',
'jet_samples_to_keep':None,
'use_band':['merging_scale','alpsfact'],
'auto_open':False
}
# alpsfact variation only applies to MLM
if not (int(self.run_card['ickkw'])==1):
histo_output_options['uncertainties'].pop(
histo_output_options['uncertainties'].index('alpsfact'))
histo_output_options['use_band'].pop(
histo_output_options['use_band'].index('alpsfact'))
histos.output(pjoin(plot_root_path,
'central_%s_%s_plots'%(merging_scale_name,observable_name)),
**histo_output_options)
for scale in merging_scales_available:
that_scale_histos = histograms.HwUList(
data_path, run_id=0, merging_scale=scale)
that_scale_histos.output(pjoin(plot_root_path,
'%s_%.3g_%s_plots'%(merging_scale_name,scale,observable_name)),
**histo_output_options)
# If several merging scales were specified, then it is interesting
# to compare the summed jet samples for the maximum and minimum
# merging scale available.
if not min_merging_scale is None:
min_scale_histos = histograms.HwUList(data_path,
consider_reweights=[], run_id=0,
merging_scale=min_merging_scale)
max_scale_histos = histograms.HwUList(data_path,
consider_reweights=[], run_id=0,
merging_scale=max_merging_scale)
# Give the histos types so that the plot labels look good
for histo in min_scale_histos:
if histo.type is None:
histo.type = '%s=%.4g'%(merging_scale_name, min_merging_scale)
else:
histo.type += '|%s=%.4g'%(merging_scale_name, min_merging_scale)
for histo in max_scale_histos:
if histo.type is None:
histo.type = '%s=%.4g'%(merging_scale_name, max_merging_scale)
else:
histo.type += '|%s=%.4g'%(merging_scale_name, max_merging_scale)
            # Now plot and compare against one another the shapes for the two scales
histograms.HwUList(min_scale_histos+max_scale_histos).output(
pjoin(plot_root_path,'min_max_%s_%s_comparison'
%(merging_scale_name,observable_name)),
format='gnuplot',
uncertainties=[],
ratio_correlations=True,
arg_string='Automatic plotting from MG5aMC',
jet_samples_to_keep=None,
use_band=[],
auto_open=False)
return True
def gen_card_html(self):
""" """
devnull = open(os.devnull, 'w')
try:
misc.call(['./bin/internal/gen_cardhtml-pl'], cwd=self.me_dir,
stdout=devnull, stderr=devnull)
except Exception:
pass
devnull.close()
def create_plot(self, mode='parton', event_path=None, output=None, tag=None):
"""create the plot"""
if not tag:
tag = self.run_card['run_tag']
if mode != 'Pythia8':
madir = self.options['madanalysis_path']
td = self.options['td_path']
if not madir or not td or \
not os.path.exists(pjoin(self.me_dir, 'Cards', 'plot_card.dat')):
return False
else:
PY8_plots_root_path = pjoin(self.me_dir,'HTML',
self.run_name,'%s_PY8_plots'%tag)
if 'ickkw' in self.run_card:
if int(self.run_card['ickkw']) and mode == 'Pythia':
self.update_status('Create matching plots for Pythia', level='pythia')
# recover old data if none newly created
if not os.path.exists(pjoin(self.me_dir,'Events','events.tree')):
misc.gunzip(pjoin(self.me_dir,'Events',
self.run_name, '%s_pythia_events.tree.gz' % tag), keep=True,
stdout=pjoin(self.me_dir,'Events','events.tree'))
files.mv(pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_xsecs.tree'),
pjoin(self.me_dir,'Events','xsecs.tree'))
# Generate the matching plots
misc.call([self.dirbin+'/create_matching_plots.sh',
self.run_name, tag, madir],
stdout = os.open(os.devnull, os.O_RDWR),
cwd=pjoin(self.me_dir,'Events'))
#Clean output
misc.gzip(pjoin(self.me_dir,"Events","events.tree"),
stdout=pjoin(self.me_dir,'Events',self.run_name, tag + '_pythia_events.tree.gz'))
files.mv(pjoin(self.me_dir,'Events','xsecs.tree'),
pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_xsecs.tree'))
elif mode == 'Pythia8' and (int(self.run_card['ickkw'])==1 or \
self.run_card['ktdurham']>0.0 or self.run_card['ptlund']>0.0):
self.update_status('Create matching plots for Pythia8',
level='pythia8')
# Create the directory if not existing at this stage
if not os.path.isdir(PY8_plots_root_path):
os.makedirs(PY8_plots_root_path)
merging_scale_name = 'qCut' if int(self.run_card['ickkw'])==1 \
else 'TMS'
djr_path = pjoin(self.me_dir,'Events',
self.run_name, '%s_djrs.dat' % tag)
pt_path = pjoin(self.me_dir,'Events',
self.run_name, '%s_pts.dat' % tag)
for observable_name, data_path in [('djr',djr_path),
('pt',pt_path)]:
if not self.generate_Pythia8_HwU_plots(
PY8_plots_root_path, merging_scale_name,
observable_name,data_path):
return False
if mode == 'Pythia8':
plot_files = glob.glob(pjoin(PY8_plots_root_path,'*.gnuplot'))
if not misc.which('gnuplot'):
logger.warning("Install gnuplot to be able to view the plots"+\
" generated at :\n "+\
'\n '.join('%s.gnuplot'%p for p in plot_files))
return True
for plot in plot_files:
command = ['gnuplot',plot]
try:
subprocess.call(command,cwd=PY8_plots_root_path,stderr=subprocess.PIPE)
except Exception as e:
logger.warning("Automatic processing of the Pythia8 "+\
"merging plots with gnuplot failed. Try the"+\
" following command by hand:\n %s"%(' '.join(command))+\
"\nException was: %s"%str(e))
return False
plot_files = glob.glob(pjoin(PY8_plots_root_path,'*.pdf'))
if len(plot_files)>0:
# Add an html page
html = "<html>\n<head>\n<TITLE>PLOT FOR PYTHIA8</TITLE>"
html+= '<link rel=stylesheet href="../../mgstyle.css" type="text/css">\n</head>\n<body>\n'
html += "<h2> Plot for Pythia8 </h2>\n"
html += '<a href=../../../crossx.html>return to summary</a><br>'
html += "<table>\n<tr> <td> <b>Obs.</b> </td> <td> <b>Type of plot</b> </td> <td><b> PDF</b> </td> <td><b> input file</b> </td> </tr>\n"
def sorted_plots(elem):
name = os.path.basename(elem[1])
if 'central' in name:
return -100
if 'min_max' in name:
return -10
merging_re = re.match(r'^.*_(\d+)_.*$',name)
if not merging_re is None:
return int(merging_re.group(1))
else:
return 1e10
djr_plot_files = sorted(
(('DJR',p) for p in plot_files if '_djr_' in p),
key = sorted_plots)
pt_plot_files = sorted(
(('Pt',p) for p in plot_files if '_pt_' in p),
key = sorted_plots)
last_obs = None
for obs, one_plot in djr_plot_files+pt_plot_files:
if obs!=last_obs:
# Add a line between observables
html += "<tr><td></td></tr>"
last_obs = obs
name = os.path.basename(one_plot).replace('.pdf','')
short_name = name
for dummy in ['_plots','_djr','_pt']:
short_name = short_name.replace(dummy,'')
short_name = short_name.replace('_',' ')
if 'min max' in short_name:
short_name = "%s comparison with min/max merging scale"%obs
if 'central' in short_name:
short_name = "Merging uncertainty band around central scale"
html += "<tr><td>%(obs)s</td><td>%(sn)s</td><td> <a href=./%(n)s.pdf>PDF</a> </td><td> <a href=./%(n)s.HwU>HwU</a> <a href=./%(n)s.gnuplot>GNUPLOT</a> </td></tr>\n" %\
{'obs':obs, 'sn': short_name, 'n': name}
html += '</table>\n'
html += '<a href=../../../bin/internal/plot_djrs.py> Example of code to plot the above with matplotlib </a><br><br>'
html+='</body>\n</html>'
ff=open(pjoin(PY8_plots_root_path, 'index.html'),'w')
ff.write(html)
return True
if not event_path:
if mode == 'parton':
possibilities=[
pjoin(self.me_dir, 'Events', 'unweighted_events.lhe'),
pjoin(self.me_dir, 'Events', 'unweighted_events.lhe.gz'),
pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe'),
pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe.gz')]
for event_path in possibilities:
if os.path.exists(event_path):
break
output = pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html')
elif mode == 'Pythia':
event_path = pjoin(self.me_dir, 'Events','pythia_events.lhe')
output = pjoin(self.me_dir, 'HTML',self.run_name,
'plots_pythia_%s.html' % tag)
elif mode == 'PGS':
event_path = pjoin(self.me_dir, 'Events', self.run_name,
'%s_pgs_events.lhco' % tag)
output = pjoin(self.me_dir, 'HTML',self.run_name,
'plots_pgs_%s.html' % tag)
elif mode == 'Delphes':
event_path = pjoin(self.me_dir, 'Events', self.run_name,'%s_delphes_events.lhco' % tag)
output = pjoin(self.me_dir, 'HTML',self.run_name,
'plots_delphes_%s.html' % tag)
elif mode == "shower":
event_path = pjoin(self.me_dir, 'Events','pythia_events.lhe')
output = pjoin(self.me_dir, 'HTML',self.run_name,
'plots_shower_%s.html' % tag)
if not self.options['pythia-pgs_path']:
return
else:
raise self.InvalidCmd, 'Invalid mode %s' % mode
elif mode == 'reweight' and not output:
output = pjoin(self.me_dir, 'HTML',self.run_name,
'plots_%s.html' % tag)
if not os.path.exists(event_path):
if os.path.exists(event_path+'.gz'):
misc.gunzip('%s.gz' % event_path)
else:
raise self.InvalidCmd, 'Events file %s does not exist' % event_path
elif event_path.endswith(".gz"):
misc.gunzip(event_path)
event_path = event_path[:-3]
self.update_status('Creating Plots for %s level' % mode, level = mode.lower())
mode = mode.lower()
if mode not in ['parton', 'reweight']:
plot_dir = pjoin(self.me_dir, 'HTML', self.run_name,'plots_%s_%s' % (mode.lower(),tag))
elif mode == 'parton':
plot_dir = pjoin(self.me_dir, 'HTML', self.run_name,'plots_parton')
else:
plot_dir =pjoin(self.me_dir, 'HTML', self.run_name,'plots_%s' % (tag))
if not os.path.isdir(plot_dir):
os.makedirs(plot_dir)
files.ln(pjoin(self.me_dir, 'Cards','plot_card.dat'), plot_dir, 'ma_card.dat')
try:
proc = misc.Popen([os.path.join(madir, 'plot_events')],
stdout = open(pjoin(plot_dir, 'plot.log'),'w'),
stderr = subprocess.STDOUT,
stdin=subprocess.PIPE,
cwd=plot_dir)
proc.communicate('%s\n' % event_path)
del proc
#proc.wait()
misc.call(['%s/plot' % self.dirbin, madir, td],
stdout = open(pjoin(plot_dir, 'plot.log'),'a'),
stderr = subprocess.STDOUT,
cwd=plot_dir)
misc.call(['%s/plot_page-pl' % self.dirbin,
os.path.basename(plot_dir),
mode],
stdout = open(pjoin(plot_dir, 'plot.log'),'a'),
stderr = subprocess.STDOUT,
cwd=pjoin(self.me_dir, 'HTML', self.run_name))
shutil.move(pjoin(self.me_dir, 'HTML',self.run_name ,'plots.html'),
output)
logger.info("Plots for %s level generated, see %s" % \
(mode, output))
except OSError, error:
logger.error('fail to create plot: %s. Please check that MadAnalysis is correctly installed.' % error)
self.update_status('End Plots for %s level' % mode, level = mode.lower(),
makehtml=False)
return True
def run_hep2lhe(self, banner_path = None):
"""Run hep2lhe on the file Events/pythia_events.hep"""
if not self.options['pythia-pgs_path']:
raise self.InvalidCmd, 'No pythia-pgs path defined'
pydir = pjoin(self.options['pythia-pgs_path'], 'src')
eradir = self.options['exrootanalysis_path']
# Creating LHE file
if misc.is_executable(pjoin(pydir, 'hep2lhe')):
self.update_status('Creating shower LHE File (for plot)', level='pythia')
# Write the banner to the LHE file
out = open(pjoin(self.me_dir,'Events','pythia_events.lhe'), 'w')
#out.writelines('<LesHouchesEvents version=\"1.0\">\n')
out.writelines('<!--\n')
out.writelines('# Warning! Never use this file for detector studies!\n')
out.writelines('-->\n<!--\n')
if banner_path:
out.writelines(open(banner_path).read().replace('<LesHouchesEvents version="1.0">',''))
out.writelines('\n-->\n')
out.close()
self.cluster.launch_and_wait(self.dirbin+'/run_hep2lhe',
argument= [pydir],
cwd=pjoin(self.me_dir,'Events'),
stdout=os.devnull)
logger.info('Warning! Never use this lhe file for detector studies!')
# Creating ROOT file
if eradir and misc.is_executable(pjoin(eradir, 'ExRootLHEFConverter')):
self.update_status('Creating Pythia LHE Root File', level='pythia')
try:
misc.call([eradir+'/ExRootLHEFConverter',
'pythia_events.lhe',
pjoin(self.run_name, '%s_pythia_lhe_events.root' % self.run_tag)],
cwd=pjoin(self.me_dir,'Events'))
except Exception, error:
misc.sprint('ExRootLHEFConverter fails', str(error),
log=logger)
pass
def store_result(self):
"""Dummy routine, to be overwritten by daughter classes"""
pass
############################################################################
def help_systematics(self):
"""help for systematics command"""
logger.info("syntax: systematics RUN_NAME [OUTPUT] [options]",'$MG:color:BLACK')
logger.info("-- Run the systematics run on the RUN_NAME run.")
logger.info(" RUN_NAME can be a path to a lhef file.")
logger.info(" OUTPUT can be the path to the output lhe file, otherwise the input file will be overwritten")
logger.info("")
logger.info("options: (values written are the default)", '$MG:color:BLACK')
logger.info("")
logger.info(" --mur=0.5,1,2 # specify the values for renormalisation scale variation")
logger.info(" --muf=0.5,1,2 # specify the values for factorisation scale variation")
logger.info(" --alps=1 # specify the values for MLM emission scale variation (LO only)")
logger.info(" --dyn=-1,1,2,3,4 # specify the dynamical schemes to use.")
logger.info(" # -1 is the one used by the sample.")
logger.info(" # > 0 correspond to options of dynamical_scale_choice of the run_card.")
logger.info(" --pdf=errorset # specify the pdfs to use for pdf variation. (see below)")
logger.info(" --together=mur,muf,dyn # lists the parameter that must be varied simultaneously so as to ")
logger.info(" # compute the weights for all combinations of their variations.")
logger.info(" --from_card # use the information from the run_card (LO only).")
logger.info(" --remove_weights= # remove previously written weights matching the descriptions")
logger.info(" --keep_weights= # force to keep the weight even if in the list of remove_weights")
logger.info(" --start_id= # define the starting digit for the additial weight. If not specify it is determine automatically")
logger.info("")
logger.info(" Allowed value for the pdf options:", '$MG:color:BLACK')
logger.info(" central : Do not perform any pdf variation" )
logger.info(" errorset : runs over the all the members of the PDF set used to generate the events")
logger.info(" 244800 : runs over the associated set and all its members")
logger.info(" 244800@0 : runs over the central member of the associated set")
# logger.info(" 244800@X : runs over the Xth set of the associated error set")
logger.info(" CT10 : runs over the associated set and all its members")
logger.info(" CT10@0 : runs over the central member of the associated set")
logger.info(" CT10@X : runs over the Xth member of the associated PDF set")
logger.info(" XX,YY,ZZ : runs over the sets for XX,YY,ZZ (those three follows above syntax)")
logger.info("")
logger.info(" Allowed value for the keep/remove_wgts options:", '$MG:color:BLACK')
logger.info(" all : keep/remove all weights")
logger.info(" name : keep/remove that particular weight")
logger.info(" id1,id2 : keep/remove all the weights between those two values --included--")
logger.info(" PATTERN : keep/remove all the weights matching the (python) regular expression.")
logger.info(" note that multiple entry of those arguments are allowed")
def complete_systematics(self, text, line, begidx, endidx):
"""auto completion for the systematics command"""
args = self.split_arg(line[0:begidx], error=False)
options = ['--mur=', '--muf=', '--pdf=', '--dyn=','--alps=',
'--together=','--from_card ','--remove_wgts=',
'--keep_wgts=','--start_id=']
if len(args) == 1 and os.path.sep not in text:
#return valid run_name
data = misc.glob(pjoin('*','*events.lhe*'), pjoin(self.me_dir, 'Events'))
data = [n.rsplit('/',2)[1] for n in data]
return self.list_completion(text, data, line)
elif len(args)==1:
#logger.warning('1args')
return self.path_completion(text,
os.path.join('.',*[a for a in args \
if a.endswith(os.path.sep)]))
elif len(args)==2 and os.path.sep in args[1]:
#logger.warning('2args %s', args[1])
return self.path_completion(text, '.')
elif not line.endswith(tuple(options)):
return self.list_completion(text, options)
############################################################################
def do_systematics(self, line):
""" syntax is 'systematics [INPUT [OUTPUT]] OPTIONS'
--mur=0.5,1,2
--muf=0.5,1,2
--alps=1
--dyn=-1
--together=mur,muf #can be repeated
#special options
--from_card=
"""
try:
lhapdf_version = self.get_lhapdf_version()
except Exception:
            logger.info('No version of lhapdf found. Cannot run the systematics computation.')
return
else:
if lhapdf_version.startswith('5'):
                logger.info('cannot run systematics with lhapdf 5')
return
lhapdf = misc.import_python_lhapdf(self.options['lhapdf'])
if not lhapdf:
            logger.info('cannot run systematics since python cannot be linked to lhapdf')
return
self.update_status('Running Systematics computation', level='parton')
args = self.split_arg(line)
#split arguments and option
opts= []
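        # keep positional arguments in args; option strings (starting with '-') are
        # moved into opts as a side effect (list.append returns None, so the 'or'
        # clause never keeps them in args)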
args = [a for a in args if not a.startswith('-') or opts.append(a)]
#check sanity of options
if any(not o.startswith(('--mur=', '--muf=', '--alps=','--dyn=','--together=','--from_card','--pdf=',
'--remove_wgts=', '--keep_wgts','--start_id='))
for o in opts):
raise self.InvalidCmd, "command systematics called with invalid option syntax. Please retry."
# check that we have define the input
if len(args) == 0:
if self.run_name:
                args.append(self.run_name)
else:
raise self.InvalidCmd, 'no default run. Please specify the run_name'
if args[0] != self.run_name:
self.set_run_name(args[0])
# always pass to a path + get the event size
result_file= sys.stdout
if not os.path.isfile(args[0]) and not os.path.sep in args[0]:
path = [pjoin(self.me_dir, 'Events', args[0], 'unweighted_events.lhe.gz'),
pjoin(self.me_dir, 'Events', args[0], 'unweighted_events.lhe'),
pjoin(self.me_dir, 'Events', args[0], 'events.lhe.gz'),
pjoin(self.me_dir, 'Events', args[0], 'events.lhe')]
for p in path:
if os.path.exists(p):
nb_event = self.results[args[0]].get_current_info()['nb_event']
if self.run_name != args[0]:
tag = self.results[args[0]].tags[0]
self.set_run_name(args[0], tag,'parton', False)
result_file = open(pjoin(self.me_dir,'Events', self.run_name, 'parton_systematics.log'),'w')
args[0] = p
break
else:
raise self.InvalidCmd, 'Invalid run name. Please retry'
elif self.options['nb_core'] != 1:
lhe = lhe_parser.EventFile(args[0])
nb_event = len(lhe)
lhe.close()
input = args[0]
if len(args)>1:
output = pjoin(os.getcwd(),args[1])
else:
output = input
lhaid = [self.run_card.get_lhapdf_id()]
if 'store_rwgt_info' in self.run_card and not self.run_card['store_rwgt_info']:
raise self.InvalidCmd, "The events was not generated with store_rwgt_info=True. Can not evaluate systematics error on this event file."
elif 'use_syst' in self.run_card:
if not self.run_card['use_syst']:
raise self.InvalidCmd, "The events was not generated with use_syst=True. Can not evaluate systematics error on this event file."
elif self.proc_characteristics['ninitial'] ==1:
if '--from_card' in opts:
                    logger.warning('systematics are not available for decay processes. Bypassing them.')
return
else:
                    raise self.InvalidCmd, 'systematics are not available for decay processes.'
try:
pdfsets_dir = self.get_lhapdf_pdfsetsdir()
except Exception, error:
logger.debug(str(error))
            logger.warning('Systematics computation requires lhapdf to run. Bypassing systematics.')
return
if '--from_card' in opts:
opts.remove('--from_card')
opts.append('--from_card=internal')
# Check that all pdfset are correctly installed
if 'sys_pdf' in self.run_card:
if '&&' in self.run_card['sys_pdf']:
line = ' '.join(self.run_card['sys_pdf'])
sys_pdf = line.split('&&')
lhaid += [l.split()[0] for l in sys_pdf]
else:
lhaid += [l for l in self.run_card['sys_pdf'].split() if not l.isdigit() or int(l) > 500]
else:
            # check that all PDF sets requested via the --pdf option are included
pdf = [a[6:] for a in opts if a.startswith('--pdf=')]
lhaid += [t.split('@')[0] for p in pdf for t in p.split(',')
if t not in ['errorset', 'central']]
# Copy all the relevant PDF sets
try:
[self.copy_lhapdf_set([onelha], pdfsets_dir) for onelha in lhaid]
except Exception, error:
logger.debug(str(error))
            logger.warning('unable to download all the PDF sets. Bypassing systematics.')
return
if self.options['run_mode'] ==2:
nb_submit = min(self.options['nb_core'], nb_event//2500)
elif self.options['run_mode'] ==1:
nb_submit = min(self.options['cluster_size'], nb_event//25000)
else:
nb_submit =1
if MADEVENT:
import internal.systematics as systematics
else:
import madgraph.various.systematics as systematics
#one core:
if nb_submit in [0,1]:
systematics.call_systematics([input, output] + opts,
log=lambda x: logger.info(str(x)),
result=result_file
)
elif self.options['run_mode'] in [1,2]:
event_per_job = nb_event // nb_submit
nb_job_with_plus_one = nb_event % nb_submit
start_event, stop_event = 0,0
for i in range(nb_submit):
#computing start/stop event
event_requested = event_per_job
if i < nb_job_with_plus_one:
event_requested += 1
start_event = stop_event
stop_event = start_event + event_requested
prog = sys.executable
input_files = [os.path.basename(input)]
output_files = ['./tmp_%s_%s' % (i, os.path.basename(output)),
'./log_sys_%s.txt' % (i)]
argument = []
if not __debug__:
argument.append('-O')
argument += [pjoin(self.me_dir, 'bin', 'internal', 'systematics.py'),
input_files[0], output_files[0]] + opts +\
['--start_event=%i' % start_event,
'--stop_event=%i' %stop_event,
'--result=./log_sys_%s.txt' %i,
'--lhapdf_config=%s' % self.options['lhapdf']]
required_output = output_files
self.cluster.cluster_submit(prog, argument,
input_files=input_files,
output_files=output_files,
cwd=os.path.dirname(output),
required_output=required_output,
stdout='/dev/null'
)
starttime = time.time()
update_status = lambda idle, run, finish: \
self.update_status((idle, run, finish, 'running systematics'), level=None,
force=False, starttime=starttime)
try:
self.cluster.wait(os.path.dirname(output), update_status, update_first=update_status)
except Exception:
self.cluster.remove()
old_run_mode = self.options['run_mode']
self.options['run_mode'] =0
try:
out = self.do_systematics(line)
finally:
self.options['run_mode'] = old_run_mode
#collect the data
all_cross = []
for i in range(nb_submit):
pos=0
for line in open(pjoin(os.path.dirname(output), 'log_sys_%s.txt'%i)):
if line.startswith('#'):
continue
split = line.split()
if len(split) in [0,1]:
continue
key = tuple(float(x) for x in split[:-1])
cross= float(split[-1])
if 'event_norm' in self.run_card and \
self.run_card['event_norm'] in ['average', 'unity', 'bias']:
cross *= (event_per_job+1 if i <nb_job_with_plus_one else event_per_job)
if len(all_cross) > pos:
all_cross[pos] += cross
else:
all_cross.append(cross)
pos+=1
if 'event_norm' in self.run_card and \
self.run_card['event_norm'] in ['unity']:
all_cross= [cross/nb_event for cross in all_cross]
sys_obj = systematics.call_systematics([input, None] + opts,
log=lambda x: logger.info(str(x)),
result=result_file,
running=False
)
sys_obj.print_cross_sections(all_cross, nb_event, result_file)
#concatenate the output file
subprocess.call(['cat']+\
['./tmp_%s_%s' % (i, os.path.basename(output)) for i in range(nb_submit)],
stdout=open(output,'w'),
cwd=os.path.dirname(output))
for i in range(nb_submit):
os.remove('%s/tmp_%s_%s' %(os.path.dirname(output),i,os.path.basename(output)))
# os.remove('%s/log_sys_%s.txt' % (os.path.dirname(output),i))
self.update_status('End of systematics computation', level='parton', makehtml=False)
############################################################################
def do_reweight(self, line):
""" syntax is "reweight RUN_NAME"
        Allows the events generated with one choice of model parameters to be
        reweighted for a new choice of parameters. A description of the method is
        available here: cp3.irmp.ucl.ac.be/projects/madgraph/wiki/Reweight
"""
#### Utility function
def check_multicore(self):
""" determine if the cards are save for multicore use"""
card = pjoin(self.me_dir, 'Cards', 'reweight_card.dat')
multicore = True
if self.options['run_mode'] in [0,1]:
multicore = False
lines = [l.strip() for l in open(card) if not l.strip().startswith('#')]
while lines and not lines[0].startswith('launch'):
line = lines.pop(0)
                # if not in standard output mode, forbid multicore mode
if line.startswith('change') and line[6:].strip().startswith('output'):
return False
if line.startswith('change') and line[6:].strip().startswith('multicore'):
split_line = line.split()
if len(split_line) > 2:
multicore = bool(split_line[2])
            # we have reached the first launch in the card; ensure that no output
            # changes are done after that point.
lines = [line[6:].strip() for line in lines if line.startswith('change')]
for line in lines:
if line.startswith(('process','model','output', 'rwgt_dir')):
return False
elif line.startswith('multicore'):
split_line = line.split()
if len(split_line) > 1:
multicore = bool(split_line[1])
return multicore
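        # Illustrative sketch (editor's addition): based on the parsing above, a
        # reweight_card containing a line of the form
        #   change output <mode>
        # before its first 'launch' block is treated as unsafe for multicore use,
        # so the reweighting then falls back to single-core processing.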
if '-from_cards' in line and not os.path.exists(pjoin(self.me_dir, 'Cards', 'reweight_card.dat')):
return
        # option for multicore mode to avoid having all processes create the same directory
if '--multicore=create' in line:
multicore='create'
elif '--multicore=wait' in line:
multicore='wait'
else:
multicore=False
# Check that MG5 directory is present .
if MADEVENT and not self.options['mg5_path']:
raise self.InvalidCmd, '''The module reweight requires that MG5 is installed on the system.
You can install it and set its path in ./Cards/me5_configuration.txt'''
elif MADEVENT:
sys.path.append(self.options['mg5_path'])
try:
import madgraph.interface.reweight_interface as reweight_interface
except ImportError:
raise self.ConfigurationError, '''Can\'t load Reweight module.
The variable mg5_path might not be correctly configured.'''
if not '-from_cards' in line:
self.keep_cards(['reweight_card.dat'], ignore=['*'])
self.ask_edit_cards(['reweight_card.dat'], 'fixed', plot=False)
# load the name of the event file
args = self.split_arg(line)
if not self.force_run:
# forbid this function to create an empty item in results.
if self.run_name and self.results.current and self.results.current['cross'] == 0:
self.results.delete_run(self.run_name, self.run_tag)
self.results.save()
# ensure that the run_card is present
if not hasattr(self, 'run_card'):
self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat'))
# we want to run this in a separate shell to avoid hard f2py crash
command = [sys.executable]
if os.path.exists(pjoin(self.me_dir, 'bin', 'madevent')):
command.append(pjoin(self.me_dir, 'bin', 'internal','madevent_interface.py'))
else:
command.append(pjoin(self.me_dir, 'bin', 'internal', 'amcatnlo_run_interface.py'))
if not isinstance(self, cmd.CmdShell):
command.append('--web')
command.append('reweight')
######### START SINGLE CORE MODE ############
if self.options['nb_core']==1 or self.run_card['nevents'] < 101 or not check_multicore(self):
if self.run_name:
command.append(self.run_name)
else:
command += args
if '-from_cards' not in command:
command.append('-from_cards')
p = misc.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, cwd=os.getcwd())
while p.poll() is None:
line = p.stdout.readline()
if any(t in line for t in ['INFO:', 'WARNING:', 'CRITICAL:', 'ERROR:', 'root:','KEEP:']) and \
not '***********' in line:
print line[:-1].replace('INFO', 'REWEIGHT').replace('KEEP:','')
elif __debug__ and line:
logger.debug(line[:-1])
if p.returncode !=0:
logger.error("Reweighting failed")
return
self.results = self.load_results_db()
# forbid this function to create an empty item in results.
try:
if self.results[self.run_name][-2]['cross']==0:
self.results.delete_run(self.run_name,self.results[self.run_name][-2]['tag'])
except:
pass
try:
if self.results.current['cross'] == 0 and self.run_name:
self.results.delete_run(self.run_name, self.run_tag)
except:
pass
# re-define current run
try:
self.results.def_current(self.run_name, self.run_tag)
except Exception:
pass
return
########## END SINGLE CORE HANDLING #############
else:
########## START MULTI-CORE HANDLING #############
if not isinstance(self.cluster, cluster.MultiCore):
mycluster = cluster.MultiCore(nb_core=self.options['nb_core'])
else:
mycluster = self.cluster
new_args=list(args)
self.check_decay_events(new_args)
try:
os.remove(pjoin(self.me_dir,'rw_me','rwgt.pkl'))
except Exception, error:
pass
# prepare multi-core job:
import madgraph.various.lhe_parser as lhe_parser
            # args now always contains the path to the valid files
if 'nevt_job' in self.run_card and self.run_card['nevt_job'] !=-1:
nevt_job = self.run_card['nevt_job']
else:
nevt_job = max(2500, self.run_card['nevents']/self.options['nb_core'])
logger.info("split the event file in bunch of %s events" % nevt_job)
nb_file = lhe_parser.EventFile(new_args[0]).split(nevt_job)
starttime = time.time()
update_status = lambda idle, run, finish: \
self.update_status((idle, run, finish, 'reweight'), level=None,
force=False, starttime=starttime)
all_lhe = []
devnull= open(os.devnull)
for i in range(nb_file):
new_command = list(command)
new_command.append('%s_%s.lhe' % (new_args[0],i))
all_lhe.append('%s_%s.lhe' % (new_args[0],i))
if '-from_cards' not in command:
new_command.append('-from_cards')
if i==0:
if __debug__:
stdout = None
else:
stdout = open(pjoin(self.me_dir,'Events', self.run_name, 'reweight.log'),'w')
new_command.append('--multicore=create')
else:
stdout = devnull
#stdout = open(pjoin(self.me_dir,'Events', self.run_name, 'reweight%s.log' % i),'w')
new_command.append('--multicore=wait')
mycluster.submit(prog=command[0], argument=new_command[1:], stdout=stdout, cwd=os.getcwd())
mycluster.wait(self.me_dir,update_status)
devnull.close()
logger.info("Collect and combine the various output file.")
lhe = lhe_parser.MultiEventFile(all_lhe, parse=False)
nb_event, cross_sections = lhe.write(new_args[0], get_info=True)
if any(os.path.exists('%s_%s_debug.log' % (f, self.run_tag)) for f in all_lhe):
for f in all_lhe:
if os.path.exists('%s_%s_debug.log' % (f, self.run_tag)):
raise Exception, "Some of the run failed: Please read %s_%s_debug.log" % (f, self.run_tag)
if 'event_norm' in self.run_card and self.run_card['event_norm'] in ['average','bias']:
for key, value in cross_sections.items():
cross_sections[key] = value / (nb_event+1)
lhe.remove()
for key in cross_sections:
if key == 'orig' or key.isdigit():
continue
logger.info('%s : %s pb' % (key, cross_sections[key]))
return
########## END MULTI-CORE HANDLING #############
self.to_store.append('event')
# forbid this function to create an empty item in results.
if not self.force_run and self.results.current['cross'] == 0 and self.run_name:
self.results.delete_run(self.run_name, self.run_tag)
self.check_decay_events(args)
        # args now always contains the path to the valid files
reweight_cmd = reweight_interface.ReweightInterface(args[0], mother=self)
#reweight_cmd.use_rawinput = False
#reweight_cmd.mother = self
wgt_names = reweight_cmd.get_weight_names()
if wgt_names == [''] and reweight_cmd.has_nlo:
self.update_status('Running Reweighting (LO approximate)', level='madspin')
else:
self.update_status('Running Reweighting', level='madspin')
path = pjoin(self.me_dir, 'Cards', 'reweight_card.dat')
reweight_cmd.raw_input=False
reweight_cmd.me_dir = self.me_dir
reweight_cmd.multicore = multicore #allow the directory creation or not
print "We are in mode", multicore
reweight_cmd.import_command_file(path)
reweight_cmd.do_quit('')
logger.info("quit rwgt")
# re-define current run
try:
self.results.def_current(self.run_name, self.run_tag)
except Exception:
pass
############################################################################
def do_pgs(self, line):
"""launch pgs"""
args = self.split_arg(line)
# Check argument's validity
if '--no_default' in args:
no_default = True
args.remove('--no_default')
else:
no_default = False
if no_default and not os.path.exists(pjoin(self.me_dir, 'Cards', 'pgs_card.dat')):
            logger.info('No pgs_card detected, so not running PGS')
return
# Check all arguments
        # This might launch a gunzip in another thread. After the question,
        # that thread needs to be waited on for completion. (This allows the
        # question to be asked right away while the computer keeps working.)
        # If lock is defined, it is a locker for the completion of that thread.
lock = self.check_pgs(args, no_default=no_default)
# Check that the pgs_card exists. If not copy the default
if not os.path.exists(pjoin(self.me_dir, 'Cards', 'pgs_card.dat')):
files.cp(pjoin(self.me_dir, 'Cards', 'pgs_card_default.dat'),
pjoin(self.me_dir, 'Cards', 'pgs_card.dat'))
            logger.info('No pgs_card found. Taking the default one.')
if not (no_default or self.force):
self.ask_edit_cards(['pgs_card.dat'])
self.update_status('prepare PGS run', level=None)
pgsdir = pjoin(self.options['pythia-pgs_path'], 'src')
eradir = self.options['exrootanalysis_path']
madir = self.options['madanalysis_path']
td = self.options['td_path']
# Compile pgs if not there
if not misc.is_executable(pjoin(pgsdir, 'pgs')):
logger.info('No PGS executable -- running make')
misc.compile(cwd=pgsdir)
self.update_status('Running PGS', level='pgs')
tag = self.run_tag
# Update the banner with the pgs card
banner_path = pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, self.run_tag))
if os.path.exists(pjoin(self.me_dir, 'Source', 'banner_header.txt')):
self.banner.add(pjoin(self.me_dir, 'Cards','pgs_card.dat'))
self.banner.write(banner_path)
else:
open(banner_path, 'w').close()
########################################################################
# now pass the event to a detector simulator and reconstruct objects
########################################################################
if lock:
lock.wait()
# Prepare the output file with the banner
ff = open(pjoin(self.me_dir, 'Events', 'pgs_events.lhco'), 'w')
if os.path.exists(pjoin(self.me_dir, 'Source', 'banner_header.txt')):
text = open(banner_path).read()
text = '#%s' % text.replace('\n','\n#')
dico = self.results[self.run_name].get_current_info()
text +='\n## Integrated weight (pb) : %.4g' % dico['cross']
text +='\n## Number of Event : %s\n' % dico['nb_event']
ff.writelines(text)
ff.close()
try:
os.remove(pjoin(self.me_dir, 'Events', 'pgs.done'))
except Exception:
pass
pgs_log = pjoin(self.me_dir, 'Events', self.run_name, "%s_pgs.log" % tag)
self.cluster.launch_and_wait('../bin/internal/run_pgs',
argument=[pgsdir], cwd=pjoin(self.me_dir,'Events'),
stdout=pgs_log, stderr=subprocess.STDOUT)
if not os.path.exists(pjoin(self.me_dir, 'Events', 'pgs.done')):
            logger.error('Failed to create LHCO events')
return
else:
os.remove(pjoin(self.me_dir, 'Events', 'pgs.done'))
if os.path.getsize(banner_path) == os.path.getsize(pjoin(self.me_dir, 'Events','pgs_events.lhco')):
misc.call(['cat pgs_uncleaned_events.lhco >> pgs_events.lhco'],
cwd=pjoin(self.me_dir, 'Events'))
            os.remove(pjoin(self.me_dir, 'Events', 'pgs_uncleaned_events.lhco'))
# Creating Root file
if eradir and misc.is_executable(pjoin(eradir, 'ExRootLHCOlympicsConverter')):
self.update_status('Creating PGS Root File', level='pgs')
try:
misc.call([eradir+'/ExRootLHCOlympicsConverter',
'pgs_events.lhco',pjoin('%s/%s_pgs_events.root' % (self.run_name, tag))],
cwd=pjoin(self.me_dir, 'Events'))
except Exception:
                logger.warning('failed to produce Root output [problem with ExRootAnalysis]')
if os.path.exists(pjoin(self.me_dir, 'Events', 'pgs_events.lhco')):
# Creating plots
files.mv(pjoin(self.me_dir, 'Events', 'pgs_events.lhco'),
pjoin(self.me_dir, 'Events', self.run_name, '%s_pgs_events.lhco' % tag))
self.create_plot('PGS')
misc.gzip(pjoin(self.me_dir, 'Events', self.run_name, '%s_pgs_events.lhco' % tag))
self.update_status('finish', level='pgs', makehtml=False)
############################################################################
def do_compute_widths(self, line):
"""Require MG5 directory: Compute automatically the widths of a set
of particles"""
args = self.split_arg(line)
opts = self.check_compute_widths(args)
from madgraph.interface.master_interface import MasterCmd
cmd = MasterCmd()
self.define_child_cmd_interface(cmd, interface=False)
cmd.exec_cmd('set automatic_html_opening False --no_save')
if not opts['path']:
opts['path'] = pjoin(self.me_dir, 'Cards', 'param_card.dat')
if not opts['force'] :
self.ask_edit_cards(['param_card'],[], plot=False)
line = 'compute_widths %s %s' % \
(' '.join([str(i) for i in opts['particles']]),
' '.join('--%s=%s' % (key,value) for (key,value) in opts.items()
if key not in ['model', 'force', 'particles'] and value))
cmd.exec_cmd(line, model=opts['model'])
self.child = None
del cmd
############################################################################
def do_print_results(self, line):
"""Not in help:Print the cross-section/ number of events for a given run"""
args = self.split_arg(line)
options={'path':None, 'mode':'w', 'format':'full'}
for arg in list(args):
if arg.startswith('--') and '=' in arg:
name,value=arg.split('=',1)
                name = name[2:]
options[name] = value
args.remove(arg)
if len(args) > 0:
run_name = args[0]
else:
for i, run_name in enumerate(self.results.order):
for j, one_result in enumerate(self.results[run_name]):
if i or j:
options['mode'] = "a"
if options['path']:
self.print_results_in_file(one_result, options['path'], options['mode'], options['format'])
else:
self.print_results_in_shell(one_result)
return
if run_name not in self.results:
raise self.InvalidCmd('%s is not a valid run_name or it doesn\'t have any information' \
% run_name)
if len(args) == 2:
tag = args[1]
if tag.isdigit():
tag = int(tag) - 1
if len(self.results[run_name]) < tag:
                    raise self.InvalidCmd('Only %s different tags available' % \
len(self.results[run_name]))
data = self.results[run_name][tag]
else:
data = self.results[run_name].return_tag(tag)
else:
data = self.results[run_name].return_tag(None) # return the last
if options['path']:
self.print_results_in_file(data, options['path'], options['mode'], options['format'])
else:
self.print_results_in_shell(data)
def configure_directory(self, *args, **opts):
""" All action require before any type of run. Typically overloaded by
daughters if need be."""
pass
############################################################################
# Start of MadAnalysis5 related function
############################################################################
@staticmethod
def runMA5(MA5_interpreter, MA5_cmds, MA5_runtag, logfile_path, advertise_log=True):
""" Run MA5 in a controlled environnment."""
successfull_MA5_run = True
try:
# Predefine MA5_logger as None in case we don't manage to retrieve it.
MA5_logger = None
MA5_logger = logging.getLogger('MA5')
BackUp_MA5_handlers = MA5_logger.handlers
for handler in BackUp_MA5_handlers:
MA5_logger.removeHandler(handler)
file_handler = logging.FileHandler(logfile_path)
MA5_logger.addHandler(file_handler)
if advertise_log:
logger.info("Follow Madanalysis5 run with the following command in a separate terminal:")
logger.info(' tail -f %s'%logfile_path)
# Now the magic, finally call MA5.
with misc.stdchannel_redirected(sys.stdout, os.devnull):
with misc.stdchannel_redirected(sys.stderr, os.devnull):
MA5_interpreter.print_banner()
MA5_interpreter.load(MA5_cmds)
except Exception as e:
logger.warning("MadAnalysis5 failed to run the commands for task "+
"'%s'. Madanalys5 analysis will be skipped."%MA5_runtag)
error=StringIO.StringIO()
traceback.print_exc(file=error)
logger.debug('MadAnalysis5 error was:')
logger.debug('-'*60)
logger.debug(error.getvalue()[:-1])
logger.debug('-'*60)
successfull_MA5_run = False
finally:
if not MA5_logger is None:
for handler in MA5_logger.handlers:
MA5_logger.removeHandler(handler)
for handler in BackUp_MA5_handlers:
MA5_logger.addHandler(handler)
return successfull_MA5_run
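    # Illustrative usage sketch (editor's addition; the command strings below are
    # hypothetical and only meant to show the calling convention of runMA5):
    #   ok = CommonRunCmd.runMA5(ma5_interpreter,
    #                            ['import /path/to/events.lhe', 'submit'],
    #                            'default', '/tmp/ma5_default.log')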
#===============================================================================
    # Return a Main instance of MadAnalysis5, provided its path
#===============================================================================
@staticmethod
def get_MadAnalysis5_interpreter(mg5_path, ma5_path, mg5_interface=None,
logstream = sys.stdout, loglevel =logging.INFO, forced = True,
compilation=False):
""" Makes sure to correctly setup paths and constructs and return an MA5 path"""
MA5path = os.path.normpath(pjoin(mg5_path,ma5_path))
if MA5path is None or not os.path.isfile(pjoin(MA5path,'bin','ma5')):
return None
if MA5path not in sys.path:
sys.path.insert(0, MA5path)
try:
            # We must back up the readline module attributes because they get modified
            # when MA5 imports ROOT, which supersedes the MG5 autocompletion
import readline
old_completer = readline.get_completer()
old_delims = readline.get_completer_delims()
old_history = [readline.get_history_item(i) for i in range(1,readline.get_current_history_length()+1)]
except ImportError:
old_completer, old_delims, old_history = None, None, None
try:
from madanalysis.interpreter.ma5_interpreter import MA5Interpreter
with misc.stdchannel_redirected(sys.stdout, os.devnull):
with misc.stdchannel_redirected(sys.stderr, os.devnull):
MA5_interpreter = MA5Interpreter(MA5path, LoggerLevel=loglevel,
LoggerStream=logstream,forced=forced,
no_compilation=not compilation)
except Exception as e:
            logger.warning('MadAnalysis5 failed to start, so the MA5 analysis will be skipped.')
error=StringIO.StringIO()
traceback.print_exc(file=error)
logger.debug('MadAnalysis5 error was:')
logger.debug('-'*60)
logger.debug(error.getvalue()[:-1])
logger.debug('-'*60)
MA5_interpreter = None
finally:
# Now restore the readline MG5 state
if not old_history is None:
readline.clear_history()
for line in old_history:
readline.add_history(line)
if not old_completer is None:
readline.set_completer(old_completer)
if not old_delims is None:
readline.set_completer_delims(old_delims)
# Also restore the completion_display_matches_hook if an mg5 interface
# is specified as it could also have been potentially modified
if not mg5_interface is None and any(not elem is None for elem in [old_completer, old_delims, old_history]):
mg5_interface.set_readline_completion_display_matches_hook()
return MA5_interpreter
def check_madanalysis5(self, args, mode='parton'):
"""Check the argument for the madanalysis5 command
syntax: madanalysis5_parton [NAME]
"""
MA5_options = {'MA5_stdout_lvl':'default'}
stdout_level_tags = [a for a in args if a.startswith('--MA5_stdout_lvl=')]
for slt in stdout_level_tags:
lvl = slt.split('=')[1].strip()
try:
# It is likely an int
MA5_options['MA5_stdout_lvl']=int(lvl)
except ValueError:
if lvl.startswith('logging.'):
lvl = lvl[8:]
try:
MA5_options['MA5_stdout_lvl'] = getattr(logging, lvl)
except:
raise InvalidCmd("MA5 output level specification"+\
" '%s' is incorrect." % str(lvl))
args.remove(slt)
if mode=='parton':
# We will attempt to run MA5 on the parton level output
# found in the last run if not specified.
MA5_options['inputs'] = '*.lhe'
elif mode=='hadron':
# We will run MA5 on all sources of post-partonic output we
# can find if not specified. PY8 is a keyword indicating shower
# piped to MA5.
MA5_options['inputs'] = ['fromCard']
else:
            raise MadGraph5Error('Mode %s not recognized'%mode+
' in function check_madanalysis5.')
        # If no madanalysis5 path is set
if not self.options['madanalysis5_path']:
logger.info('Now trying to read the configuration file again'+
' to find MadAnalysis5 path')
self.set_configuration()
if not self.options['madanalysis5_path'] or not \
os.path.exists(pjoin(self.options['madanalysis5_path'],'bin','ma5')):
error_msg = 'No valid MadAnalysis5 path set.\n'
error_msg += 'Please use the set command to define the path and retry.\n'
error_msg += 'You can also define it in the configuration file.\n'
error_msg += 'Finally, it can be installed automatically using the'
error_msg += ' install command.\n'
raise self.InvalidCmd(error_msg)
# Now make sure that the corresponding default card exists
if not os.path.isfile(pjoin(self.me_dir,
'Cards','madanalysis5_%s_card.dat'%mode)):
raise self.InvalidCmd('Your installed version of MadAnalysis5 and/or'+\
' MadGraph5_aMCatNLO does not seem to support analysis at'+
                                    ' %s level.'%mode)
tag = [a for a in args if a.startswith('--tag=')]
if tag:
args.remove(tag[0])
tag = tag[0][6:]
if len(args) == 0 and not self.run_name:
if self.results.lastrun:
args.insert(0, self.results.lastrun)
else:
raise self.InvalidCmd('No run name currently defined. '+
'Please add this information.')
if len(args) >= 1:
if mode=='parton' and args[0] != self.run_name and \
not os.path.exists(pjoin(self.me_dir,'Events',args[0],
'unweighted_events.lhe.gz')) and not os.path.exists(
pjoin(self.me_dir,'Events',args[0])):
raise self.InvalidCmd('No events file in the %s run.'%args[0])
self.set_run_name(args[0], tag, level='madanalysis5_%s'%mode)
else:
if tag:
self.run_card['run_tag'] = args[0]
self.set_run_name(self.run_name, tag, level='madanalysis5_%s'%mode)
if mode=='parton':
if any(t for t in args if t.startswith('--input=')):
raise InvalidCmd('The option --input=<input_file> is not'+
' available when running partonic MadAnalysis5 analysis. The'+
' .lhe output of the selected run is used automatically.')
input_file = pjoin(self.me_dir,'Events',self.run_name, 'unweighted_events.lhe')
MA5_options['inputs'] = '%s.gz'%input_file
if not os.path.exists('%s.gz'%input_file):
if os.path.exists(input_file):
misc.gzip(input_file, stdout='%s.gz' % input_file)
else:
logger.warning("LHE event file not found in \n%s\ns"%input_file+
"Parton-level MA5 analysis will be skipped.")
if mode=='hadron':
# Make sure to store current results (like Pythia8 hep files)
            # so that they can be found here
self.store_result()
hadron_tag = [t for t in args if t.startswith('--input=')]
if hadron_tag and hadron_tag[0][8:]:
hadron_inputs = hadron_tag[0][8:].split(',')
# If not set above, then we must read it from the card
elif MA5_options['inputs'] == ['fromCard']:
hadron_inputs = banner_mod.MadAnalysis5Card(pjoin(self.me_dir,
'Cards','madanalysis5_hadron_card.dat'),mode='hadron')['inputs']
# Make sure the corresponding input files are present and unfold
# potential wildcard while making their path absolute as well.
MA5_options['inputs'] = []
special_source_tags = []
for htag in hadron_inputs:
                # Possible special tags for MA5 run inputs
if htag in special_source_tags:
# Special check/actions
continue
# Check if the specified file exists and is not a wildcard
if os.path.isfile(htag) or (os.path.exists(htag) and
stat.S_ISFIFO(os.stat(htag).st_mode)):
MA5_options['inputs'].append(htag)
continue
# Now select one source per tag, giving priority to unzipped
# files with 'events' in their name (case-insensitive).
file_candidates = misc.glob(htag, pjoin(self.me_dir,'Events',self.run_name))+\
misc.glob('%s.gz'%htag, pjoin(self.me_dir,'Events',self.run_name))
priority_files = [f for f in file_candidates if
self.run_card['run_tag'] in os.path.basename(f)]
priority_files = [f for f in priority_files if
'EVENTS' in os.path.basename(f).upper()]
# Make sure to always prefer the original partonic event file
for f in file_candidates:
if os.path.basename(f).startswith('unweighted_events.lhe'):
priority_files.append(f)
if priority_files:
MA5_options['inputs'].append(priority_files[-1])
continue
if file_candidates:
MA5_options['inputs'].append(file_candidates[-1])
continue
return MA5_options
def ask_madanalysis5_run_configuration(self, runtype='parton',mode=None):
"""Ask the question when launching madanalysis5.
        In the future we can ask further questions about the MA5 run here, but
        for now we just edit the cards"""
cards = ['madanalysis5_%s_card.dat'%runtype]
self.keep_cards(cards)
if self.force:
return runtype
        # This heavy-looking structure of auto is just to mimic what is done
# for ask_pythia_configuration
auto=False
if mode=='auto':
auto=True
if auto:
self.ask_edit_cards(cards, mode='auto', plot=False)
else:
self.ask_edit_cards(cards, plot=False)
# For now, we don't pass any further information and simply return the
# input mode asked for
mode = runtype
return mode
def complete_madanalysis5_hadron(self,text, line, begidx, endidx):
"Complete the madanalysis5 command"
args = self.split_arg(line[0:begidx], error=False)
if len(args) == 1:
#return valid run_name
data = []
for name in banner_mod.MadAnalysis5Card._default_hadron_inputs:
data += misc.glob(pjoin('*','%s'%name), pjoin(self.me_dir, 'Events'))
data += misc.glob(pjoin('*','%s.gz'%name), pjoin(self.me_dir, 'Events'))
data = [n.rsplit('/',2)[1] for n in data]
tmp1 = self.list_completion(text, data)
if not self.run_name:
return tmp1
else:
tmp2 = self.list_completion(text, ['-f',
'--MA5_stdout_lvl=','--input=','--no_default', '--tag='], line)
return tmp1 + tmp2
elif '--MA5_stdout_lvl=' in line and not any(arg.startswith(
'--MA5_stdout_lvl=') for arg in args):
return self.list_completion(text,
['--MA5_stdout_lvl=%s'%opt for opt in
['logging.INFO','logging.DEBUG','logging.WARNING',
'logging.CRITICAL','90']], line)
elif '--input=' in line and not any(arg.startswith(
'--input=') for arg in args):
return self.list_completion(text, ['--input=%s'%opt for opt in
(banner_mod.MadAnalysis5Card._default_hadron_inputs +['path'])], line)
else:
return self.list_completion(text, ['-f',
'--MA5_stdout_lvl=','--input=','--no_default', '--tag='], line)
def do_madanalysis5_hadron(self, line):
"""launch MadAnalysis5 at the hadron level."""
return self.run_madanalysis5(line,mode='hadron')
def run_madanalysis5(self, line, mode='parton'):
"""launch MadAnalysis5 at the parton level or at the hadron level with
a specific command line."""
# Check argument's validity
args = self.split_arg(line)
if '--no_default' in args:
no_default = True
args.remove('--no_default')
else:
no_default = False
if no_default:
            # Call issued by MG5aMC itself during a generate_events action
if mode=='parton' and not os.path.exists(pjoin(self.me_dir, 'Cards',
'madanalysis5_parton_card.dat')):
return
if mode=='hadron' and not os.path.exists(pjoin(self.me_dir, 'Cards',
'madanalysis5_hadron_card.dat')):
return
else:
            # Call issued directly by the user; only MA5 will be run.
            # We must therefore ask whether the user wants to edit the card
self.ask_madanalysis5_run_configuration(runtype=mode)
if not self.options['madanalysis5_path'] or \
all(not os.path.exists(pjoin(self.me_dir, 'Cards',card)) for card in
['madanalysis5_parton_card.dat','madanalysis5_hadron_card.dat']):
if no_default:
return
else:
raise InvalidCmd('You must have MadAnalysis5 available to run'+
" this command. Consider installing it with the 'install' function.")
if not self.run_name:
MA5_opts = self.check_madanalysis5(args, mode=mode)
self.configure_directory(html_opening =False)
else:
# initialize / remove lhapdf mode
self.configure_directory(html_opening =False)
MA5_opts = self.check_madanalysis5(args, mode=mode)
# Now check that there is at least one input to run
if MA5_opts['inputs']==[]:
if no_default:
logger.warning('No hadron level input found to run MadAnalysis5 on.'+
' Skipping its hadron-level analysis.')
return
else:
                raise self.InvalidCmd('\nNo input files specified or available for'+
' this MadAnalysis5 hadron-level run.\nPlease double-check the options of this'+
' MA5 command (or card) and which output files\nare currently in the chosen'+
" run directory '%s'."%self.run_name)
MA5_card = banner_mod.MadAnalysis5Card(pjoin(self.me_dir, 'Cards',
'madanalysis5_%s_card.dat'%mode), mode=mode)
if MA5_card._skip_analysis:
            logger.info('MadAnalysis5 %s-level analysis was skipped following user request.'%mode)
logger.info("To run the analysis, remove or comment the tag '%s skip_analysis' "
%banner_mod.MadAnalysis5Card._MG5aMC_escape_tag+
"in\n '%s'."%pjoin(self.me_dir, 'Cards','madanalysis5_%s_card.dat'%mode))
return
MA5_cmds_list = MA5_card.get_MA5_cmds(MA5_opts['inputs'],
pjoin(self.me_dir,'MA5_%s_ANALYSIS'%mode.upper()),
run_dir_path = pjoin(self.me_dir,'Events', self.run_name),
UFO_model_path=pjoin(self.me_dir,'bin','internal','ufomodel'),
run_tag = self.run_tag)
# Here's how to print the MA5 commands generated by MG5aMC
# for MA5_runtag, MA5_cmds in MA5_cmds_list:
# misc.sprint('****************************************')
# misc.sprint('* Commands for MA5 runtag %s:'%MA5_runtag)
# misc.sprint('\n'+('\n'.join('* %s'%cmd for cmd in MA5_cmds)))
# misc.sprint('****************************************')
self.update_status('\033[92mRunning MadAnalysis5 [arXiv:1206.1599]\033[0m',
level='madanalysis5_%s'%mode)
if mode=='hadron':
logger.info('Hadron input files considered:')
for input in MA5_opts['inputs']:
logger.info(' --> %s'%input)
elif mode=='parton':
logger.info('Parton input file considered:')
logger.info(' --> %s'%MA5_opts['inputs'])
# Obtain a main MA5 interpreter
# Ideally we would like to do it all with a single interpreter
# but we'd need a way to reset it for this.
if MA5_opts['MA5_stdout_lvl']=='default':
if MA5_card['stdout_lvl'] is None:
MA5_lvl = self.options['stdout_level']
else:
MA5_lvl = MA5_card['stdout_lvl']
else:
MA5_lvl = MA5_opts['MA5_stdout_lvl']
# Bypass initialization information
MA5_interpreter = CommonRunCmd.get_MadAnalysis5_interpreter(
self.options['mg5_path'],
self.options['madanalysis5_path'],
logstream=sys.stdout,
loglevel=100,
forced=True,
compilation=True)
# If failed to start MA5, then just leave
if MA5_interpreter is None:
return
# Make sure to only run over one analysis over each fifo.
used_up_fifos = []
# Now loop over the different MA5_runs
for MA5_runtag, MA5_cmds in MA5_cmds_list:
# Bypass the banner.
MA5_interpreter.setLogLevel(100)
# Make sure to properly initialize MA5 interpreter
if mode=='hadron':
MA5_interpreter.init_reco()
else:
MA5_interpreter.init_parton()
MA5_interpreter.setLogLevel(MA5_lvl)
if MA5_runtag!='default':
if MA5_runtag.startswith('_reco_'):
logger.info("MadAnalysis5 now running the reconstruction '%s'..."%
MA5_runtag[6:],'$MG:color:GREEN')
elif MA5_runtag=='Recasting':
logger.info("MadAnalysis5 now running the recasting...",
'$MG:color:GREEN')
else:
logger.info("MadAnalysis5 now running the '%s' analysis..."%
MA5_runtag,'$MG:color:GREEN')
# Now the magic, let's call MA5
if not CommonRunCmd.runMA5(MA5_interpreter, MA5_cmds, MA5_runtag,
pjoin(self.me_dir,'Events',self.run_name,'%s_MA5_%s.log'%(self.run_tag,MA5_runtag))):
# Unsuccessful MA5 run, we therefore stop here.
return
if MA5_runtag.startswith('_reco_'):
# When doing a reconstruction we must first link the event file
# created with MA5 reconstruction and then directly proceed to the
                # next batch of instructions. There can be several output directories
# if there were several input files.
links_created=[]
for i, input in enumerate(MA5_opts['inputs']):
# Make sure it is not an lhco or root input, which would not
# undergo any reconstruction of course.
if not banner_mod.MadAnalysis5Card.events_can_be_reconstructed(input):
continue
if input.endswith('.fifo'):
if input in used_up_fifos:
# Only run once on each fifo
continue
else:
used_up_fifos.append(input)
reco_output = pjoin(self.me_dir,
'MA5_%s_ANALYSIS%s_%d'%(mode.upper(),MA5_runtag,i+1))
# Look for either a root or .lhe.gz output
reco_event_file = misc.glob('*.lhe.gz',pjoin(reco_output,'Output','_reco_events'))+\
misc.glob('*.root',pjoin(reco_output,'Output','_reco_events'))
if len(reco_event_file)==0:
raise MadGraph5Error, "MadAnalysis5 failed to produce the "+\
"reconstructed event file for reconstruction '%s'."%MA5_runtag[6:]
reco_event_file = reco_event_file[0]
# move the reconstruction output to the HTML directory
shutil.move(reco_output,pjoin(self.me_dir,'HTML',
self.run_name,'%s_MA5_%s_ANALYSIS%s_%d'%
(self.run_tag,mode.upper(),MA5_runtag,i+1)))
# link the reconstructed event file to the run directory
links_created.append(os.path.basename(reco_event_file))
files.ln(pjoin(self.me_dir,'HTML',self.run_name,
'%s_MA5_%s_ANALYSIS%s_%d'%(self.run_tag,mode.upper(),
MA5_runtag,i+1),'Output','_reco_events',links_created[-1]),
pjoin(self.me_dir,'Events',self.run_name))
logger.info("MadAnalysis5 successfully completed the reconstruction "+
"'%s'. Links to the reconstructed event files are:"%MA5_runtag[6:])
for link in links_created:
logger.info(' --> %s'%pjoin(self.me_dir,'Events',self.run_name,link))
continue
if MA5_runtag.upper()=='RECASTING':
target = pjoin(self.me_dir,'MA5_%s_ANALYSIS_%s'\
%(mode.upper(),MA5_runtag),'Output','CLs_output_summary.dat')
else:
target = pjoin(self.me_dir,'MA5_%s_ANALYSIS_%s'\
%(mode.upper(),MA5_runtag),'PDF','main.pdf')
has_pdf = True
if not os.path.isfile(target):
has_pdf = False
# Copy the PDF report or CLs in the Events/run directory.
if MA5_runtag.upper()=='RECASTING':
carboncopy_name = '%s_MA5_CLs.dat'%(self.run_tag)
else:
carboncopy_name = '%s_MA5_%s_analysis_%s.pdf'%(
self.run_tag,mode,MA5_runtag)
if has_pdf:
shutil.copy(target, pjoin(self.me_dir,'Events',self.run_name,carboncopy_name))
else:
logger.error('MadAnalysis5 failed to create PDF output')
if MA5_runtag!='default':
logger.info("MadAnalysis5 successfully completed the "+
"%s. Reported results are placed in:"%("analysis '%s'"%MA5_runtag
if MA5_runtag.upper()!='RECASTING' else "recasting"))
else:
logger.info("MadAnalysis5 successfully completed the analysis."+
" Reported results are placed in:")
logger.info(' --> %s'%pjoin(self.me_dir,'Events',self.run_name,carboncopy_name))
anal_dir = pjoin(self.me_dir,'MA5_%s_ANALYSIS_%s' %(mode.upper(),MA5_runtag))
if not os.path.exists(anal_dir):
                logger.error('MadAnalysis5 failed to complete successfully')
return
# Copy the entire analysis in the HTML directory
shutil.move(anal_dir, pjoin(self.me_dir,'HTML',self.run_name,
'%s_MA5_%s_ANALYSIS_%s'%(self.run_tag,mode.upper(),MA5_runtag)))
# Set the number of events and cross-section to the last one
# (maybe do something smarter later)
new_details={}
for detail in ['nb_event','cross','error']:
new_details[detail] = \
self.results[self.run_name].get_current_info()[detail]
for detail in new_details:
self.results.add_detail(detail,new_details[detail])
self.update_status('Finished MA5 analyses.', level='madanalysis5_%s'%mode,
makehtml=False)
#Update the banner
self.banner.add(pjoin(self.me_dir, 'Cards',
'madanalysis5_%s_card.dat'%mode))
banner_path = pjoin(self.me_dir,'Events', self.run_name,
'%s_%s_banner.txt'%(self.run_name, self.run_tag))
self.banner.write(banner_path)
if not no_default:
            logger.info('Find more information about this run on the local HTML page')
logger.info(' --> %s'%pjoin(self.me_dir,'index.html'))
############################################################################
# End of MadAnalysis5 related function
############################################################################
def do_delphes(self, line):
""" run delphes and make associate root file/plot """
args = self.split_arg(line)
# Check argument's validity
if '--no_default' in args:
no_default = True
args.remove('--no_default')
else:
no_default = False
if no_default and not os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')):
            logger.info('No delphes_card detected, so not running Delphes')
return
# Check all arguments
filepath = self.check_delphes(args, nodefault=no_default)
if no_default and not filepath:
return # no output file but nothing to do either.
self.update_status('prepare delphes run', level=None)
if os.path.exists(pjoin(self.options['delphes_path'], 'data')):
delphes3 = False
prog = '../bin/internal/run_delphes'
if filepath and '.hepmc' in filepath[:-10]:
                raise self.InvalidCmd, 'Delphes 2 does not support hepmc input'
else:
delphes3 = True
prog = '../bin/internal/run_delphes3'
# Check that the delphes_card exists. If not copy the default and
# ask for edition of the card.
if not os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')):
if no_default:
logger.info('No delphes_card detected, so not running Delphes')
return
files.cp(pjoin(self.me_dir, 'Cards', 'delphes_card_default.dat'),
pjoin(self.me_dir, 'Cards', 'delphes_card.dat'))
            logger.info('No delphes_card found. Taking the default one.')
if not delphes3 and not os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_trigger.dat')):
files.cp(pjoin(self.me_dir, 'Cards', 'delphes_trigger_default.dat'),
pjoin(self.me_dir, 'Cards', 'delphes_trigger.dat'))
if not (no_default or self.force):
if delphes3:
self.ask_edit_cards(['delphes_card.dat'], args)
else:
self.ask_edit_cards(['delphes_card.dat', 'delphes_trigger.dat'], args)
self.update_status('Running Delphes', level=None)
delphes_dir = self.options['delphes_path']
tag = self.run_tag
if os.path.exists(pjoin(self.me_dir, 'Source', 'banner_header.txt')):
self.banner.add(pjoin(self.me_dir, 'Cards','delphes_card.dat'))
if not delphes3:
self.banner.add(pjoin(self.me_dir, 'Cards','delphes_trigger.dat'))
self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, tag)))
cross = self.results[self.run_name].get_current_info()['cross']
delphes_log = pjoin(self.me_dir, 'Events', self.run_name, "%s_delphes.log" % tag)
if not self.cluster:
clus = cluster.onecore
else:
clus = self.cluster
clus.launch_and_wait(prog,
argument= [delphes_dir, self.run_name, tag, str(cross), filepath],
stdout=delphes_log, stderr=subprocess.STDOUT,
cwd=pjoin(self.me_dir,'Events'))
if not os.path.exists(pjoin(self.me_dir, 'Events',
self.run_name, '%s_delphes_events.lhco.gz' % tag))\
and not os.path.exists(pjoin(self.me_dir, 'Events',
self.run_name, '%s_delphes_events.lhco' % tag)):
            logger.info('If you are interested in lhco output, please run the root2lhco converter.')
logger.info(' or edit bin/internal/run_delphes3 to run the converter automatically.')
#eradir = self.options['exrootanalysis_path']
madir = self.options['madanalysis_path']
td = self.options['td_path']
if os.path.exists(pjoin(self.me_dir, 'Events',
self.run_name, '%s_delphes_events.lhco' % tag)):
# Creating plots
self.create_plot('Delphes')
if os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % tag)):
misc.gzip(pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % tag))
self.update_status('delphes done', level='delphes', makehtml=False)
############################################################################
def get_pid_final_initial_states(self):
"""Find the pid of all particles in the final and initial states"""
pids = set()
subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses',
'subproc.mg'))]
nb_init = self.ninitial
pat = re.compile(r'''DATA \(IDUP\(I,\d+\),I=1,\d+\)/([\+\-\d,\s]*)/''', re.I)
for Pdir in subproc:
text = open(pjoin(self.me_dir, 'SubProcesses', Pdir, 'born_leshouche.inc')).read()
group = pat.findall(text)
for particles in group:
particles = particles.split(',')
pids.update(set(particles))
return pids
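    # Example (editor's addition) of a born_leshouche.inc line matched by the
    # pattern above; it would add the strings '21', '6' and '-6' to the pid set:
    #   DATA (IDUP(I,1),I=1,4)/21,21,6,-6/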
############################################################################
def get_pdf_input_filename(self):
"""return the name of the file which is used by the pdfset"""
if self.options["cluster_local_path"] and \
os.path.exists(self.options["cluster_local_path"]) and \
self.options['run_mode'] ==1:
# no need to transfer the pdf.
return ''
def check_cluster(path):
if not self.options["cluster_local_path"] or \
os.path.exists(self.options["cluster_local_path"]) or\
self.options['run_mode'] !=1:
return path
main = self.options["cluster_local_path"]
if os.path.isfile(path):
filename = os.path.basename(path)
possible_path = [pjoin(main, filename),
pjoin(main, "lhadpf", filename),
pjoin(main, "Pdfdata", filename)]
if any(os.path.exists(p) for p in possible_path):
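                    # the file is already present on the cluster's local path,
                    # so return a blank name: nothing needs to be transferred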
return " "
else:
return path
if hasattr(self, 'pdffile') and self.pdffile:
return self.pdffile
else:
for line in open(pjoin(self.me_dir,'Source','PDF','pdf_list.txt')):
data = line.split()
if len(data) < 4:
continue
if data[1].lower() == self.run_card['pdlabel'].lower():
self.pdffile = check_cluster(pjoin(self.me_dir, 'lib', 'Pdfdata', data[2]))
return self.pdffile
else:
# possible when using lhapdf
path = pjoin(self.me_dir, 'lib', 'PDFsets')
if os.path.exists(path):
self.pdffile = path
else:
self.pdffile = " "
return self.pdffile
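    # Note (editor's addition): the loop above only relies on pdf_list.txt having
    # whitespace-separated columns in which the second entry is the pdlabel and the
    # third the corresponding data file under lib/Pdfdata. Illustrative layout only:
    #   <id>  <pdlabel>  <data_file>  ...
    # (the exact file content is not reproduced here; placeholders are hypothetical).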
############################################################################
def do_open(self, line):
"""Open a text file/ eps file / html file"""
args = self.split_arg(line)
# Check Argument validity and modify argument to be the real path
self.check_open(args)
file_path = args[0]
misc.open_file(file_path)
############################################################################
def do_set(self, line, log=True):
"""Set an option, which will be default for coming generations/outputs
"""
        # cmd automatically calls post_set after this command.
args = self.split_arg(line)
# Check the validity of the arguments
self.check_set(args)
# Check if we need to save this in the option file
if args[0] in self.options_configuration and '--no_save' not in args:
self.do_save('options --auto')
if args[0] == "stdout_level":
if args[1].isdigit():
logging.root.setLevel(int(args[1]))
logging.getLogger('madgraph').setLevel(int(args[1]))
else:
logging.root.setLevel(eval('logging.' + args[1]))
logging.getLogger('madgraph').setLevel(eval('logging.' + args[1]))
if log: logger.info('set output information to level: %s' % args[1])
elif args[0] == "fortran_compiler":
if args[1] == 'None':
args[1] = None
self.options['fortran_compiler'] = args[1]
current = misc.detect_current_compiler(pjoin(self.me_dir,'Source','make_opts'), 'fortran')
if current != args[1] and args[1] != None:
misc.mod_compilator(self.me_dir, args[1], current, 'gfortran')
elif args[0] == "cpp_compiler":
if args[1] == 'None':
args[1] = None
self.options['cpp_compiler'] = args[1]
current = misc.detect_current_compiler(pjoin(self.me_dir,'Source','make_opts'), 'cpp')
if current != args[1] and args[1] != None:
misc.mod_compilator(self.me_dir, args[1], current, 'cpp')
elif args[0] == "run_mode":
if not args[1] in [0,1,2,'0','1','2']:
raise self.InvalidCmd, 'run_mode should be 0, 1 or 2.'
self.cluster_mode = int(args[1])
self.options['run_mode'] = self.cluster_mode
elif args[0] in ['cluster_type', 'cluster_queue', 'cluster_temp_path']:
if args[1] == 'None':
args[1] = None
self.options[args[0]] = args[1]
# cluster (re)-initialization done later
# self.cluster update at the end of the routine
elif args[0] in ['cluster_nb_retry', 'cluster_retry_wait', 'cluster_size']:
self.options[args[0]] = int(args[1])
# self.cluster update at the end of the routine
elif args[0] == 'nb_core':
if args[1] == 'None':
import multiprocessing
self.nb_core = multiprocessing.cpu_count()
self.options['nb_core'] = self.nb_core
return
if not args[1].isdigit():
raise self.InvalidCmd('nb_core should be a positive number')
self.nb_core = int(args[1])
self.options['nb_core'] = self.nb_core
elif args[0] == 'timeout':
self.options[args[0]] = int(args[1])
elif args[0] == 'cluster_status_update':
if '(' in args[1]:
data = ' '.join([a for a in args[1:] if not a.startswith('-')])
data = data.replace('(','').replace(')','').replace(',',' ').split()
first, second = data[:2]
else:
first, second = args[1:3]
self.options[args[0]] = (int(first), int(second))
elif args[0] == 'notification_center':
if args[1] in ['None','True','False']:
self.allow_notification_center = eval(args[1])
self.options[args[0]] = eval(args[1])
else:
raise self.InvalidCmd('Not a valid value for notification_center')
# True/False formatting
elif args[0] in ['crash_on_error']:
tmp = banner_mod.ConfigFile.format_variable(args[1], bool, 'crash_on_error')
self.options[args[0]] = tmp
elif args[0] in self.options:
if args[1] in ['None','True','False']:
self.options[args[0]] = ast.literal_eval(args[1])
elif args[0].endswith('path'):
if os.path.exists(args[1]):
self.options[args[0]] = args[1]
elif os.path.exists(pjoin(self.me_dir, args[1])):
self.options[args[0]] = pjoin(self.me_dir, args[1])
else:
raise self.InvalidCmd('Not a valid path: keep previous value: \'%s\'' % self.options[args[0]])
else:
self.options[args[0]] = args[1]
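    # Illustrative examples (editor's addition) of values handled by do_set above;
    # the numbers are arbitrary examples, not defaults:
    #   set run_mode 2
    #   set nb_core 4
    #   set stdout_level DEBUG
    #   set cluster_status_update (600, 30)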
def post_set(self, stop, line):
"""Check if we need to save this in the option file"""
try:
args = self.split_arg(line)
if 'cluster' in args[0] or args[0] == 'run_mode':
self.configure_run_mode(self.options['run_mode'])
# Check the validity of the arguments
self.check_set(args)
if args[0] in self.options_configuration and '--no_save' not in args:
self.exec_cmd('save options %s --auto' % args[0])
elif args[0] in self.options_madevent:
logger.info('This option will be the default in any output that you are going to create in this session.')
            logger.info('In order to keep these changes permanent, please run \'save options\'')
return stop
except self.InvalidCmd:
return stop
def configure_run_mode(self, run_mode):
"""change the way to submit job 0: single core, 1: cluster, 2: multicore"""
self.cluster_mode = run_mode
self.options['run_mode'] = run_mode
if run_mode == 2:
if not self.options['nb_core']:
import multiprocessing
self.options['nb_core'] = multiprocessing.cpu_count()
nb_core = self.options['nb_core']
elif run_mode == 0:
nb_core = 1
if run_mode in [0, 2]:
self.cluster = cluster.MultiCore(
**self.options)
self.cluster.nb_core = nb_core
#cluster_temp_path=self.options['cluster_temp_path'],
if self.cluster_mode == 1:
opt = self.options
cluster_name = opt['cluster_type']
if cluster_name in cluster.from_name:
self.cluster = cluster.from_name[cluster_name](**opt)
else:
if MADEVENT and ('mg5_path' not in self.options or not self.options['mg5_path']):
if not self.plugin_path:
raise self.InvalidCmd('%s is not a native cluster type and no PLUGIN directory is available' % cluster_name)
elif MADEVENT:
mg5dir = self.options['mg5_path']
if mg5dir not in sys.path:
sys.path.append(mg5dir)
newpath = pjoin(mg5dir, 'PLUGIN')
if newpath not in self.plugin_path:
self.plugin_path.append(newpath)
else:
mg5dir = MG5DIR
# Check if a plugin define this type of cluster
# check for PLUGIN format
for plugpath in self.plugin_path:
plugindirname = os.path.basename(plugpath)
for plug in os.listdir(plugpath):
if os.path.exists(pjoin(plugpath, plug, '__init__.py')):
try:
__import__('%s.%s' % (plugindirname,plug))
except Exception:
logger.critical('plugin directory %s/%s fails to load. Please check it', plugindirname, plug)
continue
plugin = sys.modules['%s.%s' % (plugindirname,plug)]
if not hasattr(plugin, 'new_cluster'):
continue
if not misc.is_plugin_supported(plugin):
continue
if cluster_name in plugin.new_cluster:
logger.info("cluster handling will be done with PLUGIN: %s" % plug,'$MG:color:BLACK')
self.cluster = plugin.new_cluster[cluster_name](**opt)
break
else:
continue
break
else:
raise self.InvalidCmd, "%s is not recognized as a supported cluster format." % cluster_name
def check_param_card(self, path, run=True, dependent=False):
"""
1) Check that no scan parameters are present
2) Check that all the widths are defined in the param_card.
- If a scan parameter is defined, create the iterator and call this function
again on the first element.
- If some widths are set to 'Auto', call the computation tools.
- Check that no width is too small (raise a warning if this is the case)
3) if dependent is True, check for dependent parameters (automatic for scan)"""
pattern_scan = re.compile(r'''^(decay)?[\s\d]*scan''', re.I+re.M)
pattern_width = re.compile(r'''decay\s+(\+?\-?\d+)\s+auto(@NLO|)''',re.I)
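# Illustrative card lines matched by the patterns above (assumed card content):
#   pattern_scan  matches e.g.  'decay 23 scan:[2.0, 2.5, 3.0]'
#   pattern_width matches e.g.  'decay 25 Auto'  or  'decay 25 Auto@NLO'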
text = open(path).read()
if pattern_scan.search(text):
if not isinstance(self, cmd.CmdShell):
# we are in web mode => forbid scan due to security risk
raise Exception, "Scans are not allowed in web mode"
# at least one scan parameter found: create an iterator to go through the cards
main_card = check_param_card.ParamCardIterator(text)
self.param_card_iterator = main_card
first_card = main_card.next(autostart=True)
first_card.write(path)
return self.check_param_card(path, run, dependent=True)
pdg_info = pattern_width.findall(text)
if pdg_info:
if run:
logger.info('Computing the width set on auto in the param_card.dat')
has_nlo = any(nlo.lower()=="@nlo" for _,nlo in pdg_info)
pdg = [pdg for pdg,nlo in pdg_info]
if not has_nlo:
self.do_compute_widths('%s %s' % (' '.join(pdg), path))
else:
self.do_compute_widths('%s %s --nlo' % (' '.join(pdg), path))
else:
logger.info('''Some widths are set to Auto in the card.
Those will be computed as soon as you have finished editing the cards.
If you want to force the computation right now and be able to re-edit
the cards afterwards, you can type \"compute_widths\".''')
card = check_param_card.ParamCard(path)
if dependent:
AskforEditCard.update_dependent(self, self.me_dir, card, path, timer=20)
for param in card['decay']:
width = param.value
if width == 0:
continue
try:
mass = card['mass'].get(param.lhacode).value
except Exception:
logger.warning('Missing mass in the lhef file (%s). Please fix this (use the "update missing" command if needed)', param.lhacode[0])
continue
if mass and width/mass < 1e-12:
logger.error('The width of particle %s is too small for an s-channel resonance (%s). If you have this particle in an s-channel, this is likely to create numerical instabilities.', param.lhacode[0], width)
if CommonRunCmd.sleep_for_error:
time.sleep(5)
CommonRunCmd.sleep_for_error = False
elif not mass and width:
logger.error('The width of particle %s is non-zero although the particle is massless.', param.lhacode[0])
if CommonRunCmd.sleep_for_error:
time.sleep(5)
CommonRunCmd.sleep_for_error = False
return
def add_error_log_in_html(self, errortype=None):
"""If a ME run is currently running add a link in the html output"""
# Be very carefull to not raise any error here (the traceback
#will be modify in that case.)
if hasattr(self, 'results') and hasattr(self.results, 'current') and\
self.results.current and 'run_name' in self.results.current and \
hasattr(self, 'me_dir'):
name = self.results.current['run_name']
tag = self.results.current['tag']
self.debug_output = pjoin(self.me_dir, '%s_%s_debug.log' % (name,tag))
if errortype:
self.results.current.debug = errortype
else:
self.results.current.debug = self.debug_output
else:
#Force class default
self.debug_output = CommonRunCmd.debug_output
if os.path.exists('ME5_debug') and not 'ME5_debug' in self.debug_output:
os.remove('ME5_debug')
if not 'ME5_debug' in self.debug_output:
os.system('ln -s %s ME5_debug &> /dev/null' % self.debug_output)
def do_quit(self, line):
"""Not in help: exit """
if not self.force_run:
try:
os.remove(pjoin(self.me_dir,'RunWeb'))
except Exception:
pass
try:
self.store_result()
except Exception:
# If nothing has run, there are no results to update
pass
try:
self.update_status('', level=None)
except Exception, error:
pass
self.gen_card_html()
return super(CommonRunCmd, self).do_quit(line)
# Aliases
do_EOF = do_quit
do_exit = do_quit
def update_status(self, status, level, makehtml=True, force=True,
error=False, starttime = None, update_results=True,
print_log=True):
""" update the index status """
if makehtml and not force:
if hasattr(self, 'next_update') and time.time() < self.next_update:
return
else:
self.next_update = time.time() + 3
if print_log:
if isinstance(status, str):
if '<br>' not in status:
logger.info(status)
elif starttime:
running_time = misc.format_timer(time.time()-starttime)
logger.info(' Idle: %s, Running: %s, Completed: %s [ %s ]' % \
(status[0], status[1], status[2], running_time))
else:
logger.info(' Idle: %s, Running: %s, Completed: %s' % status[:3])
if isinstance(status, str) and status.startswith('\x1b['):
status = status[status.index('m')+1:-7]
if 'arXiv' in status:
if '[' in status:
status = status.split('[',1)[0]
else:
status = status.split('arXiv',1)[0]
if update_results:
self.results.update(status, level, makehtml=makehtml, error=error)
############################################################################
def keep_cards(self, need_card=[], ignore=[]):
"""Ask the question when launching generate_events/multi_run"""
check_card = ['pythia_card.dat', 'pgs_card.dat','delphes_card.dat',
'delphes_trigger.dat', 'madspin_card.dat', 'shower_card.dat',
'reweight_card.dat','pythia8_card.dat',
'madanalysis5_parton_card.dat','madanalysis5_hadron_card.dat',
'plot_card.dat']
cards_path = pjoin(self.me_dir,'Cards')
for card in check_card:
if card in ignore or (ignore == ['*'] and card not in need_card):
continue
if card not in need_card:
if os.path.exists(pjoin(cards_path, card)):
files.mv(pjoin(cards_path, card), pjoin(cards_path, '.%s' % card))
else:
if not os.path.exists(pjoin(cards_path, card)):
if os.path.exists(pjoin(cards_path, '.%s' % card)):
files.mv(pjoin(cards_path, '.%s' % card), pjoin(cards_path, card))
else:
default = card.replace('.dat', '_default.dat')
files.cp(pjoin(cards_path, default),pjoin(cards_path, card))
############################################################################
def set_configuration(self, config_path=None, final=True, initdir=None, amcatnlo=False):
""" assign all configuration variable from file
./Cards/mg5_configuration.txt. assign to default if not define """
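# Each useful line of a configuration file is expected to look like (illustrative):
#   run_mode = 2          # 0: single core, 1: cluster, 2: multicore
# i.e. 'name = value' with an optional trailing '#' comment, as parsed below.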
if not hasattr(self, 'options') or not self.options:
self.options = dict(self.options_configuration)
self.options.update(self.options_madgraph)
self.options.update(self.options_madevent)
if not config_path:
if os.environ.has_key('MADGRAPH_BASE'):
config_path = pjoin(os.environ['MADGRAPH_BASE'],'mg5_configuration.txt')
self.set_configuration(config_path=config_path, final=False)
if 'HOME' in os.environ:
config_path = pjoin(os.environ['HOME'],'.mg5',
'mg5_configuration.txt')
if os.path.exists(config_path):
self.set_configuration(config_path=config_path, final=False)
if amcatnlo:
me5_config = pjoin(self.me_dir, 'Cards', 'amcatnlo_configuration.txt')
else:
me5_config = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt')
self.set_configuration(config_path=me5_config, final=False, initdir=self.me_dir)
if self.options.has_key('mg5_path') and self.options['mg5_path']:
MG5DIR = self.options['mg5_path']
config_file = pjoin(MG5DIR, 'input', 'mg5_configuration.txt')
self.set_configuration(config_path=config_file, final=False,initdir=MG5DIR)
else:
self.options['mg5_path'] = None
return self.set_configuration(config_path=me5_config, final=final,initdir=self.me_dir)
config_file = open(config_path)
# read the file and extract information
logger.info('load configuration from %s ' % config_file.name)
for line in config_file:
if '#' in line:
line = line.split('#',1)[0]
line = line.replace('\n','').replace('\r\n','')
try:
name, value = line.split('=')
except ValueError:
pass
else:
name = name.strip()
value = value.strip()
if name.endswith('_path') and not name.startswith('cluster'):
path = value
if os.path.isdir(path):
self.options[name] = os.path.realpath(path)
continue
if not initdir:
continue
path = pjoin(initdir, value)
if os.path.isdir(path):
self.options[name] = os.path.realpath(path)
continue
else:
self.options[name] = value
if value.lower() == "none":
self.options[name] = None
if not final:
return self.options # the return is useful for unittest
# Treat each expected input
# delphes/pythia/... path
for key in self.options:
# Final cross check for the path
if key.endswith('path') and not key.startswith("cluster"):
path = self.options[key]
if path is None:
continue
if os.path.isdir(path):
self.options[key] = os.path.realpath(path)
continue
path = pjoin(self.me_dir, self.options[key])
if os.path.isdir(path):
self.options[key] = os.path.realpath(path)
continue
elif self.options.has_key('mg5_path') and self.options['mg5_path']:
path = pjoin(self.options['mg5_path'], self.options[key])
if os.path.isdir(path):
self.options[key] = os.path.realpath(path)
continue
self.options[key] = None
elif key.startswith('cluster') and key != 'cluster_status_update':
if key in ('cluster_nb_retry','cluster_wait_retry'):
self.options[key] = int(self.options[key])
if hasattr(self,'cluster'):
del self.cluster
pass
elif key == 'automatic_html_opening':
if self.options[key] in ['False', 'True']:
self.options[key] =ast.literal_eval(self.options[key])
elif key == "notification_center":
if self.options[key] in ['False', 'True']:
self.allow_notification_center =ast.literal_eval(self.options[key])
self.options[key] =ast.literal_eval(self.options[key])
elif key not in ['text_editor','eps_viewer','web_browser','stdout_level',
'complex_mass_scheme', 'gauge', 'group_subprocesses']:
# Default: try to set parameter
try:
self.do_set("%s %s --no_save" % (key, self.options[key]), log=False)
except self.InvalidCmd:
logger.warning("Option %s from config file not understood" \
% key)
# Configure the way to open a file:
misc.open_file.configure(self.options)
self.configure_run_mode(self.options['run_mode'])
return self.options
@staticmethod
def find_available_run_name(me_dir):
""" find a valid run_name for the current job """
name = 'run_%02d'
data = [int(s[4:j]) for s in os.listdir(pjoin(me_dir,'Events')) for
j in range(4,len(s)+1) if \
s.startswith('run_') and s[4:j].isdigit()]
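# Illustrative example (assumed directory content): if Events/ contains
# 'run_01' and 'run_02_decayed_1', the largest existing index is 2 and
# the function returns 'run_03'.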
return name % (max(data+[0])+1)
############################################################################
def do_decay_events(self,line):
"""Require MG5 directory: decay events with spin correlations
"""
if '-from_cards' in line and not os.path.exists(pjoin(self.me_dir, 'Cards', 'madspin_card.dat')):
return
# First need to load MadSpin
# Check that MG5 directory is present .
if MADEVENT and not self.options['mg5_path']:
raise self.InvalidCmd, '''The module decay_events requires that MG5 is installed on the system.
You can install it and set its path in ./Cards/me5_configuration.txt'''
elif MADEVENT:
sys.path.append(self.options['mg5_path'])
try:
import MadSpin.decay as decay
import MadSpin.interface_madspin as interface_madspin
except ImportError:
if __debug__:
raise
else:
raise self.ConfigurationError, '''Can\'t load MadSpin
The variable mg5_path might not be correctly configured.'''
self.update_status('Running MadSpin', level='madspin')
if not '-from_cards' in line and '-f' not in line:
self.keep_cards(['madspin_card.dat'], ignore=['*'])
self.ask_edit_cards(['madspin_card.dat'], 'fixed', plot=False)
self.help_decay_events(skip_syntax=True)
# load the name of the event file
args = self.split_arg(line)
self.check_decay_events(args)
# args now always contains the path to the valid files
madspin_cmd = interface_madspin.MadSpinInterface(args[0])
# pass current options to the interface
madspin_cmd.mg5cmd.options.update(self.options)
madspin_cmd.cluster = self.cluster
madspin_cmd.update_status = lambda *x,**opt: self.update_status(*x, level='madspin',**opt)
path = pjoin(self.me_dir, 'Cards', 'madspin_card.dat')
madspin_cmd.import_command_file(path)
# create a new run_name directory for this output
i = 1
while os.path.exists(pjoin(self.me_dir,'Events', '%s_decayed_%i' % (self.run_name,i))):
i+=1
new_run = '%s_decayed_%i' % (self.run_name,i)
evt_dir = pjoin(self.me_dir, 'Events')
os.mkdir(pjoin(evt_dir, new_run))
current_file = args[0].replace('.lhe', '_decayed.lhe')
new_file = pjoin(evt_dir, new_run, os.path.basename(args[0]))
if not os.path.exists(current_file):
if os.path.exists(current_file+'.gz'):
current_file += '.gz'
new_file += '.gz'
elif current_file.endswith('.gz') and os.path.exists(current_file[:-3]):
current_file = current_file[:-3]
new_file = new_file[:-3]
else:
logger.error('MadSpin fails to create any decayed file.')
return
files.mv(current_file, new_file)
logger.info("The decayed event file has been moved to the following location: ")
logger.info(new_file)
if hasattr(self, 'results'):
current = self.results.current
nb_event = self.results.current['nb_event']
if not nb_event:
current = self.results[self.run_name][0]
nb_event = current['nb_event']
cross = current['cross']
error = current['error']
self.results.add_run( new_run, self.run_card)
self.results.add_detail('nb_event', int(nb_event*madspin_cmd.efficiency))
self.results.add_detail('cross', madspin_cmd.cross)#cross * madspin_cmd.branching_ratio)
self.results.add_detail('error', madspin_cmd.error+ cross * madspin_cmd.err_branching_ratio)
self.results.add_detail('run_mode', current['run_mode'])
self.run_name = new_run
self.banner = madspin_cmd.banner
self.banner.add(path)
self.banner.write(pjoin(self.me_dir,'Events',self.run_name, '%s_%s_banner.txt' %
(self.run_name, self.run_tag)))
self.update_status('MadSpin Done', level='parton', makehtml=False)
if 'unweighted' in os.path.basename(args[0]):
self.create_plot('parton')
def complete_decay_events(self, text, line, begidx, endidx):
args = self.split_arg(line[0:begidx], error=False)
if len(args) == 1:
return self.complete_plot(text, line, begidx, endidx)
else:
return
def complete_print_results(self,text, line, begidx, endidx):
"Complete the print results command"
args = self.split_arg(line[0:begidx], error=False)
if len(args) == 1:
#return valid run_name
data = misc.glob(pjoin('*','unweighted_events.lhe.gz'),
pjoin(self.me_dir, 'Events'))
data = [n.rsplit('/',2)[1] for n in data]
tmp1 = self.list_completion(text, data)
return tmp1
else:
data = misc.glob('*_pythia_events.hep.gz', pjoin(self.me_dir, 'Events', args[0]))
data = [os.path.basename(p).rsplit('_',1)[0] for p in data]
data += ["--mode=a", "--mode=w", "--path=", "--format=short"]
tmp1 = self.list_completion(text, data)
return tmp1
def help_print_result(self):
logger.info("syntax: print_result [RUN] [TAG] [options]")
logger.info("-- show in text format the status of the run (cross-section/nb-event/...)")
logger.info("--path= defines the path of the output file.")
logger.info("--mode=a allow to add the information at the end of the file.")
logger.info("--format=short (only if --path is define)")
logger.info(" allows to have a multi-column output easy to parse")
############################################################################
def do_check_events(self, line):
""" Run some sanity check on the generated events."""
# Check that MG5 directory is present .
if MADEVENT and not self.options['mg5_path']:
raise self.InvalidCmd, '''The module reweight requires that MG5 is installed on the system.
You can install it and set its path in ./Cards/me5_configuration.txt'''
elif MADEVENT:
sys.path.append(self.options['mg5_path'])
try:
import madgraph.interface.reweight_interface as reweight_interface
except ImportError:
raise self.ConfigurationError, '''Can\'t load Reweight module.
The variable mg5_path might not be correctly configured.'''
# load the name of the event file
args = self.split_arg(line)
self.check_check_events(args)
# args now always contains the path to the valid files
reweight_cmd = reweight_interface.ReweightInterface(args[0], allow_madspin=True)
reweight_cmd.mother = self
self.update_status('Running check on events', level='check')
reweight_cmd.check_events()
############################################################################
def complete_check_events(self, text, line, begidx, endidx):
args = self.split_arg(line[0:begidx], error=False)
if len(args) == 1 and os.path.sep not in text:
#return valid run_name
data = misc.glob(pjoin('*','*events.lhe*'), pjoin(self.me_dir, 'Events'))
data = [n.rsplit('/',2)[1] for n in data]
return self.list_completion(text, data, line)
else:
return self.path_completion(text,
os.path.join('.',*[a for a in args \
if a.endswith(os.path.sep)]))
def complete_reweight(self,text, line, begidx, endidx):
"Complete the pythia command"
args = self.split_arg(line[0:begidx], error=False)
#return valid run_name
data = misc.glob(pjoin('*','*events.lhe*'), pjoin(self.me_dir, 'Events'))
data = list(set([n.rsplit('/',2)[1] for n in data]))
if not '-f' in args:
data.append('-f')
tmp1 = self.list_completion(text, data)
return tmp1
def complete_compute_widths(self, text, line, begidx, endidx, formatting=True):
"Complete the compute_widths command"
args = self.split_arg(line[0:begidx])
if args[-1] in ['--path=', '--output=']:
completion = {'path': self.path_completion(text)}
elif line[begidx-1] == os.path.sep:
current_dir = pjoin(*[a for a in args if a.endswith(os.path.sep)])
if current_dir.startswith('--path='):
current_dir = current_dir[7:]
if current_dir.startswith('--output='):
current_dir = current_dir[9:]
completion = {'path': self.path_completion(text, current_dir)}
else:
completion = {}
completion['options'] = self.list_completion(text,
['--path=', '--output=', '--min_br=0.\$', '--nlo',
'--precision_channel=0.\$', '--body_decay='])
return self.deal_multiple_categories(completion, formatting)
def update_make_opts(self):
"""update the make_opts file writing the environmental variables
stored in make_opts_var"""
make_opts = os.path.join(self.me_dir, 'Source', 'make_opts')
# Set some environment variables common to all interfaces
if not hasattr(self,'options') or not 'pythia8_path' in self.options or \
not self.options['pythia8_path'] or \
not os.path.isfile(pjoin(self.options['pythia8_path'],'bin','pythia8-config')):
self.make_opts_var['PYTHIA8_PATH']='NotInstalled'
else:
self.make_opts_var['PYTHIA8_PATH']=self.options['pythia8_path']
self.make_opts_var['MG5AMC_VERSION'] = misc.get_pkg_info()['version']
return self.update_make_opts_full(make_opts, self.make_opts_var)
@staticmethod
def update_make_opts_full(path, def_variables, keep_old=True):
"""update the make_opts file writing the environmental variables
of def_variables.
if a value of the dictionary is None then it is not written.
"""
make_opts = path
pattern = re.compile(r'^(\w+)\s*=\s*(.*)$',re.DOTALL)
diff = False # set to True if one variable needs to be updated
# if it stays False, the file is not modified
tag = '#end_of_make_opts_variables\n'
make_opts_variable = True # flag telling whether we are still in the variable-definition area
content = []
variables = dict(def_variables)
need_keys = variables.keys()
for line in open(make_opts):
line = line.strip()
if make_opts_variable:
if line.startswith('#') or not line:
if line.startswith('#end_of_make_opts_variables'):
make_opts_variable = False
continue
elif pattern.search(line):
key, value = pattern.search(line).groups()
if key not in variables:
variables[key] = value
elif value != variables[key]:
diff=True
else:
need_keys.remove(key)
else:
make_opts_variable = False
content.append(line)
else:
content.append(line)
if need_keys:
diff=True # this means that new definitions are added to the file.
content_variables = '\n'.join('%s=%s' % (k,v) for k, v in variables.items() if v is not None)
content_variables += '\n%s' % tag
if diff:
with open(make_opts, 'w') as fsock:
fsock.write(content_variables + '\n'.join(content))
return
# lhapdf-related functions
def link_lhapdf(self, libdir, extra_dirs = []):
"""links lhapdf into libdir"""
lhapdf_version = self.get_lhapdf_version()
logger.info('Using LHAPDF v%s interface for PDFs' % lhapdf_version)
lhalibdir = subprocess.Popen([self.options['lhapdf'], '--libdir'],
stdout = subprocess.PIPE).stdout.read().strip()
if lhapdf_version.startswith('5.'):
pdfsetsdir = subprocess.Popen([self.options['lhapdf'], '--pdfsets-path'],
stdout = subprocess.PIPE).stdout.read().strip()
else:
pdfsetsdir = subprocess.Popen([self.options['lhapdf'], '--datadir'],
stdout = subprocess.PIPE).stdout.read().strip()
self.lhapdf_pdfsets = self.get_lhapdf_pdfsets_list(pdfsetsdir)
# link the static library in lib
lhalib = 'libLHAPDF.a'
if os.path.exists(pjoin(libdir, lhalib)):
files.rm(pjoin(libdir, lhalib))
files.ln(pjoin(lhalibdir, lhalib), libdir)
# just create the PDFsets dir, the needed PDF set will be copied at run time
if not os.path.isdir(pjoin(libdir, 'PDFsets')):
os.mkdir(pjoin(libdir, 'PDFsets'))
self.make_opts_var['lhapdf'] = self.options['lhapdf']
self.make_opts_var['lhapdfversion'] = lhapdf_version[0]
self.make_opts_var['lhapdfsubversion'] = lhapdf_version.split('.',2)[1]
self.make_opts_var['lhapdf_config'] = self.options['lhapdf']
def get_characteristics(self, path=None):
"""reads the proc_characteristics file and initialises the correspondant
dictionary"""
if not path:
path = os.path.join(self.me_dir, 'SubProcesses', 'proc_characteristics')
self.proc_characteristics = banner_mod.ProcCharacteristic(path)
return self.proc_characteristics
def copy_lhapdf_set(self, lhaid_list, pdfsets_dir):
"""copy (if needed) the lhapdf set corresponding to the lhaid in lhaid_list
into lib/PDFsets"""
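# Illustrative call (hypothetical values):
#   copy_lhapdf_set([10042, 'NNPDF23_nlo_as_0119.LHgrid'], '/path/to/lhapdf/pdfsets')
# integer entries are looked up in the pdfsets index, string entries are taken as set/file names directly.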
if not hasattr(self, 'lhapdf_pdfsets'):
self.lhapdf_pdfsets = self.get_lhapdf_pdfsets_list(pdfsets_dir)
pdfsetname=set()
for lhaid in lhaid_list:
if isinstance(lhaid, str) and lhaid.isdigit():
lhaid = int(lhaid)
if isinstance(lhaid, (int,float)):
try:
if lhaid in self.lhapdf_pdfsets:
pdfsetname.add(self.lhapdf_pdfsets[lhaid]['filename'])
else:
raise MadGraph5Error('lhaid %s is not a valid input number for the current lhapdf' % lhaid )
except KeyError:
if self.lhapdf_version.startswith('5'):
raise MadGraph5Error(\
('invalid lhaid set in the run_card: %d.\nPlease note that some sets' % lhaid) + \
' (e.g. MSTW 90%CL error sets)\nare not available in aMC@NLO + LHAPDF 5.x.x')
else:
logger.debug('%d not found in pdfsets.index' % lhaid)
else:
pdfsetname.add(lhaid)
# check if the file exists, otherwise install it:
# also check that the PDFsets dir exists, otherwise create it.
# if fails, install the lhapdfset into lib/PDFsets
if not os.path.isdir(pdfsets_dir):
try:
os.mkdir(pdfsets_dir)
except OSError:
pdfsets_dir = pjoin(self.me_dir, 'lib', 'PDFsets')
elif os.path.exists(pjoin(self.me_dir, 'lib', 'PDFsets')):
#clean previous set of pdf used
for name in os.listdir(pjoin(self.me_dir, 'lib', 'PDFsets')):
if name not in pdfsetname:
try:
if os.path.isdir(pjoin(self.me_dir, 'lib', 'PDFsets', name)):
shutil.rmtree(pjoin(self.me_dir, 'lib', 'PDFsets', name))
else:
os.remove(pjoin(self.me_dir, 'lib', 'PDFsets', name))
except Exception, error:
logger.debug('%s', error)
if self.options["cluster_local_path"]:
lhapdf_cluster_possibilities = [self.options["cluster_local_path"],
pjoin(self.options["cluster_local_path"], "lhapdf"),
pjoin(self.options["cluster_local_path"], "lhapdf", "pdfsets"),
pjoin(self.options["cluster_local_path"], "..", "lhapdf"),
pjoin(self.options["cluster_local_path"], "..", "lhapdf", "pdfsets"),
pjoin(self.options["cluster_local_path"], "..", "lhapdf","pdfsets", "6.1")
]
else:
lhapdf_cluster_possibilities = []
for pdfset in pdfsetname:
# Check if we need to copy the pdf
if self.options["cluster_local_path"] and self.options["run_mode"] == 1 and \
any((os.path.exists(pjoin(d, pdfset)) for d in lhapdf_cluster_possibilities)):
os.environ["LHAPATH"] = [d for d in lhapdf_cluster_possibilities if os.path.exists(pjoin(d, pdfset))][0]
os.environ["CLUSTER_LHAPATH"] = os.environ["LHAPATH"]
# no need to copy it
if os.path.exists(pjoin(pdfsets_dir, pdfset)):
try:
if os.path.isdir(pjoin(pdfsets_dir, name)):
shutil.rmtree(pjoin(pdfsets_dir, name))
else:
os.remove(pjoin(pdfsets_dir, name))
except Exception, error:
logger.debug('%s', error)
#check that the pdfset is not already there
elif not os.path.exists(pjoin(self.me_dir, 'lib', 'PDFsets', pdfset)) and \
not os.path.isdir(pjoin(self.me_dir, 'lib', 'PDFsets', pdfset)):
if pdfset and not os.path.exists(pjoin(pdfsets_dir, pdfset)):
self.install_lhapdf_pdfset(pdfsets_dir, pdfset)
if os.path.exists(pjoin(pdfsets_dir, pdfset)):
files.cp(pjoin(pdfsets_dir, pdfset), pjoin(self.me_dir, 'lib', 'PDFsets'))
elif os.path.exists(pjoin(os.path.dirname(pdfsets_dir), pdfset)):
files.cp(pjoin(os.path.dirname(pdfsets_dir), pdfset), pjoin(self.me_dir, 'lib', 'PDFsets'))
def install_lhapdf_pdfset(self, pdfsets_dir, filename):
"""idownloads and install the pdfset filename in the pdfsets_dir"""
lhapdf_version = self.get_lhapdf_version()
local_path = pjoin(self.me_dir, 'lib', 'PDFsets')
return self.install_lhapdf_pdfset_static(self.options['lhapdf'],
pdfsets_dir, filename,
lhapdf_version=lhapdf_version,
alternate_path=local_path)
@staticmethod
def install_lhapdf_pdfset_static(lhapdf_config, pdfsets_dir, filename,
lhapdf_version=None, alternate_path=None):
"""idownloads and install the pdfset filename in the pdfsets_dir.
Version which can be used independently of the class.
local path is used if the global installation fails.
"""
if not lhapdf_version:
lhapdf_version = subprocess.Popen([lhapdf_config, '--version'],
stdout = subprocess.PIPE).stdout.read().strip()
if not pdfsets_dir:
pdfsets_dir = subprocess.Popen([lhapdf_config, '--datadir'],
stdout = subprocess.PIPE).stdout.read().strip()
if isinstance(filename, int):
pdf_info = CommonRunCmd.get_lhapdf_pdfsets_list_static(pdfsets_dir, lhapdf_version)
filename = pdf_info[filename]['filename']
if os.path.exists(pjoin(pdfsets_dir, filename)):
logger.debug('%s is already present in %s', filename, pdfsets_dir)
return
logger.info('Trying to download %s' % filename)
if lhapdf_version.startswith('5.'):
# use the lhapdf-getdata command, which is in the same path as
# lhapdf-config
getdata = lhapdf_config.replace('lhapdf-config', ('lhapdf-getdata'))
misc.call([getdata, filename], cwd = pdfsets_dir)
elif lhapdf_version.startswith('6.'):
# use the "lhapdf install xxx" command, which is in the same path as
# lhapdf-config
getdata = lhapdf_config.replace('lhapdf-config', ('lhapdf'))
misc.call([getdata, 'install', filename], cwd = pdfsets_dir)
else:
raise MadGraph5Error('Not valid LHAPDF version: %s' % lhapdf_version)
# check that the file has been installed in the global dir
if os.path.exists(pjoin(pdfsets_dir, filename)) or \
os.path.isdir(pjoin(pdfsets_dir, filename)):
logger.info('%s successfully downloaded and stored in %s' \
% (filename, pdfsets_dir))
#otherwise (if v5) save it locally
elif lhapdf_version.startswith('5.'):
logger.warning('Could not download %s into %s. Trying to save it locally' \
% (filename, pdfsets_dir))
CommonRunCmd.install_lhapdf_pdfset_static(lhapdf_config, alternate_path, filename,
lhapdf_version=lhapdf_version)
elif lhapdf_version.startswith('6.') and '.LHgrid' in filename:
logger.info('Could not download %s: Try %s', filename, filename.replace('.LHgrid',''))
return CommonRunCmd.install_lhapdf_pdfset_static(lhapdf_config, pdfsets_dir,
filename.replace('.LHgrid',''),
lhapdf_version, alternate_path)
else:
raise MadGraph5Error, \
'Could not download %s into %s. Please try to install it manually.' \
% (filename, pdfsets_dir)
def get_lhapdf_pdfsets_list(self, pdfsets_dir):
"""read the PDFsets.index file, which should be located in the same
place as pdfsets_dir, and return a list of dictionaries with the information
about each pdf set"""
lhapdf_version = self.get_lhapdf_version()
return self.get_lhapdf_pdfsets_list_static(pdfsets_dir, lhapdf_version)
@staticmethod
def get_lhapdf_pdfsets_list_static(pdfsets_dir, lhapdf_version):
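# For LHAPDF 6.x each useful line of pdfsets.index looks like (illustrative):
#   247000 NNPDF23_lo_as_0130_qed
# and is turned into {'lhaid': 247000, 'filename': 'NNPDF23_lo_as_0130_qed'};
# the LHAPDF 5.x index carries the additional columns parsed below.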
if lhapdf_version.startswith('5.'):
if os.path.exists('%s.index' % pdfsets_dir):
indexfile = '%s.index' % pdfsets_dir
else:
raise MadGraph5Error, 'index of lhapdf file not found'
pdfsets_lines = \
[l for l in open(indexfile).read().split('\n') if l.strip() and \
not '90cl' in l]
lhapdf_pdfsets = dict( (int(l.split()[0]), {'lhaid': int(l.split()[0]),
'pdflib_ntype': int(l.split()[1]),
'pdflib_ngroup': int(l.split()[2]),
'pdflib_nset': int(l.split()[3]),
'filename': l.split()[4],
'lhapdf_nmem': int(l.split()[5]),
'q2min': float(l.split()[6]),
'q2max': float(l.split()[7]),
'xmin': float(l.split()[8]),
'xmax': float(l.split()[9]),
'description': l.split()[10]}) \
for l in pdfsets_lines)
elif lhapdf_version.startswith('6.'):
pdfsets_lines = \
[l for l in open(pjoin(pdfsets_dir, 'pdfsets.index')).read().split('\n') if l.strip()]
lhapdf_pdfsets = dict( (int(l.split()[0]),
{'lhaid': int(l.split()[0]),
'filename': l.split()[1]}) \
for l in pdfsets_lines)
else:
raise MadGraph5Error('Not valid LHAPDF version: %s' % lhapdf_version)
return lhapdf_pdfsets
def get_lhapdf_version(self):
"""returns the lhapdf version number"""
if not hasattr(self, 'lhapdf_version'):
try:
self.lhapdf_version = \
subprocess.Popen([self.options['lhapdf'], '--version'],
stdout = subprocess.PIPE).stdout.read().strip()
except OSError, error:
if error.errno == 2:
raise Exception, 'lhapdf executable (%s) is not found on your system. Please install it and/or indicate the path to the correct executable in input/mg5_configuration.txt' % self.options['lhapdf']
else:
raise
# this will be removed once some issues in lhapdf6 are fixed
if self.lhapdf_version.startswith('6.0'):
raise MadGraph5Error('LHAPDF 6.0.x not supported. Please use v6.1 or later')
if self.lhapdf_version.startswith('6.2'):
logger.warning('Support of LHAPDF 6.2.x is still in beta phase. Consider using LHAPDF 6.1.x in case of problems.')
return self.lhapdf_version
def get_lhapdf_pdfsetsdir(self):
lhapdf_version = self.get_lhapdf_version()
# check if the LHAPDF_DATA_PATH variable is defined
if 'LHAPDF_DATA_PATH' in os.environ.keys() and os.environ['LHAPDF_DATA_PATH']:
datadir = os.environ['LHAPDF_DATA_PATH']
elif lhapdf_version.startswith('5.'):
datadir = subprocess.Popen([self.options['lhapdf'], '--pdfsets-path'],
stdout = subprocess.PIPE).stdout.read().strip()
elif lhapdf_version.startswith('6.'):
datadir = subprocess.Popen([self.options['lhapdf'], '--datadir'],
stdout = subprocess.PIPE).stdout.read().strip()
return datadir
def get_lhapdf_libdir(self):
lhapdf_version = self.get_lhapdf_version()
if lhapdf_version.startswith('5.'):
libdir = subprocess.Popen([self.options['lhapdf-config'], '--libdir'],
stdout = subprocess.PIPE).stdout.read().strip()
elif lhapdf_version.startswith('6.'):
libdir = subprocess.Popen([self.options['lhapdf'], '--libs'],
stdout = subprocess.PIPE).stdout.read().strip()
return libdir
class AskforEditCard(cmd.OneLinePathCompletion):
"""A class for asking a question where in addition you can have the
set command define and modifying the param_card/run_card correctly"""
all_card_name = ['param_card', 'run_card', 'pythia_card', 'pythia8_card',
'madweight_card', 'MadLoopParams', 'shower_card']
special_shortcut = {'ebeam':([float],['run_card ebeam1 %(0)s', 'run_card ebeam2 %(0)s']),
'lpp': ([int],['run_card lpp1 %(0)s', 'run_card lpp2 %(0)s' ]),
'lhc': ([int],['run_card lpp1 1', 'run_card lpp2 1', 'run_card ebeam1 %(0)s*1000/2', 'run_card ebeam2 %(0)s*1000/2']),
'lep': ([int],['run_card lpp1 0', 'run_card lpp2 0', 'run_card ebeam1 %(0)s/2', 'run_card ebeam2 %(0)s/2']),
'ilc': ([int],['run_card lpp1 0', 'run_card lpp2 0', 'run_card ebeam1 %(0)s/2', 'run_card ebeam2 %(0)s/2']),
'lcc': ([int],['run_card lpp1 1', 'run_card lpp2 1', 'run_card ebeam1 %(0)s*1000/2', 'run_card ebeam2 %(0)s*1000/2']),
'fixed_scale': ([float],['run_card fixed_fac_scale T', 'run_card fixed_ren_scale T', 'run_card scale %(0)s', 'run_card dsqrt_q2fact1 %(0)s' ,'run_card dsqrt_q2fact2 %(0)s']),
'simplepy8':([],['pythia8_card hadronlevel:all False',
'pythia8_card partonlevel:mpi False',
'pythia8_card BeamRemnants:primordialKT False',
'pythia8_card PartonLevel:Remnants False',
'pythia8_card Check:event False',
'pythia8_card TimeShower:QEDshowerByQ False',
'pythia8_card TimeShower:QEDshowerByL False',
'pythia8_card SpaceShower:QEDshowerByQ False',
'pythia8_card SpaceShower:QEDshowerByL False',
'pythia8_card PartonLevel:FSRinResonances False',
'pythia8_card ProcessLevel:resonanceDecays False',
]),
'mpi':([bool],['pythia8_card partonlevel:mpi %(0)s']),
'no_parton_cut':([],['run_card nocut T'])
}
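# Illustrative expansion (assumed user input): 'set lhc 13' runs the four commands of the
# 'lhc' entry above, with %(0)s replaced by '13', i.e. lpp1/lpp2 set to 1 and
# beam energies of 13*1000/2 = 6500 GeV each.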
special_shortcut_help = {
'ebeam' : 'syntax: set ebeam VALUE:\n This parameter sets the energy of both beams to the given value in GeV',
'lpp' : 'syntax: set lpp VALUE:\n'+\
' Set the type of both beams to the given value\n'+\
' 0 : means no PDF\n'+\
' 1 : means proton PDF\n'+\
' -1 : means antiproton PDF\n'+\
' 2 : means PDF for elastic photon emitted from a proton\n'+\
' 3 : means PDF for elastic photon emitted from an electron',
'lhc' : 'syntax: set lhc VALUE:\n Set up a proton-proton collision with the given center of mass energy (in TeV)',
'lep' : 'syntax: set lep VALUE:\n Set up an electron-positron collision with the given center of mass energy (in GeV)',
'fixed_scale' : 'syntax: set fixed_scale VALUE:\n Set all scales to the given value (in GeV)',
'simplepy8' : 'Turn off the slow, non-perturbative features of Pythia8.',
'mpi' : 'syntax: set mpi VALUE: allows turning MPI in Pythia8 on/off'
}
def load_default(self):
""" define all default variable. No load of card here.
This allow to subclass this class and just change init and still have
all variables defined."""
self.me_dir = None
self.param_card = None
self.run_card = {}
self.pname2block = {}
self.conflict = []
self.restricted_value = {}
self.mode = ''
self.cards = []
self.run_set = []
self.has_mw = False
self.has_ml = False
self.has_shower = False
self.has_PY8 = False
self.paths = {}
def define_paths(self, **opt):
# Initiation
if 'pwd' in opt:
self.me_dir = opt['pwd']
elif 'mother_interface' in opt:
self.mother_interface = opt['mother_interface']
if not hasattr(self, 'me_dir') or not self.me_dir:
self.me_dir = self.mother_interface.me_dir
#define paths
self.paths['param'] = pjoin(self.me_dir,'Cards','param_card.dat')
self.paths['param_default'] = pjoin(self.me_dir,'Cards','param_card_default.dat')
self.paths['run'] = pjoin(self.me_dir,'Cards','run_card.dat')
self.paths['run_default'] = pjoin(self.me_dir,'Cards','run_card_default.dat')
self.paths['transfer'] =pjoin(self.me_dir,'Cards','transfer_card.dat')
self.paths['MadWeight'] =pjoin(self.me_dir,'Cards','MadWeight_card.dat')
self.paths['MadWeight_default'] =pjoin(self.me_dir,'Cards','MadWeight_card_default.dat')
self.paths['ML'] =pjoin(self.me_dir,'Cards','MadLoopParams.dat')
self.paths['shower'] = pjoin(self.me_dir,'Cards','shower_card.dat')
self.paths['shower_default'] = pjoin(self.me_dir,'Cards','shower_card_default.dat')
self.paths['FO_analyse'] = pjoin(self.me_dir,'Cards','FO_analyse_card.dat')
self.paths['FO_analyse_default'] = pjoin(self.me_dir,'Cards','FO_analyse_card_default.dat')
self.paths['pythia'] =pjoin(self.me_dir, 'Cards','pythia_card.dat')
self.paths['pythia8'] = pjoin(self.me_dir, 'Cards','pythia8_card.dat')
self.paths['pythia8_default'] = pjoin(self.me_dir, 'Cards','pythia8_card_default.dat')
self.paths['madspin_default'] = pjoin(self.me_dir,'Cards/madspin_card_default.dat')
self.paths['madspin'] = pjoin(self.me_dir,'Cards/madspin_card.dat')
self.paths['reweight'] = pjoin(self.me_dir,'Cards','reweight_card.dat')
self.paths['delphes'] = pjoin(self.me_dir,'Cards','delphes_card.dat')
self.paths['plot'] = pjoin(self.me_dir,'Cards','plot_card.dat')
self.paths['plot_default'] = pjoin(self.me_dir,'Cards','plot_card_default.dat')
self.paths['madanalysis5_parton'] = pjoin(self.me_dir,'Cards','madanalysis5_parton_card.dat')
self.paths['madanalysis5_hadron'] = pjoin(self.me_dir,'Cards','madanalysis5_hadron_card.dat')
self.paths['madanalysis5_parton_default'] = pjoin(self.me_dir,'Cards','madanalysis5_parton_card_default.dat')
self.paths['madanalysis5_hadron_default'] = pjoin(self.me_dir,'Cards','madanalysis5_hadron_card_default.dat')
self.paths['FO_analyse'] = pjoin(self.me_dir,'Cards', 'FO_analyse_card.dat')
def __init__(self, question, cards=[], mode='auto', *args, **opt):
self.load_default()
self.define_paths(**opt)
cmd.OneLinePathCompletion.__init__(self, question, *args, **opt)
try:
self.param_card = check_param_card.ParamCard(self.paths['param'])
except (check_param_card.InvalidParamCard, ValueError) as e:
logger.error('Current param_card is not valid. We are going to use the default one.')
logger.error('problem detected: %s' % e)
files.cp(self.paths['param_default'], self.paths['param'])
self.param_card = check_param_card.ParamCard(self.paths['param'])
default_param = check_param_card.ParamCard(self.paths['param_default'])
self.param_card_default = default_param
try:
self.run_card = banner_mod.RunCard(self.paths['run'], consistency='warning')
except IOError:
self.run_card = {}
try:
run_card_def = banner_mod.RunCard(self.paths['run_default'])
except IOError:
run_card_def = {}
self.pname2block = {}
self.conflict = []
self.restricted_value = {}
self.mode = mode
self.cards = cards
# Read the comments of param_card_default to find the variable names for
# the param_card; also check which values seem to be constrained in the
# model.
self.pname2block, self.restricted_value = \
default_param.analyze_param_card()
if run_card_def:
self.run_set = run_card_def.keys() + self.run_card.hidden_param
elif self.run_card:
self.run_set = self.run_card.keys()
else:
self.run_set = []
# check for conflict with run_card
for var in self.pname2block:
if var in self.run_set:
self.conflict.append(var)
self.has_delphes = False
if 'delphes_card.dat' in cards:
self.has_delphes = True
#check if Madweight_card is present:
self.has_mw = False
if 'madweight_card.dat' in cards:
self.do_change_tf = self.mother_interface.do_define_transfer_fct
self.complete_change_tf = self.mother_interface.complete_define_transfer_fct
self.help_change_tf = self.mother_interface.help_define_transfer_fct
if not os.path.exists(self.paths['transfer']):
logger.warning('No transfer function currently defined. Please use the change_tf command to define one.')
self.has_mw = True
try:
import madgraph.madweight.Cards as mwcards
except:
import internal.madweight.Cards as mwcards
self.mw_card = mwcards.Card(self.paths['MadWeight'])
self.mw_card = self.mw_card.info
self.mw_vars = []
for key in self.mw_card:
if key == 'comment':
continue
for key2 in self.mw_card.info[key]:
if isinstance(key2, str) and not key2.isdigit():
self.mw_vars.append(key2)
# check for conflict with run_card/param_card
for var in self.pname2block:
if var in self.mw_vars:
self.conflict.append(var)
for var in self.mw_vars:
if var in self.run_card:
self.conflict.append(var)
#check if MadLoopParams.dat is present:
self.has_ml = False
if os.path.isfile(self.paths['ML']):
self.has_ml = True
self.MLcard = banner_mod.MadLoopParam(self.paths['ML'])
self.MLcardDefault = banner_mod.MadLoopParam()
self.ml_vars = [k.lower() for k in self.MLcard.keys()]
# check for conflict
for var in self.ml_vars:
if var in self.run_card:
self.conflict.append(var)
if var in self.pname2block:
self.conflict.append(var)
if self.has_mw and var in self.mw_vars:
self.conflict.append(var)
#check if shower_card is present:
self.has_shower = False
if 'shower_card.dat' in cards:
self.has_shower = True
self.shower_card = shower_card_mod.ShowerCard(self.paths['shower'])
self.shower_vars = self.shower_card.keys()
# check for conflict with run_card/param_card
for var in self.pname2block:
if var in self.shower_vars:
self.conflict.append(var)
for var in self.shower_vars:
if var in self.run_card:
self.conflict.append(var)
#check if pythia8_card.dat is present:
self.has_PY8 = False
if 'pythia8_card.dat' in cards:
self.has_PY8 = True
self.PY8Card = banner_mod.PY8Card(self.paths['pythia8'])
self.PY8CardDefault = banner_mod.PY8Card()
self.py8_vars = [k.lower() for k in self.PY8Card.keys()]
# check for conflict
for var in self.py8_vars:
if var in self.run_card:
self.conflict.append(var)
if var in self.pname2block:
self.conflict.append(var)
if self.has_mw and var in self.mw_vars:
self.conflict.append(var)
if self.has_ml and var in self.ml_vars:
self.conflict.append(var)
def do_help(self, line, conflict_raise=False, banner=True):
# try:
if banner:
logger.info('*** HELP MESSAGE ***', '$MG:color:BLACK')
args = self.split_arg(line)
# handle command-related help
if len(args)==0 or (len(args) == 1 and hasattr(self, 'do_%s' % args[0])):
out = cmd.BasicCmd.do_help(self, line)
if len(args)==0:
print 'Allowed Argument'
print '================'
print '\t'.join(self.allow_arg)
print
print 'Special shortcut: (type help <name>)'
print '===================================='
print ' syntax: set <name> <value>'
print '\t'.join(self.special_shortcut)
print
if banner:
logger.info('*** END HELP ***', '$MG:color:BLACK')
return out
# check for special shortcut.
# special shortcut:
if args[0] in self.special_shortcut:
if args[0] in self.special_shortcut_help:
print self.special_shortcut_help[args[0]]
if banner:
logger.info('*** END HELP ***', '$MG:color:BLACK')
return
start = 0
card = ''
if args[0]+'_card' in self.all_card_name+ self.cards:
args[0] += '_card'
elif args[0]+'.dat' in self.all_card_name+ self.cards:
args[0] += '.dat'
elif args[0]+'_card.dat' in self.all_card_name+ self.cards:
args[0] += '_card.dat'
if args[0] in self.all_card_name + self.cards:
start += 1
card = args[0]
if len(args) == 1:
if args[0] == 'pythia8_card':
args[0] = 'PY8Card'
if args[0] == 'param_card':
logger.info("Param_card information: ", '$MG:color:BLUE')
print "File to define the various model parameter"
logger.info("List of the Block defined:",'$MG:color:BLUE')
print "\t".join(self.param_card.keys())
elif args[0].startswith('madanalysis5'):
print 'This card allows making plots with the madanalysis5 package'
print 'An example card is provided. For more information about the '
print 'syntax please refer to: https://madanalysis.irmp.ucl.ac.be/'
print 'or to the user manual [arXiv:1206.1599]'
if args[0].startswith('madanalysis5_hadron'):
print
print 'This card also allows performing recasting analyses'
print 'For more details, see: arXiv:1407.3278'
elif hasattr(self, args[0]):
logger.info("%s information: " % args[0], '$MG:color:BLUE')
print(eval('self.%s' % args[0]).__doc__)
logger.info("List of parameter associated", '$MG:color:BLUE')
print "\t".join(eval('self.%s' % args[0]).keys())
if banner:
logger.info('*** END HELP ***', '$MG:color:BLACK')
return
#### RUN CARD
if args[start] in [l.lower() for l in self.run_card.keys()] and card in ['', 'run_card']:
if args[start] not in self.run_set:
args[start] = [l for l in self.run_set if l.lower() == args[start]][0]
if args[start] in self.conflict and not conflict_raise:
conflict_raise = True
logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
if card == '':
logger.info('** If not explicitly specified, this parameter will modify the run_card file', '$MG:color:BLACK')
self.run_card.do_help(args[start])
### PARAM_CARD WITH BLOCK NAME -----------------------------------------
elif (args[start] in self.param_card or args[start] == 'width') \
and card in ['','param_card']:
if args[start] in self.conflict and not conflict_raise:
conflict_raise = True
logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
if card == '':
logger.info('** If not explicitly specified, this parameter will modify the param_card file', '$MG:color:BLACK')
if args[start] == 'width':
args[start] = 'decay'
if len(args) == start+1:
self.param_card.do_help(args[start], tuple())
key = None
elif args[start+1] in self.pname2block:
all_var = self.pname2block[args[start+1]]
key = None
for bname, lhaid in all_var:
if bname == args[start]:
key = lhaid
break
else:
logger.warning('%s is not part of block "%s" but "%s". Please correct.' %
(args[start+1], args[start], bname))
else:
try:
key = tuple([int(i) for i in args[start+1:]])
except ValueError:
logger.warning('Failed to identify LHA information')
return
if key in self.param_card[args[start]].param_dict:
self.param_card.do_help(args[start], key, default=self.param_card_default)
elif key:
logger.warning('invalid information: %s not defined in the param_card' % (key,))
# PARAM_CARD NO BLOCK NAME ---------------------------------------------
elif args[start] in self.pname2block and card in ['','param_card']:
if args[start] in self.conflict and not conflict_raise:
conflict_raise = True
logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
if card == '':
logger.info('** If not explicitly specified, this parameter will modify the param_card file', '$MG:color:BLACK')
all_var = self.pname2block[args[start]]
for bname, lhaid in all_var:
new_line = 'param_card %s %s %s' % (bname,
' '.join([ str(i) for i in lhaid]), ' '.join(args[start+1:]))
self.do_help(new_line, conflict_raise=True, banner=False)
# MadLoop Parameter ---------------------------------------------------
elif self.has_ml and args[start] in self.ml_vars \
and card in ['', 'MadLoop_card']:
if args[start] in self.conflict and not conflict_raise:
conflict_raise = True
logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
if card == '':
logger.info('** If not explicitly specified, this parameter will modify the madloop_card file', '$MG:color:BLACK')
self.MLcard.do_help(args[start])
# Pythia8 Parameter ---------------------------------------------------
elif self.has_PY8 and args[start] in self.PY8Card:
if args[start] in self.conflict and not conflict_raise:
conflict_raise = True
logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
if card == '':
logger.info('** If not explicitly specified, this parameter will modify the pythia8_card file', '$MG:color:BLACK')
self.PY8Card.do_help(args[start])
elif card.startswith('madanalysis5'):
print 'MA5'
else:
print "no help available"
if banner:
logger.info('*** END HELP ***', '$MG:color:BLACK')
#raw_input('press enter to quit the help')
return
# except Exception, error:
# if __debug__:
# import traceback
# traceback.print_exc()
# print error
def complete_help(self, text, line, begidx, endidx):
prev_timer = signal.alarm(0) # avoid timer if any
if prev_timer:
nb_back = len(line)
self.stdout.write('\b'*nb_back + '[timer stopped]\n')
self.stdout.write(line)
self.stdout.flush()
# try:
possibilities = self.complete_set(text, line, begidx, endidx,formatting=False)
if line[:begidx].strip() == 'help':
possibilities['Defined command'] = cmd.BasicCmd.completenames(self, text, line)#, begidx, endidx)
possibilities.update(self.complete_add(text, line, begidx, endidx,formatting=False))
return self.deal_multiple_categories(possibilities)
# except Exception, error:
# import traceback
# traceback.print_exc()
# print error
def complete_update(self, text, line, begidx, endidx):
prev_timer = signal.alarm(0) # avoid timer if any
if prev_timer:
nb_back = len(line)
self.stdout.write('\b'*nb_back + '[timer stopped]\n')
self.stdout.write(line)
self.stdout.flush()
arg = line[:begidx].split()
if len(arg) <=1:
return self.list_completion(text, ['dependent', 'missing', 'to_slha1', 'to_slha2'], line)
def complete_set(self, text, line, begidx, endidx, formatting=True):
""" Complete the set command"""
prev_timer = signal.alarm(0) # avoid timer if any
if prev_timer:
nb_back = len(line)
self.stdout.write('\b'*nb_back + '[timer stopped]\n')
self.stdout.write(line)
self.stdout.flush()
possibilities = {}
allowed = {}
args = self.split_arg(line[0:begidx])
if args[-1] in ['Auto', 'default']:
return
if len(args) == 1:
allowed = {'category':'', 'run_card':'', 'block':'all', 'param_card':'','shortcut':''}
if self.has_mw:
allowed['madweight_card'] = ''
allowed['mw_block'] = 'all'
if self.has_shower:
allowed['shower_card'] = ''
if self.has_ml:
allowed['madloop_card'] = ''
if self.has_PY8:
allowed['pythia8_card'] = ''
if self.has_delphes:
allowed['delphes_card'] = ''
elif len(args) == 2:
if args[1] == 'run_card':
allowed = {'run_card':'default'}
elif args[1] == 'param_card':
allowed = {'block':'all', 'param_card':'default'}
elif args[1] in self.param_card.keys():
allowed = {'block':args[1]}
elif args[1] == 'width':
allowed = {'block': 'decay'}
elif args[1] == 'MadWeight_card':
allowed = {'madweight_card':'default', 'mw_block': 'all'}
elif args[1] == 'MadLoop_card':
allowed = {'madloop_card':'default'}
elif args[1] == 'pythia8_card':
allowed = {'pythia8_card':'default'}
elif self.has_mw and args[1] in self.mw_card.keys():
allowed = {'mw_block':args[1]}
elif args[1] == 'shower_card':
allowed = {'shower_card':'default'}
elif args[1] == 'delphes_card':
allowed = {'delphes_card':'default'}
else:
allowed = {'value':''}
else:
start = 1
if args[1] in ['run_card', 'param_card', 'MadWeight_card', 'shower_card',
'MadLoop_card','pythia8_card','delphes_card','plot_card',
'madanalysis5_parton_card','madanalysis5_hadron_card']:
start = 2
if args[-1] in self.pname2block.keys():
allowed['value'] = 'default'
elif args[start] in self.param_card.keys() or args[start] == 'width':
if args[start] == 'width':
args[start] = 'decay'
if args[start+1:]:
allowed = {'block':(args[start], args[start+1:])}
else:
allowed = {'block':args[start]}
elif self.has_mw and args[start] in self.mw_card.keys():
if args[start+1:]:
allowed = {'mw_block':(args[start], args[start+1:])}
else:
allowed = {'mw_block':args[start]}
#elif len(args) == start +1:
# allowed['value'] = ''
else:
allowed['value'] = ''
if 'category' in allowed.keys():
categories = ['run_card', 'param_card']
if self.has_mw:
categories.append('MadWeight_card')
if self.has_shower:
categories.append('shower_card')
if self.has_ml:
categories.append('MadLoop_card')
if self.has_PY8:
categories.append('pythia8_card')
if self.has_delphes:
categories.append('delphes_card')
possibilities['category of parameter (optional)'] = \
self.list_completion(text, categories)
if 'shortcut' in allowed.keys():
possibilities['special values'] = self.list_completion(text, self.special_shortcut.keys()+['qcut', 'showerkt'])
if 'run_card' in allowed.keys():
opts = self.run_set
if allowed['run_card'] == 'default':
opts.append('default')
possibilities['Run Card'] = self.list_completion(text, opts)
if 'param_card' in allowed.keys():
opts = self.pname2block.keys()
if allowed['param_card'] == 'default':
opts.append('default')
possibilities['Param Card'] = self.list_completion(text, opts)
if 'madweight_card' in allowed.keys():
opts = self.mw_vars + [k for k in self.mw_card.keys() if k !='comment']
if allowed['madweight_card'] == 'default':
opts.append('default')
possibilities['MadWeight Card'] = self.list_completion(text, opts)
if 'madloop_card' in allowed.keys():
opts = self.ml_vars
if allowed['madloop_card'] == 'default':
opts.append('default')
possibilities['MadLoop Parameter'] = self.list_completion(text, opts)
if 'pythia8_card' in allowed.keys():
opts = self.py8_vars
if allowed['pythia8_card'] == 'default':
opts.append('default')
possibilities['Pythia8 Parameter'] = self.list_completion(text, opts)
if 'shower_card' in allowed.keys():
opts = self.shower_vars + [k for k in self.shower_card.keys() if k !='comment']
if allowed['shower_card'] == 'default':
opts.append('default')
possibilities['Shower Card'] = self.list_completion(text, opts)
if 'delphes_card' in allowed:
if allowed['delphes_card'] == 'default':
opts = ['default', 'atlas', 'cms']
possibilities['Delphes Card'] = self.list_completion(text, opts)
if 'value' in allowed.keys():
opts = ['default']
if 'decay' in args:
opts.append('Auto')
opts.append('Auto@NLO')
elif args[-1] in self.pname2block and self.pname2block[args[-1]][0][0] == 'decay':
opts.append('Auto')
opts.append('Auto@NLO')
possibilities['Special Value'] = self.list_completion(text, opts)
if 'block' in allowed.keys():
if allowed['block'] == 'all':
allowed_block = [i for i in self.param_card.keys() if 'qnumbers' not in i]
allowed_block.append('width')
possibilities['Param Card Block' ] = \
self.list_completion(text, allowed_block)
elif isinstance(allowed['block'], basestring):
block = self.param_card[allowed['block']].param_dict
ids = [str(i[0]) for i in block
if (allowed['block'], i) not in self.restricted_value]
possibilities['Param Card id' ] = self.list_completion(text, ids)
varname = [name for name, all_var in self.pname2block.items()
if any((bname == allowed['block']
for bname,lhaid in all_var))]
possibilities['Param card variable'] = self.list_completion(text,
varname)
else:
block = self.param_card[allowed['block'][0]].param_dict
nb = len(allowed['block'][1])
ids = [str(i[nb]) for i in block if len(i) > nb and \
[str(a) for a in i[:nb]] == allowed['block'][1]]
if not ids:
if tuple([int(i) for i in allowed['block'][1]]) in block:
opts = ['default']
if allowed['block'][0] == 'decay':
opts.append('Auto')
opts.append('Auto@NLO')
possibilities['Special value'] = self.list_completion(text, opts)
possibilities['Param Card id' ] = self.list_completion(text, ids)
if 'mw_block' in allowed.keys():
if allowed['mw_block'] == 'all':
allowed_block = [i for i in self.mw_card.keys() if 'comment' not in i]
possibilities['MadWeight Block' ] = \
self.list_completion(text, allowed_block)
elif isinstance(allowed['mw_block'], basestring):
block = self.mw_card[allowed['mw_block']]
ids = [str(i[0]) if isinstance(i, tuple) else str(i) for i in block]
possibilities['MadWeight Card id' ] = self.list_completion(text, ids)
else:
block = self.mw_card[allowed['mw_block'][0]]
nb = len(allowed['mw_block'][1])
ids = [str(i[nb]) for i in block if isinstance(i, tuple) and\
len(i) > nb and \
[str(a) for a in i[:nb]] == allowed['mw_block'][1]]
if not ids:
if tuple([i for i in allowed['mw_block'][1]]) in block or \
allowed['mw_block'][1][0] in block.keys():
opts = ['default']
possibilities['Special value'] = self.list_completion(text, opts)
possibilities['MadWeight Card id' ] = self.list_completion(text, ids)
return self.deal_multiple_categories(possibilities, formatting)
def do_set(self, line):
""" edit the value of one parameter in the card"""
args = self.split_arg(line)
if len(args) == 0:
logger.warning("No argument. For help type 'help set'.")
# fix some formatting problem
if len(args)==1 and '=' in args[-1]:
arg1, arg2 = args.pop(-1).split('=',1)
args += [arg1, arg2]
if '=' in args:
args.remove('=')
args[:-1] = [ a.lower() for a in args[:-1]]
# special shortcut:
if args[0] in self.special_shortcut:
targettypes , cmd = self.special_shortcut[args[0]]
if len(args) != len(targettypes) +1:
logger.warning('shortcut %s requires %s argument(s)' % (args[0], len(targettypes)))
if len(args) < len(targettypes) +1:
return
else:
logger.warning('additional argument will be ignored')
values ={}
for i, argtype in enumerate(targettypes):
try:
values[str(i)] = banner_mod.ConfigFile.format_variable(args[i+1], argtype, args[0])
except ValueError as e:
logger.warning("Wrong argument: The entry #%s should be of type %s.", i+1, argtype)
return
#else:
# logger.warning("too many argument for this command")
# return
for arg in cmd:
try:
text = arg % values
except KeyError:
logger.warning("This command requires one argument")
return
except Exception as e:
logger.warning(str(e))
return
else:
self.do_set(arg % values)
return
start = 0
if len(args) < 2:
logger.warning('Invalid set command %s (need two arguments)' % line)
return
# Special case for the qcut value
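# Illustrative effect (assumed input): 'set qcut 30' rewrites or appends the line
#   QCUT = 30
# in pythia_card.dat, as done just below.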
if args[0].lower() == 'qcut':
pythia_path = self.paths['pythia']
if os.path.exists(pythia_path):
logger.info('add line QCUT = %s in pythia_card.dat' % args[1])
p_card = open(pythia_path,'r').read()
p_card, n = re.subn('''^\s*QCUT\s*=\s*[\de\+\-\.]*\s*$''',
''' QCUT = %s ''' % args[1], \
p_card, flags=(re.M+re.I))
if n==0:
p_card = '%s \n QCUT= %s' % (p_card, args[1])
with open(pythia_path, 'w') as fsock:
fsock.write(p_card)
return
# Special case for the showerkt value
if args[0].lower() == 'showerkt':
pythia_path = self.paths['pythia']
if os.path.exists(pythia_path):
logger.info('add line SHOWERKT = %s in pythia_card.dat' % args[1].upper())
p_card = open(pythia_path,'r').read()
p_card, n = re.subn('''^\s*SHOWERKT\s*=\s*[default\de\+\-\.]*\s*$''',
''' SHOWERKT = %s ''' % args[1].upper(), \
p_card, flags=(re.M+re.I))
if n==0:
p_card = '%s \n SHOWERKT= %s' % (p_card, args[1].upper())
with open(pythia_path, 'w') as fsock:
fsock.write(p_card)
return
card = '' #store which card need to be modify (for name conflict)
if args[0] == 'madweight_card':
if not self.mw_card:
logger.warning('Invalid Command: No MadWeight card defined.')
return
args[0] = 'MadWeight_card'
if args[0] == 'shower_card':
if not self.shower_card:
logger.warning('Invalid Command: No Shower card defined.')
return
args[0] = 'shower_card'
if args[0] == "madloop_card":
if not self.has_ml:
logger.warning('Invalid Command: No MadLoopParam card defined.')
return
args[0] = 'MadLoop_card'
if args[0] == "pythia8_card":
if not self.has_PY8:
logger.warning('Invalid Command: No Pythia8 card defined.')
return
args[0] = 'pythia8_card'
if args[0] == 'delphes_card':
if not self.has_delphes:
logger.warning('Invalid Command: No Delphes card defined.')
return
if args[1] == 'atlas':
logger.info("set default ATLAS configuration for Delphes", '$MG:color:BLACK')
files.cp(pjoin(self.me_dir,'Cards', 'delphes_card_ATLAS.dat'),
pjoin(self.me_dir,'Cards', 'delphes_card.dat'))
return
elif args[1] == 'cms':
logger.info("set default CMS configuration for Delphes",'$MG:color:BLACK')
files.cp(pjoin(self.me_dir,'Cards', 'delphes_card_CMS.dat'),
pjoin(self.me_dir,'Cards', 'delphes_card.dat'))
return
if args[0] in ['run_card', 'param_card', 'MadWeight_card', 'shower_card',
'delphes_card','madanalysis5_hadron_card','madanalysis5_parton_card']:
if args[1] == 'default':
logger.info('replace %s by the default card' % args[0],'$MG:color:BLACK')
files.cp(self.paths['%s_default' %args[0][:-5]], self.paths[args[0][:-5]])
if args[0] == 'param_card':
self.param_card = check_param_card.ParamCard(self.paths['param'])
elif args[0] == 'run_card':
self.run_card = banner_mod.RunCard(self.paths['run'])
elif args[0] == 'shower_card':
self.shower_card = shower_card_mod.ShowerCard(self.paths['shower'])
return
else:
card = args[0]
start=1
if len(args) < 3:
logger.warning('Invalid set command: %s (not enough arguments)' % line)
return
elif args[0] in ['MadLoop_card']:
if args[1] == 'default':
logger.info('replace MadLoopParams.dat by the default card','$MG:color:BLACK')
self.MLcard = banner_mod.MadLoopParam(self.MLcardDefault)
self.MLcard.write(self.paths['ML'],
commentdefault=True)
return
else:
card = args[0]
start=1
if len(args) < 3:
logger.warning('Invalid set command: %s (not enough arguments)' % line)
return
elif args[0] in ['pythia8_card']:
if args[1] == 'default':
logger.info('replace pythia8_card.dat by the default card','$MG:color:BLACK')
self.PY8Card = banner_mod.PY8Card(self.PY8CardDefault)
self.PY8Card.write(pjoin(self.me_dir,'Cards','pythia8_card.dat'),
pjoin(self.me_dir,'Cards','pythia8_card_default.dat'),
print_only_visible=True)
return
else:
card = args[0]
start=1
if len(args) < 3:
logger.warning('Invalid set command: %s (not enough arguments)' % line)
return
elif args[0] in ['madspin_card']:
if args[1] == 'default':
logger.info('replace madspin_card.dat by the default card','$MG:color:BLACK')
files.cp(self.paths['MS_default'], self.paths['madspin'])
return
else:
logger.warning("""Command set not allowed for modifying the madspin_card.
Check the command \"decay\" instead.""")
return
#### RUN CARD
if args[start] in [l.lower() for l in self.run_card.keys()] and card in ['', 'run_card']:
if args[start] not in self.run_set:
args[start] = [l for l in self.run_set if l.lower() == args[start]][0]
if args[start] in self.conflict and card == '':
                text = 'Ambiguous name (present in more than one card). Will assume it refers to the run_card.\n'
text += 'If this is not intended, please reset it in the run_card and specify the relevant card to \n'
text += 'edit, in the format < set card parameter value >'
logger.warning(text)
if args[start+1] == 'default':
default = banner_mod.RunCard(self.paths['run_default'])
if args[start] in default.keys():
self.setR(args[start],default[args[start]])
else:
logger.info('remove information %s from the run_card' % args[start],'$MG:color:BLACK')
del self.run_card[args[start]]
else:
if args[0].startswith('sys_') or \
args[0] in self.run_card.list_parameter or \
args[0] in self.run_card.dict_parameter:
val = ' '.join(args[start+1:])
val = val.split('#')[0]
else:
val = args[start+1]
self.setR(args[start], val)
self.run_card.write(self.paths['run'], self.paths['run_default'])
# special mode for set run_card nocut T (generated by set no_parton_cut
elif card == 'run_card' and args[start] in ['nocut', 'no_cut']:
logger.info("Going to remove all cuts from the run_card", '$MG:color:BLACK')
self.run_card.remove_all_cut()
self.run_card.write(self.paths['run'], self.paths['run_default'])
### PARAM_CARD WITH BLOCK NAME -----------------------------------------
elif (args[start] in self.param_card or args[start] == 'width') \
and card in ['','param_card']:
#special treatment for scan
if any(t.startswith('scan') for t in args):
index = [i for i,t in enumerate(args) if t.startswith('scan')][0]
args = args[:index] + [' '.join(args[index:])]
if args[start] in self.conflict and card == '':
text = 'ambiguous name (present in more than one card). Please specify which card to edit'
text += ' in the format < set card parameter value>'
logger.warning(text)
return
if args[start] == 'width':
args[start] = 'decay'
if args[start+1] in self.pname2block:
all_var = self.pname2block[args[start+1]]
key = None
for bname, lhaid in all_var:
if bname == args[start]:
key = lhaid
break
else:
logger.warning('%s is not part of block "%s" but "%s". please correct.' %
(args[start+1], args[start], bname))
return
else:
try:
key = tuple([int(i) for i in args[start+1:-1]])
except ValueError:
if args[start] == 'decay' and args[start+1:-1] == ['all']:
for key in self.param_card[args[start]].param_dict:
if (args[start], key) in self.restricted_value:
continue
else:
self.setP(args[start], key, args[-1])
self.param_card.write(self.paths['param'])
return
logger.warning('invalid set command %s (failed to identify LHA information)' % line)
return
if key in self.param_card[args[start]].param_dict:
if (args[start], key) in self.restricted_value:
text = "Note that this parameter seems to be ignore by MG.\n"
text += "MG will use instead the expression: %s\n" % \
self.restricted_value[(args[start], key)]
text += "You need to match this expression for external program (such pythia)."
logger.warning(text)
if args[-1].lower() in ['default', 'auto', 'auto@nlo'] or args[-1].startswith('scan'):
self.setP(args[start], key, args[-1])
else:
try:
value = float(args[-1])
except Exception:
logger.warning('Invalid input: Expected number and not \'%s\'' \
% args[-1])
return
self.setP(args[start], key, value)
else:
logger.warning('invalid set command %s' % line)
return
self.param_card.write(self.paths['param'])
# PARAM_CARD NO BLOCK NAME ---------------------------------------------
elif args[start] in self.pname2block and card in ['','param_card']:
if args[start] in self.conflict and card == '':
text = 'ambiguous name (present in more than one card). Please specify which card to edit'
text += ' in the format < set card parameter value>'
logger.warning(text)
return
all_var = self.pname2block[args[start]]
for bname, lhaid in all_var:
new_line = 'param_card %s %s %s' % (bname,
' '.join([ str(i) for i in lhaid]), ' '.join(args[start+1:]))
self.do_set(new_line)
if len(all_var) > 1:
                logger.warning('This variable corresponds to more than one parameter in the param_card.')
for bname, lhaid in all_var:
logger.warning(' %s %s' % (bname, ' '.join([str(i) for i in lhaid])))
logger.warning('all listed variables have been modified')
# MadWeight_card with block name ---------------------------------------
elif self.has_mw and (args[start] in self.mw_card and args[start] != 'comment') \
and card in ['','MadWeight_card']:
if args[start] in self.conflict and card == '':
text = 'ambiguous name (present in more than one card). Please specify which card to edit'
text += ' in the format < set card parameter value>'
logger.warning(text)
return
block = args[start]
name = args[start+1]
value = args[start+2:]
self.setM(block, name, value)
self.mw_card.write(self.paths['MadWeight'])
# MadWeight_card NO Block name -----------------------------------------
elif self.has_mw and args[start] in self.mw_vars \
and card in ['', 'MadWeight_card']:
if args[start] in self.conflict and card == '':
text = 'ambiguous name (present in more than one card). Please specify which card to edit'
text += ' in the format < set card parameter value>'
logger.warning(text)
return
block = [b for b, data in self.mw_card.items() if args[start] in data]
if len(block) > 1:
                logger.warning('%s is defined in more than one block: %s. Please specify.'
% (args[start], ','.join(block)))
return
block = block[0]
name = args[start]
value = args[start+1:]
self.setM(block, name, value)
self.mw_card.write(self.paths['MadWeight'])
# MadWeight_card New Block ---------------------------------------------
elif self.has_mw and args[start].startswith('mw_') and len(args[start:]) == 3\
and card == 'MadWeight_card':
block = args[start]
name = args[start+1]
value = args[start+2]
self.setM(block, name, value)
self.mw_card.write(self.paths['MadWeight'])
#### SHOWER CARD
elif self.has_shower and args[start].lower() in [l.lower() for l in \
self.shower_card.keys()] and card in ['', 'shower_card']:
if args[start] not in self.shower_card:
args[start] = [l for l in self.shower_card if l.lower() == args[start].lower()][0]
if args[start] in self.conflict and card == '':
text = 'ambiguous name (present in more than one card). Please specify which card to edit'
text += ' in the format < set card parameter value>'
logger.warning(text)
return
if args[start+1].lower() == 'default':
default = shower_card_mod.ShowerCard(self.paths['shower_default'])
if args[start] in default.keys():
self.shower_card.set_param(args[start],default[args[start]], self.paths['shower'])
else:
logger.info('remove information %s from the shower_card' % args[start],'$MG:color:BLACK')
del self.shower_card[args[start]]
elif args[start+1].lower() in ['t','.true.','true']:
self.shower_card.set_param(args[start],'.true.',self.paths['shower'])
elif args[start+1].lower() in ['f','.false.','false']:
self.shower_card.set_param(args[start],'.false.',self.paths['shower'])
elif args[start] in ['analyse', 'extralibs', 'extrapaths', 'includepaths'] or\
args[start].startswith('dm_'):
#case sensitive parameters
args = line.split()
args_str = ' '.join(str(a) for a in args[start+1:len(args)])
self.shower_card.set_param(args[start],args_str,pjoin(self.me_dir,'Cards','shower_card.dat'))
else:
args_str = ' '.join(str(a) for a in args[start+1:len(args)])
self.shower_card.set_param(args[start],args_str,self.paths['shower'])
# MadLoop Parameter ---------------------------------------------------
elif self.has_ml and args[start] in self.ml_vars \
and card in ['', 'MadLoop_card']:
if args[start] in self.conflict and card == '':
text = 'ambiguous name (present in more than one card). Please specify which card to edit'
logger.warning(text)
return
if args[start+1] == 'default':
value = self.MLcardDefault[args[start]]
default = True
else:
value = args[start+1]
default = False
self.setML(args[start], value, default=default)
self.MLcard.write(self.paths['ML'],
commentdefault=True)
# Pythia8 Parameter ---------------------------------------------------
elif self.has_PY8 and (card == 'pythia8_card' or (card == '' and \
args[start] in self.PY8Card)):
if args[start] in self.conflict and card == '':
text = 'ambiguous name (present in more than one card). Please specify which card to edit'
logger.warning(text)
return
if args[start+1] == 'default':
value = self.PY8CardDefault[args[start]]
default = True
else:
value = ' '.join(args[start+1:])
default = False
self.setPY8(args[start], value, default=default)
self.PY8Card.write(pjoin(self.me_dir,'Cards','pythia8_card.dat'),
pjoin(self.me_dir,'Cards','pythia8_card_default.dat'),
print_only_visible=True)
#INVALID --------------------------------------------------------------
else:
logger.warning('invalid set command %s ' % line)
arg = args[start].lower()
if self.has_PY8:
close_opts = [name for name in self.PY8Card if name.lower().startswith(arg[:3]) or arg in name.lower()]
if close_opts:
logger.info('Did you mean one of the following PY8 options:\n%s' % '\t'.join(close_opts))
if self.run_card:
close_opts = [name for name in self.run_card if name.lower().startswith(arg[:3]) or arg in name.lower()]
if close_opts:
logger.info('Did you mean one of the following run_card options:\n%s' % '\t'.join(close_opts))
return
def setM(self, block, name, value):
if isinstance(value, list) and len(value) == 1:
value = value[0]
if block not in self.mw_card:
logger.warning('block %s was not present in the current MadWeight card. We are adding it' % block)
self.mw_card[block] = {}
elif name not in self.mw_card[block]:
logger.info('name %s was not present in the block %s for the current MadWeight card. We are adding it' % (name,block),'$MG:color:BLACK')
if value == 'default':
import madgraph.madweight.Cards as mwcards
mw_default = mwcards.Card(self.paths['MadWeight_default'])
try:
value = mw_default[block][name]
except KeyError:
logger.info('removing id "%s" from Block "%s" '% (name, block),'$MG:color:BLACK')
if name in self.mw_card[block]:
del self.mw_card[block][name]
return
if value:
logger.info('modify madweight_card information BLOCK "%s" with id "%s" set to %s',
block, name, value, '$MG:color:BLACK')
else:
logger.warning("Invalid command: No value. To set default value. Use \"default\" as value")
return
self.mw_card[block][name] = value
def setR(self, name, value):
logger.info('modify parameter %s of the run_card.dat to %s' % (name, value),'$MG:color:BLACK')
self.run_card.set(name, value, user=True)
def setML(self, name, value, default=False):
try:
self.MLcard.set(name, value, user=True)
except Exception, error:
logger.warning("Fail to change parameter. Please Retry. Reason: %s." % error)
return
logger.info('modify parameter %s of the MadLoopParam.dat to %s' % (name, value),'$MG:color:BLACK')
if default and name.lower() in self.MLcard.user_set:
self.MLcard.user_set.remove(name.lower())
def setPY8(self, name, value, default=False):
try:
self.PY8Card.userSet(name, value)
except Exception, error:
logger.warning("Fail to change parameter. Please Retry. Reason: %s." % error)
return
logger.info('modify parameter %s of the pythia8_card.dat to %s' % (name, value), '$MG:color:BLACK')
if default and name.lower() in self.PY8Card.user_set:
self.PY8Card.user_set.remove(name.lower())
def setP(self, block, lhaid, value):
if isinstance(value, str):
value = value.lower()
if value == 'default':
default = check_param_card.ParamCard(self.paths['param_default'])
value = default[block].param_dict[lhaid].value
elif value in ['auto', 'auto@nlo']:
if 'nlo' in value:
value = 'Auto@NLO'
else:
value = 'Auto'
if block != 'decay':
logger.warning('Invalid input: \'Auto\' value only valid for DECAY')
return
elif value.startswith('scan'):
if ':' not in value:
logger.warning('Invalid input: \'scan\' mode requires a \':\' before the definition.')
return
tag = value.split(':')[0]
tag = tag[4:].strip()
if tag and not tag.isdigit():
logger.warning('Invalid input: scan tag need to be integer and not "%s"' % tag)
return
pass
else:
try:
value = float(value)
except ValueError:
                logger.warning('Invalid input: \'%s\' is not a valid input.' % value)
logger.info('modify param_card information BLOCK %s with id %s set to %s' %\
(block, lhaid, value), '$MG:color:BLACK')
self.param_card[block].param_dict[lhaid].value = value
def check_card_consistency(self):
"""This is run on quitting the class. Apply here all the self-consistency
rule that you want. Do the modification via the set command."""
# if NLO reweighting is ON: ensure that we keep the rwgt information
if 'reweight' in self.allow_arg and 'run' in self.allow_arg and \
isinstance(self.run_card,banner_mod.RunCardNLO) and \
not self.run_card['store_rwgt_info']:
#check if a NLO reweighting is required
re_pattern = re.compile(r'''^\s*change\s*mode\s* (LO\+NLO|LO|NLO|NLO_tree)\s*(?:#|$)''', re.M+re.I)
text = open(self.paths['reweight']).read()
options = re_pattern.findall(text)
if any(o in ['NLO', 'LO+NLO'] for o in options):
logger.info('NLO reweighting is on ON. Automatically set store_rwgt_info to True', '$MG:color:BLACK' )
self.do_set('run_card store_rwgt_info True')
# if external computation for the systematics are asked then switch
#automatically the book-keeping of the weight for NLO
if 'run' in self.allow_arg and \
self.run_card['systematics_program'] == 'systematics' and \
isinstance(self.run_card,banner_mod.RunCardNLO) and \
not self.run_card['store_rwgt_info']:
logger.warning('To be able to run systematics program, we set store_rwgt_info to True')
self.do_set('run_card store_rwgt_info True')
# @LO if PY6 shower => event_norm on sum
if 'pythia_card.dat' in self.cards:
if self.run_card['event_norm'] != 'sum':
logger.info('Pythia6 needs a specific normalisation of the events. We will change it accordingly.', '$MG:color:BLACK' )
self.do_set('run_card event_norm sum')
# @LO if PY6 shower => event_norm on sum
elif 'pythia8_card.dat' in self.cards:
if self.run_card['event_norm'] == 'sum':
logger.info('Pythia8 needs a specific normalisation of the events. We will change it accordingly.', '$MG:color:BLACK' )
self.do_set('run_card event_norm average')
# Check the extralibs flag.
if self.has_shower and isinstance(self.run_card, banner_mod.RunCardNLO):
modify_extralibs, modify_extrapaths = False,False
extralibs = self.shower_card['extralibs'].split()
extrapaths = self.shower_card['extrapaths'].split()
# remove default stdhep/Fmcfio for recent shower
if self.run_card['parton_shower'] in ['PYTHIA8', 'HERWIGPP', 'HW7']:
if 'stdhep' in self.shower_card['extralibs']:
extralibs.remove('stdhep')
modify_extralibs = True
if 'Fmcfio' in self.shower_card['extralibs']:
extralibs.remove('Fmcfio')
modify_extralibs = True
if self.run_card['parton_shower'] == 'PYTHIA8':
# First check sanity of PY8
if not self.mother_interface.options['pythia8_path']:
raise self.mother_interface.InvalidCmd, 'Pythia8 is not correctly specified to MadGraph5_aMC@NLO'
executable = pjoin(self.mother_interface.options['pythia8_path'], 'bin', 'pythia8-config')
if not os.path.exists(executable):
                    raise self.mother_interface.InvalidCmd, 'Pythia8 is not correctly specified to MadGraph5_aMC@NLO'
# 2. take the compilation flag of PY8 from pythia8-config
libs , paths = [], []
p = misc.subprocess.Popen([executable, '--libs'], stdout=subprocess.PIPE)
stdout, _ = p. communicate()
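                # Split the 'pythia8-config --libs' output: '-l' entries become library names,
                # while the or-clause side effect collects the remaining '-L' entries into paths.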
libs = [x[2:] for x in stdout.split() if x.startswith('-l') or paths.append(x[2:])]
# Add additional user-defined compilation flags
p = misc.subprocess.Popen([executable, '--config'], stdout=subprocess.PIPE)
stdout, _ = p. communicate()
for lib in ['-ldl','-lstdc++','-lc++']:
if lib in stdout:
libs.append(lib[2:])
# This precompiler flag is in principle useful for the analysis if it writes HEPMC
# events, but there is unfortunately no way for now to specify it in the shower_card.
supports_HEPMCHACK = '-DHEPMC2HACK' in stdout
#3. ensure that those flag are in the shower card
for l in libs:
if l not in extralibs:
modify_extralibs = True
extralibs.append(l)
for L in paths:
if L not in extrapaths:
modify_extrapaths = True
extrapaths.append(L)
# Apply the required modification
if modify_extralibs:
if extralibs:
self.do_set('shower_card extralibs %s ' % ' '.join(extralibs))
else:
self.do_set('shower_card extralibs None ')
if modify_extrapaths:
if extrapaths:
self.do_set('shower_card extrapaths %s ' % ' '.join(extrapaths))
else:
self.do_set('shower_card extrapaths None ')
def reask(self, *args, **opt):
cmd.OneLinePathCompletion.reask(self,*args, **opt)
if self.has_mw and not os.path.exists(pjoin(self.me_dir,'Cards','transfer_card.dat')):
logger.warning('No transfer function currently define. Please use the change_tf command to define one.')
fail_due_to_format = 0 #parameter to avoid infinite loop
def postcmd(self, stop, line):
ending_question = cmd.OneLinePathCompletion.postcmd(self,stop,line)
if ending_question:
self.check_card_consistency()
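            # refresh dependent parameters with a 20s timeout so a slow model load cannot block the prompt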
try:
self.do_update('dependent', timer=20)
except MadGraph5Error, error:
if 'Missing block:' in str(error):
self.fail_due_to_format +=1
if self.fail_due_to_format == 10:
missing, unknow = str(error).split('\n')[-2:]
logger.warning("Invalid param_card:\n%s\n%s\n" % (missing, unknow))
logger.info("Type \"update missing\" to use default value.\n ", '$MG:color:BLACK')
self.value = False # to avoid that entering a command stop the question
return self.reask(True)
else:
raise
return ending_question
def do_update(self, line, timer=0):
""" syntax: update dependent: Change the mass/width of particles which are not free parameter for the model.
update missing: add to the current param_card missing blocks/parameters.
update to_slha1: pass SLHA2 card to SLHA1 convention. (beta)
update to_slha2: pass SLHA1 card to SLHA2 convention. (beta)"""
args = self.split_arg(line)
if len(args)==0:
            logger.warning('missing an argument (dependent or missing). Please retry')
return
if args[0] == 'dependent':
if not self.mother_interface:
logger.warning('Failed to update dependent parameter. This might create trouble for external program (like MadSpin/shower/...)')
pattern_width = re.compile(r'''decay\s+(\+?\-?\d+)\s+auto(@NLO|)''',re.I)
pattern_scan = re.compile(r'''^(decay)?[\s\d]*scan''', re.I+re.M)
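            # pattern_width matches 'decay <pdg> Auto[@NLO]' entries; pattern_scan detects a scan request anywhere in the card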
param_text= open(self.paths['param']).read()
if pattern_scan.search(param_text):
#for block, key in self.restricted_value:
# self.param_card[block].get(key).value = -9.999e-99
# self.param_card.write(self.paths['param'])
return
elif pattern_width.search(param_text):
self.do_compute_widths('')
self.param_card = check_param_card.ParamCard(self.paths['param'])
# calling the routine doing the work
self.update_dependent(self.mother_interface, self.me_dir, self.param_card,
self.paths['param'], timer)
elif args[0] == 'missing':
self.update_missing()
return
elif args[0] == 'to_slha2':
try:
check_param_card.convert_to_mg5card(self.paths['param'])
logger.info('card updated')
except Exception, error:
logger.warning('failed to update to slha2 due to %s' % error)
self.param_card = check_param_card.ParamCard(self.paths['param'])
elif args[0] == 'to_slha1':
try:
check_param_card.convert_to_slha1(self.paths['param'])
logger.info('card updated')
except Exception, error:
logger.warning('failed to update to slha1 due to %s' % error)
self.param_card = check_param_card.ParamCard(self.paths['param'])
@staticmethod
def update_dependent(mecmd, me_dir, param_card, path ,timer=0):
"""static method which can also be called from outside the class
        useful in the presence of a scan.
return if the param_card was updated or not
"""
logger.info('Update the dependent parameter of the param_card.dat')
modify = True
class TimeOutError(Exception):
pass
def handle_alarm(signum, frame):
raise TimeOutError
signal.signal(signal.SIGALRM, handle_alarm)
if timer:
signal.alarm(timer)
log_level=30
else:
log_level=20
# Try to load the model in the limited amount of time allowed
try:
model = mecmd.get_model()
signal.alarm(0)
except TimeOutError:
logger.warning('The model takes too long to load so we bypass the updating of dependent parameter.\n'+\
'This might create trouble for external program (like MadSpin/shower/...)\n'+\
'The update can be forced without timer by typing \'update dependent\' at the time of the card edition')
modify =False
except Exception,error:
logger.debug(str(error))
logger.warning('Failed to update dependent parameter. This might create trouble for external program (like MadSpin/shower/...)')
signal.alarm(0)
else:
restrict_card = pjoin(me_dir,'Source','MODEL','param_card_rule.dat')
if not os.path.exists(restrict_card):
restrict_card = None
#restrict_card = None
if model:
modify = param_card.update_dependent(model, restrict_card, log_level)
if modify and path:
param_card.write(path)
else:
                logger.warning('missing MG5aMC code. Failed to update dependent parameters. This might create trouble for programs like MadSpin/shower/...')
if log_level==20:
logger.info('param_card up to date.')
return modify
def update_missing(self):
def check_block(self, blockname):
add_entry = 0
if blockname.lower() not in self.param_card_default:
                logger.info('unknown block %s: block will be ignored', blockname)
return add_entry
block = self.param_card_default[blockname]
for key in block.keys():
if key not in input_in_block:
param = block.get(key)
if blockname != 'decay':
text.append('\t%s\t%s # %s\n' % (' \t'.join([`i` for i in param.lhacode]), param.value, param.comment))
else:
text.append('DECAY \t%s\t%s # %s\n' % (' \t'.join([`i` for i in param.lhacode]), param.value, param.comment))
add_entry += 1
if add_entry:
text.append('\n')
if add_entry:
logger.info("Adding %s parameter(s) to block %s", add_entry, blockname)
return add_entry
# Add to the current param_card all the missing input at default value
current_block = ''
input_in_block = set()
defined_blocks = set()
decay = set()
text = []
add_entry = 0
for line in open(self.paths['param']):
new_block = re.findall(r'^\s*(block|decay)\s*(\w*)', line, re.I)
if new_block:
new_block = new_block[0]
defined_blocks.add(new_block[1].lower())
if current_block:
add_entry += check_block(self, current_block)
current_block= new_block[1]
input_in_block = set()
if new_block[0].lower() == 'decay':
decay.add((int(new_block[1]),))
current_block = ''
if new_block[1].lower() == 'qnumbers':
current_block = ''
text.append(line)
if not current_block:
continue
#normal line.
#strip comment
line = line.split('#',1)[0]
split = line.split()
if not split:
continue
else:
try:
lhacode = [int(i) for i in split[:-1]]
except:
continue
input_in_block.add(tuple(lhacode))
if current_block:
add_entry += check_block(self, current_block)
# special check for missing block
for block in self.param_card_default:
if block.startswith(('qnumbers', 'decay')):
continue
if block not in defined_blocks:
nb_entry = len(self.param_card_default[block])
logger.info("Block %s was missing. Adding the %s associated parameter(s)", block,nb_entry)
add_entry += nb_entry
text.append(str(self.param_card_default[block]))
# special check for the decay
input_in_block = decay
add_entry += check_block(self, 'decay')
if add_entry:
logger.info('write new param_card with %s new parameter(s).', add_entry, '$MG:color:BLACK')
open(self.paths['param'],'w').write(''.join(text))
self.reload_card(self.paths['param'])
else:
logger.info('No missing parameter detected.', '$MG:color:BLACK')
def check_answer_consistency(self):
"""function called if the code reads a file"""
self.check_card_consistency()
self.do_update('dependent', timer=20)
def help_set(self):
'''help message for set'''
logger.info('********************* HELP SET ***************************')
logger.info("syntax: set [run_card|param_card|...] NAME [VALUE|default]")
logger.info("syntax: set [param_card] BLOCK ID(s) [VALUE|default]")
logger.info('')
logger.info('-- Edit the param_card/run_card/... and replace the value of the')
logger.info(' parameter by the value VALUE.')
logger.info(' ')
logger.info('-- Example:')
logger.info(' set run_card ebeam1 4000')
logger.info(' set ebeam2 4000')
logger.info(' set lpp1 0')
logger.info(' set ptj default')
logger.info('')
logger.info(' set param_card mass 6 175')
logger.info(' set mass 25 125.3')
logger.info(' set mass mh 125')
logger.info(' set mh 125')
logger.info(' set decay 25 0.004')
logger.info(' set decay wh 0.004')
logger.info(' set vmix 2 1 2.326612e-01')
logger.info('')
logger.info(' set param_card default #return all parameter to default')
logger.info(' set run_card default')
logger.info('********************* HELP SET ***************************')
def default(self, line):
"""Default action if line is not recognized"""
line = line.strip()
args = line.split()
if line == '' and self.default_value is not None:
self.value = self.default_value
# check if input is a file
elif hasattr(self, 'do_%s' % args[0]):
self.do_set(' '.join(args[1:]))
elif os.path.isfile(line):
self.copy_file(line)
self.value = 'repeat'
elif self.me_dir and os.path.exists(pjoin(self.me_dir, line)):
self.copy_file(pjoin(self.me_dir,line))
self.value = 'repeat'
elif line.strip() != '0' and line.strip() != 'done' and \
str(line) != 'EOF' and line.strip() in self.allow_arg:
self.open_file(line)
self.value = 'repeat'
elif line.strip().startswith(('http:','www')):
self.value = 'repeat'
import tempfile
fsock, path = tempfile.mkstemp()
try:
text = urllib.urlopen(line.strip())
except Exception:
                logger.error('failed to load the file')
else:
for line in text:
os.write(fsock, line)
os.close(fsock)
self.copy_file(path)
os.remove(path)
else:
self.value = line
return line
def do_decay(self, line):
"""edit the madspin_card to define the decay of the associate particle"""
signal.alarm(0) # avoid timer if any
path = self.paths['madspin']
if 'madspin_card.dat' not in self.cards or not os.path.exists(path):
logger.warning("Command decay not valid. Since MadSpin is not available.")
return
if ">" not in line:
logger.warning("invalid command for decay. Line ignored")
return
if "-add" in line:
# just to have to add the line to the end of the file
particle = line.split('>')[0].strip()
text = open(path).read()
line = line.replace('--add', '').replace('-add','')
logger.info("change madspin_card to add one decay to %s: %s" %(particle, line.strip()), '$MG:color:BLACK')
if 'launch' in text:
text = text.replace('launch', "\ndecay %s\nlaunch\n" % line,1)
else:
text += '\ndecay %s\n launch \n' % line
else:
# Here we have to remove all the previous definition of the decay
#first find the particle
particle = line.split('>')[0].strip()
logger.info("change madspin_card to define the decay of %s: %s" %(particle, line.strip()), '$MG:color:BLACK')
particle = particle.replace('+','\+').replace('-','\-')
decay_pattern = re.compile(r"^\s*decay\s+%s\s*>[\s\w+-~]*?$" % particle, re.I+re.M)
text= open(path).read()
text = decay_pattern.sub('', text)
if 'launch' in text:
text = text.replace('launch', "\ndecay %s\nlaunch\n" % line,1)
else:
text += '\ndecay %s\n launch \n' % line
with open(path,'w') as fsock:
fsock.write(text)
self.reload_card(path)
def do_compute_widths(self, line):
signal.alarm(0) # avoid timer if any
path = self.paths['param']
pattern = re.compile(r'''decay\s+(\+?\-?\d+)\s+auto(@NLO|)''',re.I)
text = open(path).read()
pdg_info = pattern.findall(text)
has_nlo = any("@nlo"==nlo.lower() for _, nlo in pdg_info)
pdg = [p for p,_ in pdg_info]
line = '%s %s' % (line, ' '.join(pdg))
if not '--path' in line:
line += ' --path=%s' % path
if has_nlo:
line += ' --nlo'
try:
return self.mother_interface.do_compute_widths(line)
except InvalidCmd, error:
logger.error("Invalid command: %s " % error)
def help_compute_widths(self):
signal.alarm(0) # avoid timer if any
return self.mother_interface.help_compute_widths()
def help_decay(self):
"""help for command decay which modifies MadSpin_card"""
signal.alarm(0) # avoid timer if any
print '--syntax: decay PROC [--add]'
print ' '
        print '    modify the madspin_card to modify the decay of the associated particle.'
        print '    and define it to PROC.'
        print '    if --add is present, just add a new decay for the associated particle.'
def complete_compute_widths(self, *args, **opts):
prev_timer = signal.alarm(0) # avoid timer if any
if prev_timer:
nb_back = len(line)
self.stdout.write('\b'*nb_back + '[timer stopped]\n')
self.stdout.write(line)
self.stdout.flush()
return self.mother_interface.complete_compute_widths(*args,**opts)
def help_add(self):
"""help for add command"""
logger.info('********************* HELP ADD ***************************')
logger.info( '-- syntax: add pythia8_card NAME VALUE')
logger.info( " add a definition of name in the pythia8_card with the given value")
logger.info( " Do not work for the param_card" )
logger.info( '-- syntax: add filename [OPTION] line')
        logger.info( '   add the given LINE to the end of the associated file (all files supported).')
        logger.info( '   The OPTION parameter allows changing the position where the line is written in the file')
logger.info( ' --after_line=banner : write the line at the end of the banner')
logger.info( ' --line_position=X : insert the line before line X (starts at 0)')
logger.info( ' --after_line="<regular-expression>" write the line after the first line matching the regular expression')
logger.info( ' --before_line="<regular-expression>" write the line before the first line matching the regular expression')
logger.info(' --clean remove all previously existing line in the file')
logger.info( ' example: change reweight --after_line="^\s*change mode" change model heft')
logger.info('********************* HELP ADD ***************************')
def complete_add(self, text, line, begidx, endidx, formatting=True):
""" auto-completion for add command"""
prev_timer = signal.alarm(0) # avoid timer if any
if prev_timer:
nb_back = len(line)
self.stdout.write('\b'*nb_back + '[timer stopped]\n')
self.stdout.write(line)
self.stdout.flush()
split = line[:begidx].split()
if len(split)==1:
possibilities = {}
cards = [c.rsplit('.',1)[0] for c in self.cards]
possibilities['category of parameter (optional)'] = \
self.list_completion(text, cards)
elif len(split) == 2:
possibilities = {}
options = ['--line_position=','--after_line=banner', '--after_line="','--before_line="']
possibilities['category of parameter (optional)'] = \
self.list_completion(text, options, line)
else:
return
return self.deal_multiple_categories(possibilities, formatting)
def do_add(self, line):
""" syntax: add filename NAME VALUE
syntax: add filename LINE"""
args = self.split_arg(line)
if len(args) == 3 and args[0] in ['pythia8_card', 'pythia8_card.dat'] and self.has_PY8:
name= args[1]
value = args[2]
self.PY8Card.userSet(name, value)
self.PY8Card.write(pjoin(self.me_dir,'Cards','pythia8_card.dat'),
pjoin(self.me_dir,'Cards','pythia8_card_default.dat'),
print_only_visible=True)
logger.info("add in the pythia8_card the parameter \"%s\" with value \"%s\"" % (name, value), '$MG:color:BLACK')
elif len(args) > 0:
if args[0] in self.cards:
card = args[0]
elif "%s.dat" % args[0] in self.cards:
card = "%s.dat" % args[0]
elif "%s_card.dat" % args[0] in self.cards:
card = "%s_card.dat" % args[0]
elif self.has_ml and args[0].lower() == "madloop":
card = "MadLoopParams.dat"
else:
logger.error("unknow card %s. Please retry." % args[0])
return
# handling the various option on where to write the line
if args[1] == '--clean':
ff = open(pjoin(self.me_dir,'Cards',card),'w')
ff.write("# %s \n" % card)
ff.write("%s \n" % line.split(None,2)[2])
ff.close()
logger.info("writing the line in %s (empty file) the line: \"%s\"" %(card, line.split(None,2)[2] ),'$MG:color:BLACK')
elif args[1].startswith('--line_position='):
#position in file determined by user
text = open(pjoin(self.me_dir,'Cards',card)).read()
split = text.split('\n')
pos = int(args[1].split('=',1)[1])
newline = line.split(None,2)[2]
split.insert(pos, newline)
ff = open(pjoin(self.me_dir,'Cards',card),'w')
ff.write('\n'.join(split))
logger.info("writting at line %d of the file %s the line: \"%s\"" %(pos, card, line.split(None,1)[1] ),'$MG:color:BLACK')
elif args[1].startswith('--after_line=banner'):
# write the line at the first not commented line
text = open(pjoin(self.me_dir,'Cards',card)).read()
split = text.split('\n')
for posline,l in enumerate(split):
if not l.startswith('#'):
break
split.insert(posline, line.split(None,2)[2])
ff = open(pjoin(self.me_dir,'Cards',card),'w')
ff.write('\n'.join(split))
logger.info("writting at line %d of the file %s the line: \"%s\"" %(posline, card, line.split(None,1)[1] ),'$MG:color:BLACK')
elif args[1].startswith('--before_line='):
# catch the line/regular expression and write before that line
text = open(pjoin(self.me_dir,'Cards',card)).read()
split = text.split('\n')
search_pattern=r'''before_line=(?P<quote>["'])(?:(?=(\\?))\2.)*?\1'''
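                # capture the quoted (single- or double-quoted) pattern passed to --before_line=, tolerating escaped quotes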
pattern = re.search(search_pattern, line).group()[13:-1]
for posline,l in enumerate(split):
if re.search(pattern, l):
break
else:
raise Exception, 'invalid regular expression: not found in file'
split.insert(posline, re.split(search_pattern,line)[-1])
ff = open(pjoin(self.me_dir,'Cards',card),'w')
ff.write('\n'.join(split))
logger.info("writting at line %d of the file %s the line: \"%s\"" %(posline, card, line.split(None,1)[1] ),'$MG:color:BLACK')
elif args[1].startswith('--after_line='):
# catch the line/regular expression and write after that line
text = open(pjoin(self.me_dir,'Cards',card)).read()
split = text.split('\n')
search_pattern = r'''after_line=(?P<quote>["'])(?:(?=(\\?))\2.)*?\1'''
pattern = re.search(search_pattern, line).group()[12:-1]
for posline,l in enumerate(split):
if re.search(pattern, l):
break
else:
posline=len(split)
split.insert(posline+1, re.split(search_pattern,line)[-1])
ff = open(pjoin(self.me_dir,'Cards',card),'w')
ff.write('\n'.join(split))
logger.info("writting at line %d of the file %s the line: \"%s\"" %(posline, card, line.split(None,1)[1] ),'$MG:color:BLACK')
else:
ff = open(pjoin(self.me_dir,'Cards',card),'a')
ff.write("%s \n" % line.split(None,1)[1])
ff.close()
logger.info("adding at the end of the file %s the line: \"%s\"" %(card, line.split(None,1)[1] ),'$MG:color:BLACK')
self.reload_card(pjoin(self.me_dir,'Cards',card))
def help_asperge(self):
"""Help associated to the asperge command"""
signal.alarm(0)
print '-- syntax: asperge [options]'
print ' Call ASperGe to diagonalize all mass matrices in the model.'
print ' This works only if the ASperGE module is part of the UFO model (a subdirectory).'
print ' If you specify some names after the command (i.e. asperge m1 m2) then ASperGe will only'
        print '    diagonalize the associated mass matrices (here m1 and m2).'
def complete_asperge(self, text, line, begidx, endidx, formatting=True):
prev_timer = signal.alarm(0) # avoid timer if any
if prev_timer:
nb_back = len(line)
self.stdout.write('\b'*nb_back + '[timer stopped]\n')
self.stdout.write(line)
self.stdout.flush()
blockname = self.pname2block.keys()
# remove those that we know for sure are not mixing
wrong = ['decay', 'mass', 'sminput']
valid = [k for k in blockname if 'mix' in k]
potential = [k for k in blockname if k not in valid+wrong]
output = {'Mixing matrices': self.list_completion(text, valid, line),
'Other potential valid input': self.list_completion(text, potential, line)}
return self.deal_multiple_categories(output, formatting)
def do_asperge(self, line):
"""Running ASperGe"""
signal.alarm(0) # avoid timer if any
path = pjoin(self.me_dir,'bin','internal','ufomodel','ASperGE')
if not os.path.exists(path):
logger.error('ASperge has not been detected in the current model, therefore it will not be run.')
return
elif not os.path.exists(pjoin(path,'ASperGe')):
logger.info('ASperGe has been detected but is not compiled. Running the compilation now.')
try:
misc.compile(cwd=path,shell=True)
except MadGraph5Error, error:
logger.error('''ASperGe failed to compile. Note that gsl is needed
for this compilation to go through. More information on how to install this package on
http://www.gnu.org/software/gsl/
Full compilation log is available at %s''' % pjoin(self.me_dir, 'ASperge_compilation.log'))
open(pjoin(self.me_dir, 'ASperge_compilation.log'),'w').write(str(error))
return
opts = line.split()
card = self.paths['param']
logger.info('running ASperGE')
returncode = misc.call([pjoin(path,'ASperGe'), card, '%s.new' % card] + opts)
if returncode:
logger.error('ASperGE fails with status %s' % returncode)
else:
            logger.info('ASperGe created the file successfully')
files.mv(card, '%s.beforeasperge' % card)
files.mv('%s.new' % card, card)
def copy_file(self, path):
"""detect the type of the file and overwritte the current file"""
if path.endswith('.lhco'):
#logger.info('copy %s as Events/input.lhco' % (path))
#files.cp(path, pjoin(self.mother_interface.me_dir, 'Events', 'input.lhco' ))
self.do_set('mw_run inputfile %s' % os.path.relpath(path, self.mother_interface.me_dir))
return
elif path.endswith('.lhco.gz'):
#logger.info('copy %s as Events/input.lhco.gz' % (path))
#files.cp(path, pjoin(self.mother_interface.me_dir, 'Events', 'input.lhco.gz' ))
self.do_set('mw_run inputfile %s' % os.path.relpath(path, self.mother_interface.me_dir))
return
else:
card_name = CommonRunCmd.detect_card_type(path)
if card_name == 'unknown':
                logger.warning('Failed to determine the type of the file. Not copied')
if card_name != 'banner':
logger.info('copy %s as %s' % (path, card_name))
files.cp(path, self.paths[card_name.split('_',1)[0]])
self.reload_card(self.paths[card_name.split('_',1)[0]])
elif card_name == 'banner':
banner_mod.split_banner(path, self.mother_interface.me_dir, proc_card=False)
                logger.info('Splitting the banner into its components')
if not self.mode == 'auto':
self.mother_interface.keep_cards(self.cards)
for card_name in self.cards:
self.reload_card(pjoin(self.me_dir, 'Cards', card_name))
def open_file(self, answer):
"""open the file"""
try:
me_dir = self.mother_interface.me_dir
except:
me_dir = None
if answer.isdigit():
if answer == '9':
answer = 'plot'
else:
answer = self.cards[int(answer)-1]
if 'madweight' in answer:
answer = answer.replace('madweight', 'MadWeight')
elif 'MadLoopParams' in answer:
answer = self.paths['ML']
elif 'pythia8_card' in answer:
answer = self.paths['pythia8']
if os.path.exists(answer):
path = answer
else:
if not '.dat' in answer and not '.lhco' in answer:
if answer != 'trigger':
path = self.paths[answer]
else:
path = self.paths['delphes']
elif not '.lhco' in answer:
if '_' in answer:
path = self.paths['_'.join(answer.split('_')[:-1])]
else:
path = pjoin(me_dir, 'Cards', answer)
else:
path = pjoin(me_dir, self.mw_card['mw_run']['inputfile'])
if not os.path.exists(path):
                    logger.info('Path in MW_card does not exist')
path = pjoin(me_dir, 'Events', answer)
#security
path = path.replace('_card_card','_card')
try:
self.mother_interface.exec_cmd('open %s' % path)
except InvalidCmd, error:
if str(error) != 'No default path for this file':
raise
if answer == 'transfer_card.dat':
logger.warning('You have to specify a transfer function first!')
elif answer == 'input.lhco':
path = pjoin(me_dir,'Events', 'input.lhco')
ff = open(path,'w')
ff.write('''No LHCO information imported at current time.
To import a lhco file: Close this file and type the path of your file.
You can also copy/paste your event file here.''')
ff.close()
self.open_file(path)
else:
raise
self.reload_card(path)
def reload_card(self, path):
"""reload object to have it in sync"""
if path == self.paths['param']:
try:
self.param_card = check_param_card.ParamCard(path)
except (check_param_card.InvalidParamCard, ValueError) as e:
logger.error('Current param_card is not valid. We are going to use the default one.')
logger.error('problem detected: %s' % e)
logger.error('Please re-open the file and fix the problem.')
                logger.warning('using the \'set\' command without opening the file will discard all your manual changes')
elif path == self.paths['run']:
self.run_card = banner_mod.RunCard(path)
elif path == self.paths['shower']:
self.shower_card = shower_card_mod.ShowerCard(path)
elif path == self.paths['ML']:
self.MLcard = banner_mod.MadLoopParam(path)
elif path == self.paths['pythia8']:
# Use the read function so that modified/new parameters are correctly
# set as 'user_set'
if not self.PY8Card:
self.PY8Card = banner_mod.PY8Card(self.paths['pythia8_default'])
self.PY8Card.read(self.paths['pythia8'], setter='user')
self.py8_vars = [k.lower() for k in self.PY8Card.keys()]
elif path == self.paths['MadWeight']:
try:
import madgraph.madweight.Cards as mwcards
except:
import internal.madweight.Cards as mwcards
self.mw_card = mwcards.Card(path)
else:
logger.debug('not keep in sync: %s', path)
return path
class EditParamCard(AskforEditCard):
"""a dedicated module for the param"""
special_shortcut ={}
def __init__(self, question, card=[], mode='auto', *args, **opt):
self.load_default()
cmd.OneLinePathCompletion.__init__(self, question, *args, **opt)
if os.path.isfile(card[0]):
self.param_card = check_param_card.ParamCard(card[0])
self.paths['param'] = card[0]
if os.path.isfile(card[0].replace('.dat', '_default.dat')):
self.paths['param_default'] = card[0].replace('.dat', '_default.dat')
else:
self.paths['param_default'] = card[0]
else:
            raise Exception, 'path %s does not exist' % card[0]
self.pname2block, self.restricted_value = self.param_card.analyze_param_card()
self.cards=['param']
def do_asperge(self, *args, **opts):
"Not available"
logger.warning("asperge not available in this mode")
|
[] |
[] |
[
"LHAPATH",
"MADGRAPH_BASE",
"LHAPDF_DATA_PATH",
"CLUSTER_LHAPATH",
"HOME"
] |
[]
|
["LHAPATH", "MADGRAPH_BASE", "LHAPDF_DATA_PATH", "CLUSTER_LHAPATH", "HOME"]
|
python
| 5 | 0 | |
doc/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'pydemic'
copyright = '2020, George N Wong and Zachary J Weiner'
author = 'George N Wong and Zachary J Weiner'
import pkg_resources
version = pkg_resources.get_distribution(project).version
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx.ext.ifconfig',
# 'sphinx_copybutton'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('https://pandas.pydata.org/docs/', None),
'emcee': ('https://emcee.readthedocs.io/en/stable/', None),
}
latex_elements = {
'maxlistdepth': '99',
}
autodoc_mock_imports = ['sympy', 'h5py']
import os
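# Read the Docs sets READTHEDOCS=True in its build environment; the flag is exposed
# to sphinx.ext.ifconfig via the 'on_rtd' config value registered in setup() below.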
on_rtd = os.environ.get('READTHEDOCS') == 'True'
# setup copy button thing
def setup(app):
app.add_config_value('on_rtd', on_rtd, 'env')
app.add_javascript('copybutton.js')
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
copied from numpy's conf.py
"""
if domain != 'py':
return None
import sys
import inspect
from os.path import relpath, dirname
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return None
# strip decorators, which would resolve to the source of the decorator
# possibly an upstream bug in getsourcefile, bpo-1764286
try:
unwrap = inspect.unwrap
except AttributeError:
pass
else:
obj = unwrap(obj)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
import pydemic
fn = relpath(fn, start=dirname(pydemic.__file__))
return "https://github.com/uiuc-covid19-modeling/pydemic/blob/master/pydemic/%s%s" % (
fn, linespec)
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
types/server.go
|
package types
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/gorilla/mux"
)
// Server handles incoming resourceful requests.
type Server struct {
repo Repository
Router *mux.Router
}
func (s *Server) routes() {
s.Router.HandleFunc("/inbox", s.withSlackVerification(s.handleInbox())).Methods("POST")
}
func (s *Server) handleInbox() http.HandlerFunc {
return func(response http.ResponseWriter, request *http.Request) {
type slashCommandResponse struct {
Type string `json:"response_type"`
Text string `json:"text"`
Attachments []struct {
Text string `json:"text"`
} `json:"attachments"`
}
request.ParseForm()
command := request.FormValue("text")
commandParts := strings.Fields(command)
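		// The slash-command text is split on whitespace: token 1 is the resource type, the
		// last token is the URL, and everything in between forms the title (token 0 is unused here).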
if len(commandParts) > 3 {
resourceType := commandParts[1]
uri := commandParts[len(commandParts)-1]
url, _ := url.Parse(uri)
title := strings.Join(commandParts[2:len(commandParts)-1], " ")
resource := Resource{
Type: resourceType,
Title: title,
URL: *url,
}
err := s.repo.Add(resource)
if err != nil {
http.Error(response, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
cmdResponse := slashCommandResponse{
Type: "in_channel",
Text: "I got you fam. Adding that resource right now.",
Attachments: []struct {
Text string `json:"text"`
}{
{
Text: fmt.Sprintf("%s resource added. Please find it here: URL_PLACEHOLDER", resource.Title),
},
},
}
commandResponse, err := json.Marshal(cmdResponse)
if err != nil {
http.Error(response, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
response.Header().Set("Content-Type", "application/json")
response.Write(commandResponse)
}
}
}
func (s *Server) withSlackVerification(h http.HandlerFunc) http.HandlerFunc {
return func(response http.ResponseWriter, req *http.Request) {
validateSlackRequest := func(request *http.Request) error {
var b bytes.Buffer
version := os.Getenv("API_VERSION")
if version == "" {
version = "v0"
}
b.WriteString(version)
b.WriteString(":")
rawTimestamp := request.Header.Get("X-Slack-Request-Timestamp")
if rawTimestamp == "" {
return errors.New("bad request")
}
var err error
verifyTimestamps := true
verificationFlag := os.Getenv("VERIFY_TIMESTAMPS")
if verificationFlag != "" {
verifyTimestamps, err = strconv.ParseBool(verificationFlag)
}
if verifyTimestamps {
ts, err := strconv.Atoi(rawTimestamp)
if err != nil {
return errors.New("bad request")
}
timestamp := time.Unix(int64(ts), 0)
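				// reject requests older than five minutes to limit replay attacks (the window Slack's signing docs suggest)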
if timestamp.Before(time.Now().Add(-5 * time.Minute)) {
return errors.New("bad request")
}
}
b.WriteString(rawTimestamp)
b.WriteString(":")
body, err := ioutil.ReadAll(request.Body)
defer request.Body.Close()
if err != nil {
return err
}
request.Body = ioutil.NopCloser(bytes.NewBuffer(body))
b.Write(body)
signature := request.Header.Get("X-Slack-Signature")
if signature == "" {
return errors.New("bad request")
}
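		// The expected signature is an HMAC-SHA256 of "<version>:<timestamp>:<body>" keyed with the
		// signing secret, hex-encoded and prefixed with the version, e.g. "v0=<hex digest>".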
secret := os.Getenv("SIGNING_SECRET")
h := hmac.New(sha256.New, []byte(secret))
h.Write(b.Bytes())
sha := hex.EncodeToString(h.Sum(nil))
calculatedSignature := fmt.Sprintf("%s=%s", version, sha)
if calculatedSignature != signature {
return errors.New("bad request")
}
return nil
}
includeValidation := true
validationFlag := os.Getenv("WITH_VALIDATION")
if validationFlag != "" {
includeValidation, _ = strconv.ParseBool(validationFlag)
}
if includeValidation {
err := validateSlackRequest(req)
if err != nil {
http.Error(response, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
h(response, req)
}
}
// NewServer constructs a new resourceful Server ready to use.
func NewServer(repo Repository) *Server {
server := &Server{
repo: repo,
Router: mux.NewRouter(),
}
server.routes()
return server
}
|
[
"\"API_VERSION\"",
"\"VERIFY_TIMESTAMPS\"",
"\"SIGNING_SECRET\"",
"\"WITH_VALIDATION\""
] |
[] |
[
"API_VERSION",
"WITH_VALIDATION",
"VERIFY_TIMESTAMPS",
"SIGNING_SECRET"
] |
[]
|
["API_VERSION", "WITH_VALIDATION", "VERIFY_TIMESTAMPS", "SIGNING_SECRET"]
|
go
| 4 | 0 | |
vendor/helm.sh/helm/v3/pkg/action/action.go
|
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package action
import (
"bytes"
"fmt"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"helm.sh/helm/v3/internal/experimental/registry"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chartutil"
"helm.sh/helm/v3/pkg/engine"
"helm.sh/helm/v3/pkg/kube"
"helm.sh/helm/v3/pkg/postrender"
"helm.sh/helm/v3/pkg/release"
"helm.sh/helm/v3/pkg/releaseutil"
"helm.sh/helm/v3/pkg/storage"
"helm.sh/helm/v3/pkg/storage/driver"
"helm.sh/helm/v3/pkg/time"
)
// Timestamper is a function capable of producing a timestamp.Timestamper.
//
// By default, this is a time.Time function from the Helm time package. This can
// be overridden for testing though, so that timestamps are predictable.
var Timestamper = time.Now
var (
// errMissingChart indicates that a chart was not provided.
errMissingChart = errors.New("no chart provided")
// errMissingRelease indicates that a release (name) was not provided.
errMissingRelease = errors.New("no release provided")
// errInvalidRevision indicates that an invalid release revision number was provided.
errInvalidRevision = errors.New("invalid release revision")
// errPending indicates that another instance of Helm is already applying an operation on a release.
errPending = errors.New("another operation (install/upgrade/rollback) is in progress")
)
// ValidName is a regular expression for resource names.
//
// DEPRECATED: This will be removed in Helm 4, and is no longer used here. See
// pkg/chartutil.ValidateName for the replacement.
//
// According to the Kubernetes help text, the regular expression it uses is:
//
// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
//
// This follows the above regular expression (but requires a full string match, not partial).
//
// The Kubernetes documentation is here, though it is not entirely correct:
// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
var ValidName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
// Configuration injects the dependencies that all actions share.
type Configuration struct {
// RESTClientGetter is an interface that loads Kubernetes clients.
RESTClientGetter RESTClientGetter
// Releases stores records of releases.
Releases *storage.Storage
// KubeClient is a Kubernetes API client.
KubeClient kube.Interface
// RegistryClient is a client for working with registries
RegistryClient *registry.Client
// Capabilities describes the capabilities of the Kubernetes cluster.
Capabilities *chartutil.Capabilities
Log func(string, ...interface{})
}
// renderResources renders the templates in a chart
//
// TODO: This function is badly in need of a refactor.
// TODO: As part of the refactor the duplicate code in cmd/helm/template.go should be removed
// This code has to do with writing files to disk.
func (c *Configuration) renderResources(ch *chart.Chart, values chartutil.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrender.PostRenderer, dryRun bool) ([]*release.Hook, *bytes.Buffer, string, error) {
hs := []*release.Hook{}
b := bytes.NewBuffer(nil)
caps, err := c.getCapabilities()
if err != nil {
return hs, b, "", err
}
if ch.Metadata.KubeVersion != "" {
if !chartutil.IsCompatibleRange(ch.Metadata.KubeVersion, caps.KubeVersion.String()) {
return hs, b, "", errors.Errorf("chart requires kubeVersion: %s which is incompatible with Kubernetes %s", ch.Metadata.KubeVersion, caps.KubeVersion.String())
}
}
var files map[string]string
var err2 error
// A `helm template` or `helm install --dry-run` should not talk to the remote cluster.
// It will break in interesting and exotic ways because other data (e.g. discovery)
// is mocked. It is not up to the template author to decide when the user wants to
// connect to the cluster. So when the user says to dry run, respect the user's
// wishes and do not connect to the cluster.
if !dryRun && c.RESTClientGetter != nil {
rest, err := c.RESTClientGetter.ToRESTConfig()
if err != nil {
return hs, b, "", err
}
files, err2 = engine.RenderWithClient(ch, values, rest)
} else {
files, err2 = engine.Render(ch, values)
}
if err2 != nil {
return hs, b, "", err2
}
// NOTES.txt gets rendered like all the other files, but because it's not a hook nor a resource,
// pull it out of here into a separate file so that we can actually use the output of the rendered
// text file. We have to spin through this map because the file contains path information, so we
// look for terminating NOTES.txt. We also remove it from the files so that we don't have to skip
// it in the sortHooks.
var notesBuffer bytes.Buffer
for k, v := range files {
if strings.HasSuffix(k, notesFileSuffix) {
if subNotes || (k == path.Join(ch.Name(), "templates", notesFileSuffix)) {
// If buffer contains data, add newline before adding more
if notesBuffer.Len() > 0 {
notesBuffer.WriteString("\n")
}
notesBuffer.WriteString(v)
}
delete(files, k)
}
}
notes := notesBuffer.String()
// Sort hooks, manifests, and partials. Only hooks and manifests are returned,
// as partials are not used after renderer.Render. Empty manifests are also
// removed here.
hs, manifests, err := releaseutil.SortManifests(files, caps.APIVersions, releaseutil.InstallOrder)
if err != nil {
// By catching parse errors here, we can prevent bogus releases from going
// to Kubernetes.
//
// We return the files as a big blob of data to help the user debug parser
// errors.
for name, content := range files {
if strings.TrimSpace(content) == "" {
continue
}
fmt.Fprintf(b, "---\n# Source: %s\n%s\n", name, content)
}
return hs, b, "", err
}
// Aggregate all valid manifests into one big doc.
fileWritten := make(map[string]bool)
if includeCrds {
for _, crd := range ch.CRDObjects() {
if outputDir == "" {
fmt.Fprintf(b, "---\n# Source: %s\n%s\n", crd.Name, string(crd.File.Data[:]))
} else {
err = writeToFile(outputDir, crd.Filename, string(crd.File.Data[:]), fileWritten[crd.Name])
if err != nil {
return hs, b, "", err
}
fileWritten[crd.Name] = true
}
}
}
for _, m := range manifests {
if outputDir == "" {
fmt.Fprintf(b, "---\n# Source: %s\n%s\n", m.Name, m.Content)
} else {
newDir := outputDir
if useReleaseName {
newDir = filepath.Join(outputDir, releaseName)
}
// NOTE: We do not have to worry about the post-renderer because
// output dir is only used by `helm template`. In the next major
// release, we should move this logic to template only as it is not
// used by install or upgrade
err = writeToFile(newDir, m.Name, m.Content, fileWritten[m.Name])
if err != nil {
return hs, b, "", err
}
fileWritten[m.Name] = true
}
}
if pr != nil {
b, err = pr.Run(b)
if err != nil {
return hs, b, notes, errors.Wrap(err, "error while running post render on files")
}
}
return hs, b, notes, nil
}
// RESTClientGetter gets the rest client
type RESTClientGetter interface {
ToRESTConfig() (*rest.Config, error)
ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error)
ToRESTMapper() (meta.RESTMapper, error)
}
// DebugLog sets the logger that writes debug strings
type DebugLog func(format string, v ...interface{})
// getCapabilities builds a Capabilities from discovery information.
func (c *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
if c.Capabilities != nil {
return c.Capabilities, nil
}
dc, err := c.RESTClientGetter.ToDiscoveryClient()
if err != nil {
return nil, errors.Wrap(err, "could not get Kubernetes discovery client")
}
// force a discovery cache invalidation to always fetch the latest server version/capabilities.
dc.Invalidate()
kubeVersion, err := dc.ServerVersion()
if err != nil {
return nil, errors.Wrap(err, "could not get server version from Kubernetes")
}
// Issue #6361:
// Client-Go emits an error when an API service is registered but unimplemented.
// We trap that error here and print a warning. But since the discovery client continues
// building the API object, it is correctly populated with all valid APIs.
// See https://github.com/kubernetes/kubernetes/issues/72051#issuecomment-521157642
apiVersions, err := GetVersionSet(dc)
if err != nil {
if discovery.IsGroupDiscoveryFailedError(err) {
c.Log("WARNING: The Kubernetes server has an orphaned API service. Server reports: %s", err)
c.Log("WARNING: To fix this, kubectl delete apiservice <service-name>")
} else {
return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes")
}
}
c.Capabilities = &chartutil.Capabilities{
APIVersions: apiVersions,
KubeVersion: chartutil.KubeVersion{
Version: kubeVersion.GitVersion,
Major: kubeVersion.Major,
Minor: kubeVersion.Minor,
},
}
return c.Capabilities, nil
}
// KubernetesClientSet creates a new kubernetes ClientSet based on the configuration
func (c *Configuration) KubernetesClientSet() (kubernetes.Interface, error) {
conf, err := c.RESTClientGetter.ToRESTConfig()
if err != nil {
return nil, errors.Wrap(err, "unable to generate config for kubernetes client")
}
return kubernetes.NewForConfig(conf)
}
// Now generates a timestamp
//
// If the configuration has a Timestamper on it, that will be used.
// Otherwise, this will use time.Now().
func (c *Configuration) Now() time.Time {
return Timestamper()
}
func (c *Configuration) releaseContent(name string, version int) (*release.Release, error) {
if err := chartutil.ValidateReleaseName(name); err != nil {
return nil, errors.Errorf("releaseContent: Release name is invalid: %s", name)
}
if version <= 0 {
return c.Releases.Last(name)
}
return c.Releases.Get(name, version)
}
// GetVersionSet retrieves a set of available k8s API versions
func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.VersionSet, error) {
groups, resources, err := client.ServerGroupsAndResources()
if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
return chartutil.DefaultVersionSet, errors.Wrap(err, "could not get apiVersions from Kubernetes")
}
// FIXME: The Kubernetes test fixture for cli appears to always return nil
// for calls to Discovery().ServerGroupsAndResources(). So in this case, we
// return the default API list. This is also a safe value to return in any
// other odd-ball case.
if len(groups) == 0 && len(resources) == 0 {
return chartutil.DefaultVersionSet, nil
}
versionMap := make(map[string]interface{})
versions := []string{}
// Extract the groups
for _, g := range groups {
for _, gv := range g.Versions {
versionMap[gv.GroupVersion] = struct{}{}
}
}
// Extract the resources
var id string
var ok bool
for _, r := range resources {
for _, rl := range r.APIResources {
// A Kind at a GroupVersion can show up more than once. We only want
// it displayed once in the final output.
id = path.Join(r.GroupVersion, rl.Kind)
if _, ok = versionMap[id]; !ok {
versionMap[id] = struct{}{}
}
}
}
// Convert to a form that NewVersionSet can use
for k := range versionMap {
versions = append(versions, k)
}
return chartutil.VersionSet(versions), nil
}
// recordRelease with an update operation in case reuse has been set.
func (c *Configuration) recordRelease(r *release.Release) {
if err := c.Releases.Update(r); err != nil {
c.Log("warning: Failed to update release %s: %s", r.Name, err)
}
}
// Init initializes the action configuration
func (c *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error {
kc := kube.New(getter)
kc.Log = log
lazyClient := &lazyClient{
namespace: namespace,
clientFn: kc.Factory.KubernetesClientSet,
}
var store *storage.Storage
switch helmDriver {
case "secret", "secrets", "":
d := driver.NewSecrets(newSecretClient(lazyClient))
d.Log = log
store = storage.Init(d)
case "configmap", "configmaps":
d := driver.NewConfigMaps(newConfigMapClient(lazyClient))
d.Log = log
store = storage.Init(d)
case "memory":
var d *driver.Memory
if c.Releases != nil {
if mem, ok := c.Releases.Driver.(*driver.Memory); ok {
// This function can be called more than once (e.g., helm list --all-namespaces).
// If a memory driver was already initialized, re-use it but set the possibly new namespace.
				// We re-use it in case some releases were already created in the existing memory driver.
d = mem
}
}
if d == nil {
d = driver.NewMemory()
}
d.SetNamespace(namespace)
store = storage.Init(d)
case "sql":
d, err := driver.NewSQL(
os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"),
log,
namespace,
)
if err != nil {
panic(fmt.Sprintf("Unable to instantiate SQL driver: %v", err))
}
store = storage.Init(d)
default:
// Not sure what to do here.
panic("Unknown driver in HELM_DRIVER: " + helmDriver)
}
c.RESTClientGetter = getter
c.KubeClient = kc
c.Releases = store
c.Log = log
return nil
}
|
[
"\"HELM_DRIVER_SQL_CONNECTION_STRING\""
] |
[] |
[
"HELM_DRIVER_SQL_CONNECTION_STRING"
] |
[]
|
["HELM_DRIVER_SQL_CONNECTION_STRING"]
|
go
| 1 | 0 | |
storage_test.go
|
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"os"
"testing"
"time"
"github.com/stretchr/testify/suite"
)
type StorageTestSuite struct {
suite.Suite
StorageBackends map[string]Backend
TempDirectory string
}
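// setupStorageBackends always registers the local filesystem backend and, when
// TEST_CLOUD_STORAGE=1, adds any cloud backends whose buckets or containers are
// configured through the TEST_STORAGE_* environment variables.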
func (suite *StorageTestSuite) setupStorageBackends() {
timestamp := time.Now().Format("20060102150405")
suite.TempDirectory = fmt.Sprintf("../../.test/storage-storage/%s", timestamp)
suite.StorageBackends = make(map[string]Backend)
suite.StorageBackends["LocalFilesystem"] = Backend(NewLocalFilesystemBackend(suite.TempDirectory))
	// create empty dir in local storage to make sure it doesn't end up in ListObjects
err := os.MkdirAll(fmt.Sprintf("%s/%s", suite.TempDirectory, "ignoreme"), 0777)
suite.Nil(err, "No error creating ignored dir in local storage")
if os.Getenv("TEST_CLOUD_STORAGE") == "1" {
prefix := fmt.Sprintf("unittest/%s", timestamp)
s3Bucket := os.Getenv("TEST_STORAGE_AMAZON_BUCKET")
s3Region := os.Getenv("TEST_STORAGE_AMAZON_REGION")
gcsBucket := os.Getenv("TEST_STORAGE_GOOGLE_BUCKET")
blobContainer := os.Getenv("TEST_STORAGE_MICROSOFT_CONTAINER")
ossBucket := os.Getenv("TEST_STORAGE_ALIBABA_BUCKET")
ossEndpoint := os.Getenv("TEST_STORAGE_ALIBABA_ENDPOINT")
osContainer := os.Getenv("TEST_STORAGE_OPENSTACK_CONTAINER")
osRegion := os.Getenv("TEST_STORAGE_OPENSTACK_REGION")
ocsBucket := os.Getenv("TEST_STORAGE_ORACLE_BUCKET")
ocsRegion := os.Getenv("TEST_STORAGE_ORACLE_REGION")
ocsCompartmentId := os.Getenv("TEST_STORAGE_ORACLE_COMPARTMENTID")
bosBucket := os.Getenv("TEST_STORAGE_BAIDU_BUCKET")
bosEndpoint := os.Getenv("TEST_STORAGE_BAIDU_ENDPOINT")
cosBucket := os.Getenv("TEST_STORAGE_TENCENT_BUCKET")
cosEndpoint := os.Getenv("TEST_STORAGE_TENCENT_ENDPOINT")
if s3Bucket != "" && s3Region != "" {
suite.StorageBackends["AmazonS3"] = Backend(NewAmazonS3Backend(s3Bucket, prefix, s3Region, "", ""))
}
if gcsBucket != "" {
suite.StorageBackends["GoogleCS"] = Backend(NewGoogleCSBackend(gcsBucket, prefix))
}
if blobContainer != "" {
suite.StorageBackends["MicrosoftBlob"] = Backend(NewMicrosoftBlobBackend(blobContainer, prefix))
}
if ossBucket != "" {
suite.StorageBackends["AlibabaCloudOSS"] = Backend(NewAlibabaCloudOSSBackend(ossBucket, prefix, ossEndpoint, ""))
}
if osContainer != "" {
suite.StorageBackends["OpenStackOS"] = Backend(NewOpenstackOSBackend(osContainer, prefix, osRegion, ""))
}
if ocsBucket != "" {
suite.StorageBackends["OracleCS"] = Backend(NewOracleCSBackend(ocsBucket, prefix, ocsRegion, ocsCompartmentId))
}
if bosBucket != "" {
suite.StorageBackends["BaiduCloudBOS"] = Backend(NewBaiDuBOSBackend(bosBucket, prefix, bosEndpoint))
}
if cosBucket != "" {
suite.StorageBackends["TencentCloudCOS"] = Backend(NewTencentCloudCOSBackend(cosBucket, prefix, cosEndpoint))
}
}
}
func (suite *StorageTestSuite) SetupSuite() {
suite.setupStorageBackends()
for i := 1; i <= 9; i++ {
data := []byte(fmt.Sprintf("test content %d", i))
path := fmt.Sprintf("test%d.txt", i)
for key, backend := range suite.StorageBackends {
err := backend.PutObject(path, data)
message := fmt.Sprintf("no error putting object %s using %s backend", path, key)
suite.Nil(err, message)
}
}
for key, backend := range suite.StorageBackends {
if key == "LocalFilesystem" {
continue
}
data := []byte("skipped object")
path := "this/is/a/skipped/object.txt"
err := backend.PutObject(path, data)
message := fmt.Sprintf("no error putting skipped object %s using %s backend", path, key)
suite.Nil(err, message)
}
}
func (suite *StorageTestSuite) TearDownSuite() {
defer os.RemoveAll(suite.TempDirectory)
for i := 1; i <= 9; i++ {
path := fmt.Sprintf("test%d.txt", i)
for key, backend := range suite.StorageBackends {
err := backend.DeleteObject(path)
message := fmt.Sprintf("no error deleting object %s using %s backend", path, key)
suite.Nil(err, message)
}
}
for key, backend := range suite.StorageBackends {
if key == "LocalFilesystem" {
continue
}
path := "this/is/a/skipped/object.txt"
err := backend.DeleteObject(path)
message := fmt.Sprintf("no error deleting skipped object %s using %s backend", path, key)
suite.Nil(err, message)
}
}
func (suite *StorageTestSuite) TestListObjects() {
for key, backend := range suite.StorageBackends {
objects, err := backend.ListObjects("")
message := fmt.Sprintf("no error listing objects using %s backend", key)
suite.Nil(err, message)
expectedNumObjects := 9
message = fmt.Sprintf("%d objects listed using %s backend", expectedNumObjects, key)
suite.Equal(expectedNumObjects, len(objects), message)
for i, object := range objects {
path := fmt.Sprintf("test%d.txt", (i + 1))
message = fmt.Sprintf("object %s found in list objects using %s backend", path, key)
suite.Equal(path, object.Path, message)
}
}
}
func (suite *StorageTestSuite) TestGetObject() {
for key, backend := range suite.StorageBackends {
for i := 1; i <= 9; i++ {
path := fmt.Sprintf("test%d.txt", i)
object, err := backend.GetObject(path)
message := fmt.Sprintf("no error getting object %s using %s backend", path, key)
suite.Nil(err, message)
message = fmt.Sprintf("object %s content as expected using %s backend", path, key)
suite.Equal(object.Content, []byte(fmt.Sprintf("test content %d", i)), message)
}
}
}
func (suite *StorageTestSuite) TestHasSuffix() {
now := time.Now()
o1 := Object{
Path: "mychart-0.1.0.tgz",
Content: []byte{},
LastModified: now,
}
suite.True(o1.HasExtension("tgz"), "object has tgz suffix")
o2 := Object{
Path: "mychart-0.1.0.txt",
Content: []byte{},
LastModified: now,
}
suite.False(o2.HasExtension("tgz"), "object does not have tgz suffix")
}
func (suite *StorageTestSuite) TestGetObjectSliceDiff() {
now := time.Now()
os1 := []Object{
{
Path: "test1.txt",
Content: []byte{},
LastModified: now,
},
}
os2 := []Object{}
diff := GetObjectSliceDiff(os1, os2)
suite.True(diff.Change, "change detected")
suite.Equal(diff.Removed, os1, "removed slice populated")
suite.Empty(diff.Added, "added slice empty")
suite.Empty(diff.Updated, "updated slice empty")
os2 = append(os2, os1[0])
diff = GetObjectSliceDiff(os1, os2)
suite.False(diff.Change, "no change detected")
suite.Empty(diff.Removed, "removed slice empty")
suite.Empty(diff.Added, "added slice empty")
suite.Empty(diff.Updated, "updated slice empty")
os2[0].LastModified = now.Add(1)
diff = GetObjectSliceDiff(os1, os2)
suite.True(diff.Change, "change detected")
suite.Empty(diff.Removed, "removed slice empty")
suite.Empty(diff.Added, "added slice empty")
suite.Equal(diff.Updated, os2, "updated slice populated")
os2[0].LastModified = now
os2 = append(os2, Object{
Path: "test2.txt",
Content: []byte{},
LastModified: now,
})
diff = GetObjectSliceDiff(os1, os2)
suite.True(diff.Change, "change detected")
suite.Empty(diff.Removed, "removed slice empty")
	suite.Equal(diff.Added, []Object{os2[1]}, "added slice populated")
suite.Empty(diff.Updated, "updated slice empty")
}
func TestStorageTestSuite(t *testing.T) {
suite.Run(t, new(StorageTestSuite))
}
|
[
"\"TEST_CLOUD_STORAGE\"",
"\"TEST_STORAGE_AMAZON_BUCKET\"",
"\"TEST_STORAGE_AMAZON_REGION\"",
"\"TEST_STORAGE_GOOGLE_BUCKET\"",
"\"TEST_STORAGE_MICROSOFT_CONTAINER\"",
"\"TEST_STORAGE_ALIBABA_BUCKET\"",
"\"TEST_STORAGE_ALIBABA_ENDPOINT\"",
"\"TEST_STORAGE_OPENSTACK_CONTAINER\"",
"\"TEST_STORAGE_OPENSTACK_REGION\"",
"\"TEST_STORAGE_ORACLE_BUCKET\"",
"\"TEST_STORAGE_ORACLE_REGION\"",
"\"TEST_STORAGE_ORACLE_COMPARTMENTID\"",
"\"TEST_STORAGE_BAIDU_BUCKET\"",
"\"TEST_STORAGE_BAIDU_ENDPOINT\"",
"\"TEST_STORAGE_TENCENT_BUCKET\"",
"\"TEST_STORAGE_TENCENT_ENDPOINT\""
] |
[] |
[
"TEST_STORAGE_ORACLE_COMPARTMENTID",
"TEST_STORAGE_BAIDU_BUCKET",
"TEST_STORAGE_OPENSTACK_REGION",
"TEST_STORAGE_AMAZON_BUCKET",
"TEST_STORAGE_AMAZON_REGION",
"TEST_STORAGE_ALIBABA_ENDPOINT",
"TEST_STORAGE_OPENSTACK_CONTAINER",
"TEST_STORAGE_GOOGLE_BUCKET",
"TEST_STORAGE_ORACLE_BUCKET",
"TEST_STORAGE_TENCENT_ENDPOINT",
"TEST_STORAGE_ORACLE_REGION",
"TEST_STORAGE_BAIDU_ENDPOINT",
"TEST_STORAGE_ALIBABA_BUCKET",
"TEST_STORAGE_TENCENT_BUCKET",
"TEST_STORAGE_MICROSOFT_CONTAINER",
"TEST_CLOUD_STORAGE"
] |
[]
|
["TEST_STORAGE_ORACLE_COMPARTMENTID", "TEST_STORAGE_BAIDU_BUCKET", "TEST_STORAGE_OPENSTACK_REGION", "TEST_STORAGE_AMAZON_BUCKET", "TEST_STORAGE_AMAZON_REGION", "TEST_STORAGE_ALIBABA_ENDPOINT", "TEST_STORAGE_OPENSTACK_CONTAINER", "TEST_STORAGE_GOOGLE_BUCKET", "TEST_STORAGE_ORACLE_BUCKET", "TEST_STORAGE_TENCENT_ENDPOINT", "TEST_STORAGE_ORACLE_REGION", "TEST_STORAGE_BAIDU_ENDPOINT", "TEST_STORAGE_ALIBABA_BUCKET", "TEST_STORAGE_TENCENT_BUCKET", "TEST_STORAGE_MICROSOFT_CONTAINER", "TEST_CLOUD_STORAGE"]
|
go
| 16 | 0 | |
build/uf2conv.py
|
#!/usr/bin/env python3
#command python uf2conv.py -o end_file_name.uf2 start_file_name.bin
import sys
import struct
import subprocess
import re
import os
import os.path
import argparse
UF2_MAGIC_START0 = 0x0A324655 # "UF2\n"
UF2_MAGIC_START1 = 0x9E5D5157 # Randomly selected
UF2_MAGIC_END = 0x0AB16F30 # Ditto
families = {
'SAMD21': 0x68ed2b88,
'SAMD51': 0x55114460,
'NRF52': 0x1b57745f,
'STM32F1': 0x5ee21072,
'STM32F4': 0x57755a57,
'ATMEGA32': 0x16573617,
}
INFO_FILE = "/INFO_UF2.TXT"
appstartaddr = 0x2000
familyid = 0x0
def is_uf2(buf):
w = struct.unpack("<II", buf[0:8])
return w[0] == UF2_MAGIC_START0 and w[1] == UF2_MAGIC_START1
def is_hex(buf):
try:
w = buf[0:30].decode("utf-8")
except UnicodeDecodeError:
return False
if w[0] == ':' and re.match(b"^[:0-9a-fA-F\r\n]+$", buf):
return True
return False
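# Unpack a UF2 image back into a flat binary, padding address gaps with zeros
# and recording the application start address of the first block.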
def convert_from_uf2(buf):
global appstartaddr
numblocks = len(buf) // 512
curraddr = None
outp = b""
for blockno in range(numblocks):
ptr = blockno * 512
block = buf[ptr:ptr + 512]
hd = struct.unpack(b"<IIIIIIII", block[0:32])
if hd[0] != UF2_MAGIC_START0 or hd[1] != UF2_MAGIC_START1:
print("Skipping block at " + ptr + "; bad magic")
continue
if hd[2] & 1:
# NO-flash flag set; skip block
continue
datalen = hd[4]
if datalen > 476:
assert False, "Invalid UF2 data size at " + ptr
newaddr = hd[3]
if curraddr == None:
appstartaddr = newaddr
curraddr = newaddr
padding = newaddr - curraddr
if padding < 0:
assert False, "Block out of order at " + ptr
if padding > 10*1024*1024:
assert False, "More than 10M of padding needed at " + ptr
if padding % 4 != 0:
assert False, "Non-word padding size at " + ptr
while padding > 0:
padding -= 4
outp += b"\x00\x00\x00\x00"
outp += block[32 : 32 + datalen]
curraddr = newaddr + datalen
return outp
def convert_to_carray(file_content):
outp = "const unsigned char bindata[] __attribute__((aligned(16))) = {"
for i in range(len(file_content)):
if i % 16 == 0:
outp += "\n"
outp += "0x%02x, " % ord(file_content[i])
outp += "\n};\n"
return outp
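# Pack a flat binary into 512-byte UF2 blocks (256 payload bytes per block),
# tagging each block with the family ID when one is set.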
def convert_to_uf2(file_content):
global familyid
datapadding = b""
while len(datapadding) < 512 - 256 - 32 - 4:
datapadding += b"\x00\x00\x00\x00"
numblocks = (len(file_content) + 255) // 256
outp = b""
for blockno in range(numblocks):
ptr = 256 * blockno
chunk = file_content[ptr:ptr + 256]
flags = 0x0
if familyid:
flags |= 0x2000
hd = struct.pack(b"<IIIIIIII",
UF2_MAGIC_START0, UF2_MAGIC_START1,
flags, ptr + appstartaddr, 256, blockno, numblocks, familyid)
while len(chunk) < 256:
chunk += b"\x00"
block = hd + chunk + datapadding + struct.pack(b"<I", UF2_MAGIC_END)
assert len(block) == 512
outp += block
return outp
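# A single 256-byte flash page collected from HEX records; encode() wraps it
# in a complete UF2 block.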
class Block:
def __init__(self, addr):
self.addr = addr
self.bytes = bytearray(256)
def encode(self, blockno, numblocks):
global familyid
flags = 0x0
if familyid:
flags |= 0x2000
hd = struct.pack("<IIIIIIII",
UF2_MAGIC_START0, UF2_MAGIC_START1,
flags, self.addr, 256, blockno, numblocks, familyid)
hd += self.bytes[0:256]
while len(hd) < 512 - 4:
hd += b"\x00"
hd += struct.pack("<I", UF2_MAGIC_END)
return hd
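# Parse Intel HEX records (types 0, 1, 2 and 4) into 256-byte aligned blocks
# and encode them as a UF2 image.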
def convert_from_hex_to_uf2(buf):
global appstartaddr
appstartaddr = None
upper = 0
currblock = None
blocks = []
for line in buf.split('\n'):
if line[0] != ":":
continue
i = 1
rec = []
while i < len(line) - 1:
rec.append(int(line[i:i+2], 16))
i += 2
tp = rec[3]
if tp == 4:
upper = ((rec[4] << 8) | rec[5]) << 16
elif tp == 2:
upper = ((rec[4] << 8) | rec[5]) << 4
assert (upper & 0xffff) == 0
elif tp == 1:
break
elif tp == 0:
addr = upper | (rec[1] << 8) | rec[2]
if appstartaddr == None:
appstartaddr = addr
i = 4
while i < len(rec) - 1:
if not currblock or currblock.addr & ~0xff != addr & ~0xff:
currblock = Block(addr & ~0xff)
blocks.append(currblock)
currblock.bytes[addr & 0xff] = rec[i]
addr += 1
i += 1
numblocks = len(blocks)
resfile = b""
for i in range(0, numblocks):
resfile += blocks[i].encode(i, numblocks)
return resfile
def to_str(b):
return b.decode("utf-8")
def get_drives():
drives = []
if sys.platform == "win32":
r = subprocess.check_output(["wmic", "PATH", "Win32_LogicalDisk",
"get", "DeviceID,", "VolumeName,",
"FileSystem,", "DriveType"])
for line in to_str(r).split('\n'):
words = re.split('\s+', line)
if len(words) >= 3 and words[1] == "2" and words[2] == "FAT":
drives.append(words[0])
else:
rootpath = "/media"
if sys.platform == "darwin":
rootpath = "/Volumes"
elif sys.platform == "linux":
tmp = rootpath + "/" + os.environ["USER"]
if os.path.isdir(tmp):
rootpath = tmp
for d in os.listdir(rootpath):
drives.append(os.path.join(rootpath, d))
def has_info(d):
try:
return os.path.isfile(d + INFO_FILE)
except:
return False
return list(filter(has_info, drives))
def board_id(path):
with open(path + INFO_FILE, mode='r') as file:
file_content = file.read()
return re.search("Board-ID: ([^\r\n]*)", file_content).group(1)
def list_drives():
for d in get_drives():
print(d, board_id(d))
def write_file(name, buf):
with open(name, "wb") as f:
f.write(buf)
print("Wrote %d bytes to %s." % (len(buf), name))
def main():
global appstartaddr, familyid
def error(msg):
print(msg)
sys.exit(1)
parser = argparse.ArgumentParser(description='Convert to UF2 or flash directly.')
parser.add_argument('input', metavar='INPUT', type=str, nargs='?',
help='input file (HEX, BIN or UF2)')
parser.add_argument('-b' , '--base', dest='base', type=str,
default="0x2000",
help='set base address of application for BIN format (default: 0x2000)')
parser.add_argument('-o' , '--output', metavar="FILE", dest='output', type=str,
help='write output to named file; defaults to "flash.uf2" or "flash.bin" where sensible')
parser.add_argument('-d' , '--device', dest="device_path",
help='select a device path to flash')
parser.add_argument('-l' , '--list', action='store_true',
help='list connected devices')
parser.add_argument('-c' , '--convert', action='store_true',
help='do not flash, just convert')
parser.add_argument('-D' , '--deploy', action='store_true',
help='just flash, do not convert')
parser.add_argument('-f' , '--family', dest='family', type=str,
default="0x0",
help='specify familyID - number or name (default: 0x0)')
parser.add_argument('-C' , '--carray', action='store_true',
help='convert binary file to a C array, not UF2')
args = parser.parse_args()
appstartaddr = int(args.base, 0)
if args.family.upper() in families:
familyid = families[args.family.upper()]
else:
try:
familyid = int(args.family, 0)
except ValueError:
error("Family ID needs to be a number or one of: " + ", ".join(families.keys()))
if args.list:
list_drives()
else:
if not args.input:
error("Need input file")
with open(args.input, mode='rb') as f:
inpbuf = f.read()
from_uf2 = is_uf2(inpbuf)
ext = "uf2"
if args.deploy:
outbuf = inpbuf
elif from_uf2:
outbuf = convert_from_uf2(inpbuf)
ext = "bin"
elif is_hex(inpbuf):
outbuf = convert_from_hex_to_uf2(inpbuf.decode("utf-8"))
elif args.carray:
outbuf = convert_to_carray(inpbuf)
ext = "h"
else:
outbuf = convert_to_uf2(inpbuf)
print("Converting to %s, output size: %d, start address: 0x%x" %
(ext, len(outbuf), appstartaddr))
if args.convert or ext != "uf2":
drives = []
if args.output == None:
args.output = "flash." + ext
else:
drives = get_drives()
if args.output:
write_file(args.output, outbuf)
else:
if len(drives) == 0:
error("No drive to deploy.")
for d in drives:
print("Flashing %s (%s)" % (d, board_id(d)))
write_file(d + "/NEW.UF2", outbuf)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
python
| 1 | 0 | |
cmd/telegraf/telegraf_windows.go
|
//go:build windows
// +build windows
package main
import (
"log"
"os"
"runtime"
"github.com/influxdata/telegraf/logger"
"github.com/kardianos/service"
)
func run(inputFilters, outputFilters []string) {
// Register the eventlog logging target for windows.
logger.RegisterEventLogger(*fServiceName)
if runtime.GOOS == "windows" && windowsRunAsService() {
runAsWindowsService(
inputFilters,
outputFilters,
)
} else {
stop = make(chan struct{})
reloadLoop(
inputFilters,
outputFilters,
)
}
}
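// program implements the kardianos/service Interface so Telegraf can be
// started and stopped by the Windows service manager.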
type program struct {
inputFilters []string
outputFilters []string
}
func (p *program) Start(s service.Service) error {
go p.run()
return nil
}
func (p *program) run() {
stop = make(chan struct{})
reloadLoop(
p.inputFilters,
p.outputFilters,
)
}
func (p *program) Stop(s service.Service) error {
close(stop)
return nil
}
func runAsWindowsService(inputFilters, outputFilters []string) {
programFiles := os.Getenv("ProgramFiles")
if programFiles == "" { // Should never happen
programFiles = "C:\\Program Files"
}
svcConfig := &service.Config{
Name: *fServiceName,
DisplayName: *fServiceDisplayName,
Description: "Collects data using a series of plugins and publishes it to " +
"another series of plugins.",
Arguments: []string{"--config", programFiles + "\\Telegraf\\telegraf.conf"},
}
prg := &program{
inputFilters: inputFilters,
outputFilters: outputFilters,
}
s, err := service.New(prg, svcConfig)
if err != nil {
log.Fatal("E! " + err.Error())
}
// Handle the --service flag here to prevent any issues with tooling that
// may not have an interactive session, e.g. installing from Ansible.
if *fService != "" {
if len(fConfigs) > 0 {
svcConfig.Arguments = []string{}
}
for _, fConfig := range fConfigs {
svcConfig.Arguments = append(svcConfig.Arguments, "--config", fConfig)
}
for _, fConfigDirectory := range fConfigDirs {
svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", fConfigDirectory)
}
		// set the service name on the service command line so the custom name survives relaunch as a service
svcConfig.Arguments = append(svcConfig.Arguments, "--service-name", *fServiceName)
err := service.Control(s, *fService)
if err != nil {
log.Fatal("E! " + err.Error())
}
os.Exit(0)
} else {
logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog})
err = s.Run()
if err != nil {
log.Println("E! " + err.Error())
}
}
}
// Return true if Telegraf should create a Windows service.
func windowsRunAsService() bool {
if *fService != "" {
return true
}
if *fRunAsConsole {
return false
}
return !service.Interactive()
}
|
[
"\"ProgramFiles\""
] |
[] |
[
"ProgramFiles"
] |
[]
|
["ProgramFiles"]
|
go
| 1 | 0 | |
test/test_device_datasource_graph.py
|
# coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import logicmonitor_sdk
from logicmonitor_sdk.models.device_datasource_graph import DeviceDatasourceGraph # noqa: E501
from logicmonitor_sdk.rest import ApiException
class TestDeviceDatasourceGraph(unittest.TestCase):
"""DeviceDatasourceGraph unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDeviceDatasourceGraph(self):
"""Test DeviceDatasourceGraph"""
# FIXME: construct object with mandatory attributes with example values
# model = logicmonitor_sdk.models.device_datasource_graph.DeviceDatasourceGraph() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
Utilities/ruffus/test/test_N_x_M_and_collate.py
|
#!/usr/bin/env python
"""
test_N_x_M_and_collate.py
This script takes N pairs of input file pairs
(with the suffices .gene and .gwas)
and runs them against M sets of simulation data
(with the suffix .simulation)
A summary per input file pair is then produced
In pseudo-code:
STEP_1:
for n_file in NNN_pairs_of_input_files:
for m_file in MMM_simulation_data:
[n_file.gene,
n_file.gwas,
m_file.simulation] -> n_file.m_file.simulation_res
STEP_2:
for n_file in NNN_pairs_of_input_files:
n_file.*.simulation_res -> n_file.mean
n = CNT_GENE_GWAS_FILES
m = CNT_SIMULATION_FILES
"""
CNT_GENE_GWAS_FILES = 2
CNT_SIMULATION_FILES = 3
import os, sys
exe_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
sys.path.insert(0, os.path.abspath(os.path.join(exe_path,"..", "..")))
from ruffus import *
import random
from itertools import izip
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# options
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
from optparse import OptionParser
parser = OptionParser(version="%prog 1.0")
parser.add_option("-D", "--debug", dest = "debug",
action="store_true", default=False,
help="Run as unit test with default values.")
parser.add_option("-k", "--keep", dest = "keep",
action="store_true", default=False,
help="Do not cleanup after unit test runs.")
parser.add_option("-t", "--target_tasks", dest="target_tasks",
action="append",
default = ["statistical_summary"],
metavar="JOBNAME",
type="string",
help="Target task(s) of pipeline.")
parser.add_option("-f", "--forced_tasks", dest="forced_tasks",
action="append",
default = list(),
metavar="JOBNAME",
type="string",
help="Pipeline task(s) which will be included even if they are up to date.")
parser.add_option("-j", "--jobs", dest="jobs",
default=5,
metavar="jobs",
type="int",
help="Specifies the number of jobs (commands) to run simultaneously.")
parser.add_option("-g", "--gene_data_dir", dest="gene_data_dir",
default="%s/temp_gene_data_for_intermediate_example" % exe_path,
metavar="PATH",
type="string",
help="Directory with gene data [*.genes / *.gwas].")
parser.add_option("-s", "--simulation_data_dir", dest="simulation_data_dir",
default="%s/temp_simulation_data_for_intermediate_example" % exe_path,
metavar="PATH",
type="string",
help="Directory with simulation data [*.simulation].")
parser.add_option("-w", "--working_dir", dest="working_dir",
default="%s/working_dir_for_intermediate_example" % exe_path,
metavar="PATH",
type="string",
help="Working directory.")
parser.add_option("-v", "--verbose", dest = "verbose",
action="count", default=0,
help="Print more verbose messages for each additional verbose level.")
parser.add_option("-d", "--dependency", dest="dependency_file",
metavar="FILE",
type="string",
help="Print a dependency graph of the pipeline that would be executed "
"to FILE, but do not execute it.")
parser.add_option("-F", "--dependency_graph_format", dest="dependency_graph_format",
metavar="FORMAT",
type="string",
default = 'svg',
help="format of dependency graph file. Can be 'ps' (PostScript), "+
"'svg' 'svgz' (Structured Vector Graphics), " +
"'png' 'gif' (bitmap graphics) etc ")
parser.add_option("-n", "--just_print", dest="just_print",
action="store_true", default=False,
help="Print a description of the jobs that would be executed, "
"but do not execute them.")
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import StringIO
import re
import operator
import sys
from collections import defaultdict
import glob
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Functions
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#_________________________________________________________________________________________
#
# get gene gwas file pairs
#
#_________________________________________________________________________________________
def get_gene_gwas_file_pairs( ):
"""
    Helper function to get all *.gene, *.gwas from the directory specified
in --gene_data_dir
Returns
file pairs with both .gene and .gwas extensions,
corresponding roots (no extension) of each file
"""
gene_files = glob.glob(os.path.join(options.gene_data_dir, "*.gene"))
gwas_files = glob.glob(os.path.join(options.gene_data_dir, "*.gwas"))
common_roots = set(map(lambda x: os.path.splitext(os.path.split(x)[1])[0], gene_files))
common_roots &=set(map(lambda x: os.path.splitext(os.path.split(x)[1])[0], gwas_files))
common_roots = list(common_roots)
p = os.path; g_dir = options.gene_data_dir
file_pairs = [[p.join(g_dir, x + ".gene"), p.join(g_dir, x + ".gwas")] for x in common_roots]
return file_pairs, common_roots
#_________________________________________________________________________________________
#
# get simulation files
#
#_________________________________________________________________________________________
def get_simulation_files( ):
"""
    Helper function to get all *.simulation from the directory specified
in --simulation_data_dir
Returns
file with .simulation extensions,
corresponding roots (no extension) of each file
"""
simulation_files = glob.glob(os.path.join(options.simulation_data_dir, "*.simulation"))
simulation_roots =map(lambda x: os.path.splitext(os.path.split(x)[1])[0], simulation_files)
return simulation_files, simulation_roots
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Main logic
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# get help string
f =StringIO.StringIO()
parser.print_help(f)
helpstr = f.getvalue()
(options, remaining_args) = parser.parse_args()
working_dir = options.working_dir
#_________________________________________________________________________________________
#
# setup_simulation_data
#
#_________________________________________________________________________________________
#
# mkdir: makes sure output directories exist before task
#
@follows(mkdir(options.gene_data_dir, options.simulation_data_dir))
def setup_simulation_data ():
"""
create simulation files
"""
for i in range(CNT_GENE_GWAS_FILES):
open(os.path.join(options.gene_data_dir, "%03d.gene" % i), "w")
open(os.path.join(options.gene_data_dir, "%03d.gwas" % i), "w")
# gene files without corresponding gwas and vice versa
open(os.path.join(options.gene_data_dir, "orphan1.gene"), "w")
open(os.path.join(options.gene_data_dir, "orphan2.gwas"), "w")
open(os.path.join(options.gene_data_dir, "orphan3.gwas"), "w")
for i in range(CNT_SIMULATION_FILES):
open(os.path.join(options.simulation_data_dir, "%03d.simulation" % i), "w")
#_________________________________________________________________________________________
#
# cleanup_simulation_data
#
#_________________________________________________________________________________________
def try_rmdir (d):
if os.path.exists(d):
try:
os.rmdir(d)
except OSError:
sys.stderr.write("Warning:\t%s is not empty and will not be removed.\n" % d)
def cleanup_simulation_data ():
"""
cleanup files
"""
if options.verbose:
sys.stderr.write("Cleanup working directory and simulation files.\n")
#
# cleanup gene and gwas files
#
for f in glob.glob(os.path.join(options.gene_data_dir, "*.gene")):
os.unlink(f)
for f in glob.glob(os.path.join(options.gene_data_dir, "*.gwas")):
os.unlink(f)
try_rmdir(options.gene_data_dir)
#
# cleanup simulation
#
for f in glob.glob(os.path.join(options.simulation_data_dir, "*.simulation")):
os.unlink(f)
try_rmdir(options.simulation_data_dir)
#
# cleanup working_dir
#
for f in glob.glob(os.path.join(working_dir, "simulation_results", "*.simulation_res")):
os.unlink(f)
try_rmdir(os.path.join(working_dir, "simulation_results"))
for f in glob.glob(os.path.join(working_dir, "*.mean")):
os.unlink(f)
try_rmdir(working_dir)
#_________________________________________________________________________________________
#
# Step 1:
#
# for n_file in NNN_pairs_of_input_files:
# for m_file in MMM_simulation_data:
#
# [n_file.gene,
# n_file.gwas,
# m_file.simulation] -> working_dir/n_file.m_file.simulation_res
#
#_________________________________________________________________________________________
def generate_simulation_params ():
"""
Custom function to generate
file names for gene/gwas simulation study
"""
simulation_files, simulation_file_roots = get_simulation_files()
gene_gwas_file_pairs, gene_gwas_file_roots = get_gene_gwas_file_pairs()
for sim_file, sim_file_root in izip(simulation_files, simulation_file_roots):
for (gene, gwas), gene_file_root in izip(gene_gwas_file_pairs, gene_gwas_file_roots):
result_file = "%s.%s.simulation_res" % (gene_file_root, sim_file_root)
result_file_path = os.path.join(working_dir, "simulation_results", result_file)
yield [gene, gwas, sim_file], result_file_path, gene_file_root, sim_file_root, result_file
#
# mkdir: makes sure output directories exist before task
#
@follows(mkdir(options.working_dir, os.path.join(working_dir, "simulation_results")))
@files(generate_simulation_params)
def gwas_simulation(input_files, result_file_path, gene_file_root, sim_file_root, result_file):
"""
Dummy calculation of gene gwas vs simulation data
Normally runs in parallel on a computational cluster
"""
(gene_file,
gwas_file,
simulation_data_file) = input_files
simulation_res_file = open(result_file_path, "w")
simulation_res_file.write("%s + %s -> %s\n" % (gene_file_root, sim_file_root, result_file))
#_________________________________________________________________________________________
#
# Step 2:
#
# Statistical summary per gene/gwas file pair
#
# for n_file in NNN_pairs_of_input_files:
# working_dir/simulation_results/n.*.simulation_res
# -> working_dir/n.mean
#
#_________________________________________________________________________________________
@collate(gwas_simulation, regex(r"simulation_results/(\d+).\d+.simulation_res"), r"\1.mean")
@posttask(lambda : sys.stdout.write("\nOK\n"))
def statistical_summary (result_files, summary_file):
"""
Simulate statistical summary
"""
summary_file = open(summary_file, "w")
for f in result_files:
summary_file.write(open(f).read())
#888888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#
# print pipeline or run pipeline
#
#
# Necessary to protect the "entry point" of the program under windows.
# see: http://docs.python.org/library/multiprocessing.html#multiprocessing-programming
#
if __name__ == '__main__':
try:
if options.debug:
if not len(options.target_tasks):
options.target_tasks.append([statistical_summary])
pipeline_run([setup_simulation_data], [setup_simulation_data], multiprocess = options.jobs, verbose = 0)
else:
if (not len(get_gene_gwas_file_pairs( )[0]) or
not len (get_simulation_files( )[0])):
print "Warning!!\n\n\tNo *.gene / *.gwas or *.simulation: Run --debug to create simulation files first\n\n"
sys.exit(1)
if options.just_print:
pipeline_printout(sys.stdout, options.target_tasks, options.forced_tasks, verbose=options.verbose)
elif options.dependency_file:
graph_printout ( open(options.dependency_file, "w"),
options.dependency_graph_format,
options.target_tasks,
options.forced_tasks)
else:
pipeline_run(options.target_tasks, options.forced_tasks, multiprocess = options.jobs, verbose = options.verbose)
if options.debug and not options.keep:
cleanup_simulation_data ()
except Exception, e:
print e.args
raise
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
examples/service/messaging/alpha_sender/create/alpha_sender_create_example.go
|
package main
import (
"log"
"os"
"github.com/RJPearson94/twilio-sdk-go"
v1 "github.com/RJPearson94/twilio-sdk-go/service/messaging/v1"
"github.com/RJPearson94/twilio-sdk-go/service/messaging/v1/service/alpha_senders"
"github.com/RJPearson94/twilio-sdk-go/session/credentials"
)
var messagingClient *v1.Messaging
func init() {
creds, err := credentials.New(credentials.Account{
Sid: os.Getenv("TWILIO_ACCOUNT_SID"),
AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
messagingClient = twilio.NewWithCredentials(creds).Messaging.V1
}
func main() {
resp, err := messagingClient.
Service("MGXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
AlphaSenders.
Create(&alpha_senders.CreateAlphaSenderInput{
AlphaSender: "Test Company",
})
if err != nil {
log.Panicf("%s", err.Error())
}
log.Printf("SID: %s", resp.Sid)
}
|
[
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
] |
[] |
[
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] |
[]
|
["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
|
go
| 2 | 0 | |
internal/config/config.go
|
package config
import (
"flag"
"os"
"github.com/ilyakaznacheev/cleanenv"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
// Config is the unique source of configuration for the app
type Config struct {
MetricsPort string `yaml:"metrics_port" env:"MBOT_METRICS_PORT" env-default:"9802" env-description:"Port where prometheus metrics are exposed with /metrics"`
WatcherPort string `yaml:"watcher_port" env:"MBOT_WATCHER_PORT" env-default:"8000" env-description:"Port where motion-bot listens for motion events"`
MotionControlURL string `yaml:"motion_control_port" env:"MBOT_MOTION_CONTROL_URL" env-default:"http://motion:8080" env-description:"Motion Control URL"`
TelToken string `yaml:"tel_token" env:"MBOT_TEL_TOKEN" env-default:"" env-description:"Telegram Bot Token"`
TelAdminID []int `yaml:"tel_admin_id" env:"MBOT_TEL_ADMIN_ID" env-default:"" env-description:"Telegram user IDs of motion-bot Admins"`
TelSubscribedChatsID []int64 `yaml:"tel_subscribed_chats_id" env:"MBOT_TEL_SUBSCRIBED_CHATS_ID" env-default:"" env-description:"Default chats subscribed to this bot"`
Verbose bool `yaml:"verbose" env:"MBOT_VERBOSE" env-default:"false"`
}
// New creates a Config object parsing a yml file. Configs could also be added as env variables
func New() Config {
var cfg Config
fset := flag.NewFlagSet("motion-bot", flag.ExitOnError)
configPath := fset.String("config", "/etc/telebot/config.yml", "path to config file")
fset.Usage = cleanenv.FUsage(fset.Output(), &cfg, nil, fset.Usage)
if err := fset.Parse(os.Args[1:]); err != nil {
log.Fatal().Err(err).Msg("error reading parsing args")
}
if c := os.Getenv("MBOT_CONFIG_PATH"); c != "" {
*configPath = c
}
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
err := cleanenv.ReadConfig(*configPath, &cfg)
if err != nil {
log.Fatal().Err(err).Msg("error reading config file")
}
zerolog.SetGlobalLevel(zerolog.InfoLevel)
if cfg.Verbose {
zerolog.SetGlobalLevel(zerolog.DebugLevel)
}
return cfg
}
|
[
"\"MBOT_CONFIG_PATH\""
] |
[] |
[
"MBOT_CONFIG_PATH"
] |
[]
|
["MBOT_CONFIG_PATH"]
|
go
| 1 | 0 | |
idb.py
|
# coding: utf-8
#
# require: python >= 3.6
import base64
import json
import os
import sys
import subprocess
import time
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import Callable
import tornado
import requests
from logzero import logger
from tornado import gen, httpclient, locks
from tornado.concurrent import run_on_executor
from tornado.ioloop import IOLoop
from freeport import freeport
DeviceEvent = namedtuple('DeviceEvent', ['present', 'udid'])
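# Run an external command and return its stripped stdout, or "" when the
# command fails or is not installed.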
def runcommand(*args) -> str:
try:
output = subprocess.check_output(args)
return output.strip().decode('utf-8')
except (subprocess.CalledProcessError, FileNotFoundError):
return ""
except Exception as e:
logger.warning("unknown error: %s", e)
return ""
def list_devices():
udids = runcommand('idevice_id', '-l').splitlines()
return udids
def udid2name(udid: str) -> str:
return runcommand("idevicename", "-u", udid)
def udid2product(udid):
"""
See also: https://www.theiphonewiki.com/wiki/Models
"""
pt = runcommand("ideviceinfo", "--udid", udid, "--key", "ProductType")
models = {
"iPhone5,1": "iPhone 5",
"iPhone5,2": "iPhone 5",
"iPhone5,3": "iPhone 5c",
"iPhone5,4": "iPhone 5c",
"iPhone6,1": "iPhone 5s",
"iPhone6,2": "iPhone 5s",
"iPhone7,1": "iPhone 6 Plus",
"iPhone7,2": "iPhone 6",
"iPhone8,1": "iPhone 6s",
"iPhone8,2": "iPhone 6s Plus",
"iPhone8,4": "iPhone SE",
"iPhone9,1": "iPhone 7", # Global
"iPhone9,2": "iPhone 7 Plus", # Global
"iPhone9,3": "iPhone 7", # GSM
"iPhone9,4": "iPhone 7 Plus", # GSM
"iPhone10,1": "iPhone 8", # Global
"iPhone10,2": "iPhone 8 Plus", # Global
"iPhone10,3": "iPhone X", # Global
"iPhone10,4": "iPhone 8", # GSM
"iPhone10,5": "iPhone 8 Plus", # GSM
"iPhone10,6": "iPhone X", # GSM
"iPhone11,8": "iPhone XR",
"iPhone11,2": "iPhone XS",
"iPhone11,4": "iPhone XS Max",
"iPhone11,6": "iPhone XS Max",
"iPhone11,8": "iPhone XR",
"iPhone12,1": "iPhone 11",
"iPhone12,3": "iPhone 11 Pro",
"iPhone12,5": "iPhone 11 Pro Max",
"iPhone12,8": "iPhone SE 2nd",
# simulator
"i386": "iPhone Simulator",
"x86_64": "iPhone Simulator",
}
return models.get(pt, "Unknown")
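# Tracks attached iOS devices by polling `idevice_id -l`; track_devices()
# yields a DeviceEvent(present, udid) whenever a device appears or disappears.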
class Tracker():
executor = ThreadPoolExecutor(4)
def __init__(self):
self._lasts = []
@run_on_executor(executor='executor')
def list_devices(self):
return list_devices()
@gen.coroutine
def update(self):
""" Wired, can not use async here """
lasts = self._lasts
currs = yield self.list_devices()
        gones = set(lasts).difference(currs)  # devices that went offline
        backs = set(currs).difference(lasts)  # devices that came online
self._lasts = currs
raise gen.Return((backs, gones))
async def track_devices(self):
while True:
backs, gones = await self.update()
for udid in backs:
yield DeviceEvent(True, udid)
for udid in gones:
yield DeviceEvent(False, udid)
await gen.sleep(1)
def track_devices():
t = Tracker()
return t.track_devices()
async def nop_callback(*args, **kwargs):
pass
class WDADevice(object):
"""
Example usage:
lock = locks.Lock() # xcodebuild test is not support parallel run
async def callback(device: WDADevice, status, info=None):
pass
d = WDADevice("xxxxxx-udid-xxxxx", lock, callback)
d.start()
await d.stop()
"""
status_preparing = "preparing"
status_ready = "ready"
status_fatal = "fatal"
def __init__(self, udid: str, lock: locks.Lock, callback):
"""
Args:
callback: function (str, dict) -> None
Example callback:
callback("update", {"ip": "1.2.3.4"})
"""
self.__udid = udid
self.name = udid2name(udid)
self.product = udid2product(udid)
self.wda_directory = "./ATX-WebDriverAgent"
self._procs = []
self._wda_proxy_port = None
self._wda_proxy_proc = None
self._lock = lock # only allow one xcodebuild test run
self._finished = locks.Event()
self._stop = locks.Event()
self._callback = partial(callback, self) or nop_callback
self.manually_start_wda = False
self.use_tidevice = False
self.wda_bundle_pattern = "*WebDriverAgent*"
@property
def udid(self) -> str:
return self.__udid
@property
def public_port(self):
return self._wda_proxy_port
def __repr__(self):
return "[{udid}:{name}-{product}]".format(udid=self.udid[:5] + ".." +
self.udid[-2:],
name=self.name,
product=self.product)
def __str__(self):
return repr(self)
def start(self):
""" start wda process and keep it running, until wda stopped too many times or stop() called """
self._stop.clear()
IOLoop.current().spawn_callback(self.run_wda_forever)
async def stop(self):
""" stop wda process """
if self._stop.is_set():
raise RuntimeError(self, "WDADevice is already stopped")
self._stop.set() # no need await
logger.debug("%s waiting for wda stopped ...", self)
await self._finished.wait()
logger.debug("%s wda stopped!", self)
self._finished.clear()
async def run_wda_forever(self):
"""
Args:
callback
"""
wda_fail_cnt = 0
while not self._stop.is_set():
await self._callback(self.status_preparing)
start = time.time()
ok = await self.run_webdriveragent()
if not ok:
self.destroy()
wda_fail_cnt += 1
if wda_fail_cnt > 3:
logger.error("%s Run WDA failed. -_-!", self)
break
if time.time() - start < 3.0:
logger.error("%s WDA unable to start", self)
break
logger.warning("%s wda started failed, retry after 10s", self)
if not await self._sleep(10):
break
continue
wda_fail_cnt = 0
logger.info("%s wda lanuched", self)
# wda_status() result stored in __wda_info
await self._callback(self.status_ready, self.__wda_info)
await self.watch_wda_status()
await self._callback(self.status_fatal)
self.destroy() # destroy twice to make sure no process left
self._finished.set() # no need await
def destroy(self):
logger.debug("terminate wda processes")
for p in self._procs:
p.terminate()
self._procs = []
async def _sleep(self, timeout: float):
""" return false when sleep stopped by _stop(Event) """
try:
timeout_timestamp = IOLoop.current().time() + timeout
            await self._stop.wait(timeout_timestamp)  # weird usage
return False
except tornado.util.TimeoutError:
return True
async def watch_wda_status(self):
"""
check WebDriverAgent all the time
"""
# check wda_status every 30s
fail_cnt = 0
last_ip = self.device_ip
while not self._stop.is_set():
if await self.wda_status():
if fail_cnt != 0:
logger.info("wda ping recovered")
fail_cnt = 0
if last_ip != self.device_ip:
last_ip = self.device_ip
await self._callback(self.status_ready, self.__wda_info)
await self._sleep(60)
logger.debug("%s is fine", self)
else:
fail_cnt += 1
logger.warning("%s wda ping error: %d", self, fail_cnt)
if fail_cnt > 3:
logger.warning("ping wda fail too many times, restart wda")
break
await self._sleep(10)
self.destroy()
@property
def device_ip(self):
""" get current device ip """
if not self.__wda_info:
return None
try:
return self.__wda_info['value']['ios']['ip']
        except (KeyError, IndexError):
return None
async def run_webdriveragent(self) -> bool:
"""
UDID=$(idevice_id -l)
UDID=${UDID:?}
xcodebuild -project WebDriverAgent.xcodeproj \
-scheme WebDriverAgentRunner WebDriverAgentRunner id=$(idevice_id -l) test
Raises:
RuntimeError
"""
if self._procs:
self.destroy() # hotfix
#raise RuntimeError("should call destroy before run_webdriveragent", self._procs)
async with self._lock:
# holding lock, because multi wda run will raise error
# Testing failed:
# WebDriverAgentRunner-Runner.app encountered an error (Failed to install or launch the test
# runner. (Underlying error: Only directories may be uploaded. Please try again with a directory
# containing items to upload to the application_s sandbox.))
cmd = [
'xcodebuild', '-project',
os.path.join(self.wda_directory, 'WebDriverAgent.xcodeproj'),
'-scheme', 'WebDriverAgentRunner', "-destination",
'id=' + self.udid, 'test'
]
if os.getenv("TMQ") == "true":
cmd = ['tins', '-u', self.udid, 'xctest']
if self.manually_start_wda:
logger.info("Got param --manually-start-wda , will not launch wda process")
elif self.use_tidevice:
                # explicitly use the tidevice command to launch WDA
logger.info("Got param --use-tidevice , use tidevice to launch wda")
tidevice_cmd = ['tidevice', '-u', self.udid, 'wdaproxy', '-B', self.wda_bundle_pattern, '--port', '0']
self.run_background(tidevice_cmd, stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT)
else:
self.run_background(
cmd, stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT) # cwd='Appium-WebDriverAgent')
self._wda_port = freeport.get()
self._mjpeg_port = freeport.get()
self.run_background(
["./iproxy.sh",
str(self._wda_port), "8100", self.udid],
silent=True)
self.run_background(
["./iproxy.sh",
str(self._mjpeg_port), "9100", self.udid],
silent=True)
self.restart_wda_proxy()
return await self.wait_until_ready()
def run_background(self, *args, **kwargs):
if kwargs.pop("silent", False):
kwargs['stdout'] = subprocess.DEVNULL
kwargs['stderr'] = subprocess.DEVNULL
logger.debug("exec: %s", subprocess.list2cmdline(args[0]))
p = subprocess.Popen(*args, **kwargs)
self._procs.append(p)
def restart_wda_proxy(self):
if self._wda_proxy_proc:
self._wda_proxy_proc.terminate()
self._wda_proxy_port = freeport.get()
logger.debug("restart wdaproxy with port: %d", self._wda_proxy_port)
self._wda_proxy_proc = subprocess.Popen([
sys.executable, "-u", "wdaproxy-script.py",
"-p", str(self._wda_proxy_port),
"--wda-url", "http://localhost:{}".format(self._wda_port),
"--mjpeg-url", "http://localhost:{}".format(self._mjpeg_port)],
stdout=subprocess.DEVNULL) # yapf: disable
async def wait_until_ready(self, timeout: float = 60.0) -> bool:
"""
Returns:
bool
"""
deadline = time.time() + timeout
while time.time() < deadline and not self._stop.is_set():
quited = any([p.poll() is not None for p in self._procs])
if quited:
logger.warning("%s process quit %s", self,
[(p.pid, p.poll()) for p in self._procs])
return False
if await self.wda_status():
return True
await self._sleep(1)
return False
async def restart_wda(self):
self.destroy()
return await self.run_webdriveragent()
@property
def wda_device_url(self):
return "http://localhost:{}".format(self._wda_port)
async def wda_status(self):
"""
Returns:
dict or None
"""
try:
request = httpclient.HTTPRequest(self.wda_device_url + "/status",
connect_timeout=3,
request_timeout=15)
client = httpclient.AsyncHTTPClient()
resp = await client.fetch(request)
info = json.loads(resp.body)
self.__wda_info = info
return info
except httpclient.HTTPError as e:
logger.debug("%s request wda/status error: %s", self, e)
return None
except (ConnectionResetError, ConnectionRefusedError):
logger.debug("%s waiting for wda", self)
return None
except Exception as e:
logger.warning("%s ping wda unknown error: %s %s", self, type(e),
e)
return None
async def wda_screenshot_ok(self):
"""
Check if screenshot is working
Returns:
bool
"""
try:
request = httpclient.HTTPRequest(self.wda_device_url +
"/screenshot",
connect_timeout=3,
request_timeout=15)
client = httpclient.AsyncHTTPClient()
resp = await client.fetch(request)
data = json.loads(resp.body)
raw_png_data = base64.b64decode(data['value'])
png_header = b"\x89PNG\r\n\x1a\n"
if not raw_png_data.startswith(png_header):
return False
return True
except Exception as e:
logger.warning("%s wda screenshot error: %s", self, e)
return False
async def wda_session_ok(self):
"""
check if session create ok
"""
info = await self.wda_status()
if not info:
return False
#if not info.get("sessionId"): # the latest wda /status has no sessionId
# return False
return True
async def is_wda_alive(self):
logger.debug("%s check /status", self)
if not await self.wda_session_ok():
return False
logger.debug("%s check /screenshot", self)
if not await self.wda_screenshot_ok():
return False
return True
async def wda_healthcheck(self):
client = httpclient.AsyncHTTPClient()
if not await self.is_wda_alive():
logger.warning("%s check failed -_-!", self)
await self._callback(self.status_preparing)
if not await self.restart_wda():
logger.warning("%s wda recover in healthcheck failed", self)
return
else:
logger.debug("%s all check passed ^_^", self)
await client.fetch(self.wda_device_url + "/wda/healthcheck")
if __name__ == "__main__":
# main()
pass
|
[] |
[] |
[
"TMQ"
] |
[]
|
["TMQ"]
|
python
| 1 | 0 | |
src/test/bitcoin-util-test.py
|
#!/usr/bin/python
# Copyright 2014 BitPay, Inc.
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import os
import bctest
import buildenv
if __name__ == '__main__':
    bctest.bctester(os.environ["srcdir"] + "/test/data",
                    "bitcoin-util-test.json",buildenv)
|
[] |
[] |
[
"srcdir"
] |
[]
|
["srcdir"]
|
python
| 1 | 0 | |
w3c-examples/w3c-testng/src/test/java/test/headless/SampleHeadlessSauceTest.java
|
package test.headless;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.remote.RemoteWebDriver;
import java.net.MalformedURLException;
import java.net.URL;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.MutableCapabilities;
import org.openqa.selenium.chrome.ChromeOptions;
import org.testng.Assert;
import org.testng.ITestResult;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
public class SampleHeadlessSauceTest {
private WebDriver driver;
@Test
public void main() throws MalformedURLException {
String sauceUserName = System.getenv("SAUCE_USERNAME");
String sauceAccessKey = System.getenv("SAUCE_ACCESS_KEY");
String URL = "https://ondemand.us-east-1.saucelabs.com/wd/hub";
ChromeOptions chromeOpts = new ChromeOptions();
chromeOpts.setExperimentalOption("w3c", true);
MutableCapabilities sauceOptions = new MutableCapabilities();
sauceOptions.setCapability("username", sauceUserName);
sauceOptions.setCapability("accessKey", sauceAccessKey);
sauceOptions.setCapability("seleniumVersion", "3.141.59");
sauceOptions.setCapability("name", "headless-chrome-test-java");
sauceOptions.setCapability("build", "Sample Headless Tests");
MutableCapabilities caps = new MutableCapabilities();
caps.setCapability("goog:chromeOptions", chromeOpts);
caps.setCapability("browserName", "chrome");
caps.setCapability("browserVersion", "latest");
caps.setCapability("platformName", "Linux");
caps.setCapability("sauce:options", sauceOptions);
driver = new RemoteWebDriver(new URL(URL), caps);
/* Goes to Sauce Lab's demo page and prints title */
driver.get("https://www.saucedemo.com");
System.out.println("title of page is: " + driver.getTitle());
Assert.assertEquals(driver.getTitle(), "Swag Labs" );
}
/* Sends results to SauceLabs.com */
@AfterMethod
public void cleanUpAfterTestMethod(ITestResult result) {
((JavascriptExecutor)driver).executeScript("sauce:job-result=" + (result.isSuccess() ? "passed" : "failed"));
driver.quit();
}
}
|
[
"\"SAUCE_USERNAME\"",
"\"SAUCE_ACCESS_KEY\""
] |
[] |
[
"SAUCE_USERNAME",
"SAUCE_ACCESS_KEY"
] |
[]
|
["SAUCE_USERNAME", "SAUCE_ACCESS_KEY"]
|
java
| 2 | 0 | |
daemon/daemon.go
|
// Package daemon exposes the functions that occur on the host server
// that the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon
import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/Sirupsen/logrus"
containerd "github.com/docker/containerd/api/grpc/types"
"github.com/docker/docker/api"
"github.com/docker/docker/builder"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/events"
"github.com/docker/docker/daemon/exec"
"github.com/docker/docker/errors"
"github.com/docker/engine-api/types"
containertypes "github.com/docker/engine-api/types/container"
networktypes "github.com/docker/engine-api/types/network"
registrytypes "github.com/docker/engine-api/types/registry"
"github.com/docker/engine-api/types/strslice"
// register graph drivers
_ "github.com/docker/docker/daemon/graphdriver/register"
"github.com/docker/docker/daemon/network"
dmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/image"
"github.com/docker/docker/image/tarexport"
"github.com/docker/docker/layer"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/migrate/v1"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/graphdb"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/namesgenerator"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/registrar"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
volumedrivers "github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
"github.com/docker/docker/volume/store"
"github.com/docker/go-connections/nat"
"github.com/docker/libnetwork"
nwconfig "github.com/docker/libnetwork/config"
"github.com/docker/libtrust"
"golang.org/x/net/context"
)
const (
// maxDownloadConcurrency is the maximum number of downloads that
// may take place at a time for each pull.
maxDownloadConcurrency = 3
// maxUploadConcurrency is the maximum number of uploads that
// may take place at a time for each push.
maxUploadConcurrency = 5
)
var (
validContainerNameChars = utils.RestrictedNameChars
validContainerNamePattern = utils.RestrictedNamePattern
errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
)
// ErrImageDoesNotExist is error returned when no image can be found for a reference.
type ErrImageDoesNotExist struct {
RefOrID string
}
func (e ErrImageDoesNotExist) Error() string {
return fmt.Sprintf("no such id: %s", e.RefOrID)
}
// Daemon holds information about the Docker daemon.
type Daemon struct {
ID string
repository string
containers container.Store
execCommands *exec.Store
referenceStore reference.Store
downloadManager *xfer.LayerDownloadManager
uploadManager *xfer.LayerUploadManager
distributionMetadataStore dmetadata.Store
trustKey libtrust.PrivateKey
idIndex *truncindex.TruncIndex
configStore *Config
statsCollector *statsCollector
defaultLogConfig containertypes.LogConfig
RegistryService *registry.Service
EventsService *events.Events
netController libnetwork.NetworkController
volumes *store.VolumeStore
discoveryWatcher discoveryReloader
root string
seccompEnabled bool
shutdown bool
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
layerStore layer.Store
imageStore image.Store
nameIndex *registrar.Registrar
linkIndex *linkIndex
containerd libcontainerd.Client
defaultIsolation containertypes.Isolation // Default isolation mode on Windows
}
// GetContainer looks for a container using the provided information, which could be
// one of the following inputs from the caller:
// - A full container ID, which will exact match a container in daemon's list
// - A container name, which will only exact match via the GetByName() function
// - A partial container ID prefix (e.g. short ID) of any length that is
// unique enough to only return a single container object
// If none of these searches succeed, an error is returned
func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) {
if len(prefixOrName) == 0 {
return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied"))
}
if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil {
// prefix is an exact match to a full container ID
return containerByID, nil
}
// GetByName will match only an exact name provided; we ignore errors
if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil {
// prefix is an exact match to a full container Name
return containerByName, nil
}
containerID, indexError := daemon.idIndex.Get(prefixOrName)
if indexError != nil {
// When truncindex defines an error type, use that instead
if indexError == truncindex.ErrNotExist {
err := fmt.Errorf("No such container: %s", prefixOrName)
return nil, errors.NewRequestNotFoundError(err)
}
return nil, indexError
}
return daemon.containers.Get(containerID), nil
}
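// Illustrative usage (not part of the original source; the names and IDs below
// are hypothetical and error handling is elided):
//
//	c, _ := daemon.GetContainer("3f2a")      // unambiguous short-ID prefix -> resolved via idIndex
//	c, _ = daemon.GetContainer("/my-redis")  // exact name -> resolved via GetByName
//	c, _ = daemon.GetContainer(fullID)       // full container ID -> direct store lookup
//
// An ambiguous prefix or an unknown reference comes back as an error instead.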
// Exists returns true if a container of the specified ID or name exists,
// false otherwise.
func (daemon *Daemon) Exists(id string) bool {
c, _ := daemon.GetContainer(id)
return c != nil
}
// IsPaused returns a bool indicating if the specified container is paused.
func (daemon *Daemon) IsPaused(id string) bool {
c, _ := daemon.GetContainer(id)
return c.State.IsPaused()
}
func (daemon *Daemon) containerRoot(id string) string {
return filepath.Join(daemon.repository, id)
}
// Load reads the contents of a container from disk
// This is typically done at startup.
func (daemon *Daemon) load(id string) (*container.Container, error) {
container := daemon.newBaseContainer(id)
if err := container.FromDisk(); err != nil {
return nil, err
}
if container.ID != id {
return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
}
return container, nil
}
func (daemon *Daemon) registerName(container *container.Container) error {
if daemon.Exists(container.ID) {
return fmt.Errorf("Container is already loaded")
}
if err := validateID(container.ID); err != nil {
return err
}
if container.Name == "" {
name, err := daemon.generateNewName(container.ID)
if err != nil {
return err
}
container.Name = name
if err := container.ToDiskLocking(); err != nil {
logrus.Errorf("Error saving container name to disk: %v", err)
}
}
return daemon.nameIndex.Reserve(container.Name, container.ID)
}
// Register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) Register(c *container.Container) error {
// Attach to stdout and stderr
if c.Config.OpenStdin {
c.NewInputPipes()
} else {
c.NewNopInputPipe()
}
daemon.containers.Add(c.ID, c)
daemon.idIndex.Add(c.ID)
return nil
}
func (daemon *Daemon) restore() error {
var (
debug = utils.IsDebugEnabled()
currentDriver = daemon.GraphDriverName()
containers = make(map[string]*container.Container)
)
if !debug {
logrus.Info("Loading containers: start.")
}
dir, err := ioutil.ReadDir(daemon.repository)
if err != nil {
return err
}
for _, v := range dir {
id := v.Name()
container, err := daemon.load(id)
if !debug && logrus.GetLevel() == logrus.InfoLevel {
fmt.Print(".")
}
if err != nil {
logrus.Errorf("Failed to load container %v: %v", id, err)
continue
}
// Ignore the container if it does not support the current driver being used by the graph
if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
if err != nil {
logrus.Errorf("Failed to load container mount %v: %v", id, err)
continue
}
container.RWLayer = rwlayer
logrus.Debugf("Loaded container %v", container.ID)
containers[container.ID] = container
} else {
logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}
var migrateLegacyLinks bool
restartContainers := make(map[*container.Container]chan struct{})
for _, c := range containers {
if err := daemon.registerName(c); err != nil {
logrus.Errorf("Failed to register container %s: %s", c.ID, err)
continue
}
if err := daemon.Register(c); err != nil {
logrus.Errorf("Failed to register container %s: %s", c.ID, err)
continue
}
}
var wg sync.WaitGroup
var mapLock sync.Mutex
for _, c := range containers {
wg.Add(1)
go func(c *container.Container) {
defer wg.Done()
rm := c.RestartManager(false)
if c.IsRunning() || c.IsPaused() {
// Fix activityCount such that graph mounts can be unmounted later
if err := daemon.layerStore.ReinitRWLayer(c.RWLayer); err != nil {
logrus.Errorf("Failed to ReinitRWLayer for %s due to %s", c.ID, err)
return
}
if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil {
logrus.Errorf("Failed to restore with containerd: %q", err)
return
}
}
// fixme: only if not running
// get list of containers we need to restart
if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestart() {
mapLock.Lock()
restartContainers[c] = make(chan struct{})
mapLock.Unlock()
}
if c.RemovalInProgress {
// We probably crashed in the middle of a removal, reset
// the flag.
//
// We DO NOT remove the container here as we do not
// know if the user had requested for either the
// associated volumes, network links or both to also
// be removed. So we put the container in the "dead"
// state and leave further processing up to them.
logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
c.ResetRemovalInProgress()
c.SetDead()
c.ToDisk()
}
// if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
if c.HostConfig != nil && c.HostConfig.Links == nil {
migrateLegacyLinks = true
}
}(c)
}
wg.Wait()
// migrate any legacy links from sqlite
linkdbFile := filepath.Join(daemon.root, "linkgraph.db")
var legacyLinkDB *graphdb.Database
if migrateLegacyLinks {
legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile)
if err != nil {
return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err)
}
defer legacyLinkDB.Close()
}
// Now that all the containers are registered, register the links
for _, c := range containers {
if migrateLegacyLinks {
if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil {
return err
}
}
if err := daemon.registerLinks(c, c.HostConfig); err != nil {
logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
}
}
group := sync.WaitGroup{}
for c, notifier := range restartContainers {
group.Add(1)
go func(c *container.Container, chNotify chan struct{}) {
defer group.Done()
logrus.Debugf("Starting container %s", c.ID)
// ignore errors here as this is a best effort to wait for children to be
// running before we try to start the container
children := daemon.children(c)
timeout := time.After(5 * time.Second)
for _, child := range children {
if notifier, exists := restartContainers[child]; exists {
select {
case <-notifier:
case <-timeout:
}
}
}
if err := daemon.containerStart(c); err != nil {
logrus.Errorf("Failed to start container %s: %s", c.ID, err)
}
close(chNotify)
}(c, notifier)
}
group.Wait()
// any containers that were started above would already have had this done,
// however we need to now prepare the mountpoints for the rest of the containers as well.
// This shouldn't cause any issue running on the containers that already had this run.
// This must be run after any containers with a restart policy so that containerized plugins
// can have a chance to be running before we try to initialize them.
for _, c := range containers {
// if the container has restart policy, do not
// prepare the mountpoints since it has been done on restarting.
// This is to speed up the daemon start when a restarting container
// has a volume and the volume driver is not available.
if _, ok := restartContainers[c]; ok {
continue
}
group.Add(1)
go func(c *container.Container) {
defer group.Done()
if err := daemon.prepareMountPoints(c); err != nil {
logrus.Error(err)
}
}(c)
}
group.Wait()
if !debug {
if logrus.GetLevel() == logrus.InfoLevel {
fmt.Println()
}
logrus.Info("Loading containers: done.")
}
return nil
}
func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error {
if img != nil && img.Config != nil {
if err := merge(config, img.Config); err != nil {
return err
}
}
if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 {
return fmt.Errorf("No command specified")
}
return nil
}
func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
var (
err error
id = stringid.GenerateNonCryptoID()
)
if name == "" {
if name, err = daemon.generateNewName(id); err != nil {
return "", "", err
}
return id, name, nil
}
if name, err = daemon.reserveName(id, name); err != nil {
return "", "", err
}
return id, name, nil
}
func (daemon *Daemon) reserveName(id, name string) (string, error) {
if !validContainerNamePattern.MatchString(name) {
return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
}
if name[0] != '/' {
name = "/" + name
}
if err := daemon.nameIndex.Reserve(name, id); err != nil {
if err == registrar.ErrNameReserved {
id, err := daemon.nameIndex.Get(name)
if err != nil {
logrus.Errorf("got unexpected error while looking up reserved name: %v", err)
return "", err
}
return "", fmt.Errorf("Conflict. The name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", name, id)
}
return "", fmt.Errorf("error reserving name: %s, error: %v", name, err)
}
return name, nil
}
func (daemon *Daemon) releaseName(name string) {
daemon.nameIndex.Release(name)
}
func (daemon *Daemon) generateNewName(id string) (string, error) {
var name string
for i := 0; i < 6; i++ {
name = namesgenerator.GetRandomName(i)
if name[0] != '/' {
name = "/" + name
}
if err := daemon.nameIndex.Reserve(name, id); err != nil {
if err == registrar.ErrNameReserved {
continue
}
return "", err
}
return name, nil
}
name = "/" + stringid.TruncateID(id)
if err := daemon.nameIndex.Reserve(name, id); err != nil {
return "", err
}
return name, nil
}
func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) {
// Generate default hostname
if config.Hostname == "" {
config.Hostname = id[:12]
}
}
func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) {
if len(configEntrypoint) != 0 {
return configEntrypoint[0], append(configEntrypoint[1:], configCmd...)
}
return configCmd[0], configCmd[1:]
}
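// Worked example (illustrative, not part of the original source): for an image
// with Entrypoint ["/bin/sh", "-c"] and Cmd ["echo", "hi"], this returns
// ("/bin/sh", ["-c", "echo", "hi"]); with an empty Entrypoint it would return
// ("echo", ["hi"]) instead.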
func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID) (*container.Container, error) {
var (
id string
err error
noExplicitName = name == ""
)
id, name, err = daemon.generateIDAndName(name)
if err != nil {
return nil, err
}
daemon.generateHostname(id, config)
entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd)
base := daemon.newBaseContainer(id)
base.Created = time.Now().UTC()
base.Path = entrypoint
base.Args = args //FIXME: de-duplicate from config
base.Config = config
base.HostConfig = &containertypes.HostConfig{}
base.ImageID = imgID
base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName}
base.Name = name
base.Driver = daemon.GraphDriverName()
return base, err
}
// GetByName returns a container given a name.
func (daemon *Daemon) GetByName(name string) (*container.Container, error) {
if len(name) == 0 {
return nil, fmt.Errorf("No container name supplied")
}
fullName := name
if name[0] != '/' {
fullName = "/" + name
}
id, err := daemon.nameIndex.Get(fullName)
if err != nil {
return nil, fmt.Errorf("Could not find entity for %s", name)
}
e := daemon.containers.Get(id)
if e == nil {
return nil, fmt.Errorf("Could not find container for entity id %s", id)
}
return e, nil
}
// GetLabels for a container or image id
func (daemon *Daemon) GetLabels(id string) map[string]string {
// TODO: TestCase
container := daemon.containers.Get(id)
if container != nil {
return container.Config.Labels
}
img, err := daemon.GetImage(id)
if err == nil {
return img.ContainerConfig.Labels
}
return nil
}
func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.children(c)
}
// parents returns the names of the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.parents(c)
}
func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
fullName := path.Join(parent.Name, alias)
if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
if err == registrar.ErrNameReserved {
logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
return nil
}
return err
}
daemon.linkIndex.link(parent, child, fullName)
return nil
}
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService *registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
setDefaultMtu(config)
// Ensure we have compatible and valid configuration options
if err := verifyDaemonSettings(config); err != nil {
return nil, err
}
// Do we have a disabled network?
config.DisableBridge = isBridgeNetworkDisabled(config)
// Verify the platform is supported as a daemon
if !platformSupported {
return nil, errSystemNotSupported
}
// Validate platform-specific requirements
if err := checkSystem(); err != nil {
return nil, err
}
// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
// on Windows to dump Go routine stacks
setupDumpStackTrap()
uidMaps, gidMaps, err := setupRemappedRoot(config)
if err != nil {
return nil, err
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
// get the canonical path to the Docker root directory
var realRoot string
if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
realRoot = config.Root
} else {
realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
if err != nil {
return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
}
}
if err = setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
return nil, err
}
// set up the tmpDir to use a canonical path
tmp, err := tempDir(config.Root, rootUID, rootGID)
if err != nil {
return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
}
realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
if err != nil {
return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
}
os.Setenv("TMPDIR", realTmp)
d := &Daemon{configStore: config}
// Ensure the daemon is properly shutdown if there is a failure during
// initialization
defer func() {
if err != nil {
if err := d.Shutdown(); err != nil {
logrus.Error(err)
}
}
}()
// Set the default isolation mode (only applicable on Windows)
if err := d.setDefaultIsolation(); err != nil {
return nil, fmt.Errorf("error setting default isolation mode: %v", err)
}
logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)
if err := configureMaxThreads(config); err != nil {
logrus.Warnf("Failed to configure golang's threads limit: %v", err)
}
installDefaultAppArmorProfile()
daemonRepo := filepath.Join(config.Root, "containers")
if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return nil, err
}
driverName := os.Getenv("DOCKER_DRIVER")
if driverName == "" {
driverName = config.GraphDriver
}
d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
StorePath: config.Root,
MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
GraphDriver: driverName,
GraphDriverOptions: config.GraphOptions,
UIDMaps: uidMaps,
GIDMaps: gidMaps,
})
if err != nil {
return nil, err
}
graphDriver := d.layerStore.DriverName()
imageRoot := filepath.Join(config.Root, "image", graphDriver)
// Configure and validate the kernels security support
if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
return nil, err
}
d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, maxDownloadConcurrency)
d.uploadManager = xfer.NewLayerUploadManager(maxUploadConcurrency)
ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
if err != nil {
return nil, err
}
d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
if err != nil {
return nil, err
}
// Configure the volumes driver
volStore, err := configureVolumes(config, rootUID, rootGID)
if err != nil {
return nil, err
}
trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
return nil, err
}
trustDir := filepath.Join(config.Root, "trust")
if err := system.MkdirAll(trustDir, 0700); err != nil {
return nil, err
}
distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
if err != nil {
return nil, err
}
eventsService := events.New()
referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
}
if err := restoreCustomImage(d.imageStore, d.layerStore, referenceStore); err != nil {
return nil, fmt.Errorf("Couldn't restore custom images: %s", err)
}
migrationStart := time.Now()
if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
}
logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
// Discovery is only enabled when the daemon is launched with an address to advertise. When
// initialized, the daemon is registered and we can store the discovery backend as its read-only
if err := d.initDiscovery(config); err != nil {
return nil, err
}
d.netController, err = d.initNetworkController(config)
if err != nil {
return nil, fmt.Errorf("Error initializing network controller: %v", err)
}
sysInfo := sysinfo.New(false)
// Check if Devices cgroup is mounted, it is a hard requirement for container security,
// on Linux/FreeBSD.
if runtime.GOOS != "windows" && !sysInfo.CgroupDevicesEnabled {
return nil, fmt.Errorf("Devices cgroup isn't mounted")
}
d.ID = trustKey.PublicKey().KeyID()
d.repository = daemonRepo
d.containers = container.NewMemoryStore()
d.execCommands = exec.NewStore()
d.referenceStore = referenceStore
d.distributionMetadataStore = distributionMetadataStore
d.trustKey = trustKey
d.idIndex = truncindex.NewTruncIndex([]string{})
d.statsCollector = d.newStatsCollector(1 * time.Second)
d.defaultLogConfig = containertypes.LogConfig{
Type: config.LogConfig.Type,
Config: config.LogConfig.Config,
}
d.RegistryService = registryService
d.EventsService = eventsService
d.volumes = volStore
d.root = config.Root
d.uidMaps = uidMaps
d.gidMaps = gidMaps
d.seccompEnabled = sysInfo.Seccomp
d.nameIndex = registrar.NewRegistrar()
d.linkIndex = newLinkIndex()
go d.execCommandGC()
d.containerd, err = containerdRemote.Client(d)
if err != nil {
return nil, err
}
if err := d.restore(); err != nil {
return nil, err
}
return d, nil
}
func (daemon *Daemon) shutdownContainer(c *container.Container) error {
// TODO(windows): Handle docker restart with paused containers
if c.IsPaused() {
// To terminate a process in freezer cgroup, we should send
// SIGTERM to this process, then unfreeze it, and the process will
// be forced to terminate immediately.
logrus.Debugf("Found container %s is paused, sending SIGTERM before unpause it", c.ID)
sig, ok := signal.SignalMap["TERM"]
if !ok {
return fmt.Errorf("System doesn not support SIGTERM")
}
if err := daemon.kill(c, int(sig)); err != nil {
return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
}
if err := daemon.containerUnpause(c); err != nil {
return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
}
if _, err := c.WaitStop(10 * time.Second); err != nil {
logrus.Debugf("container %s failed to exit in 10 second of SIGTERM, sending SIGKILL to force", c.ID)
sig, ok := signal.SignalMap["KILL"]
if !ok {
return fmt.Errorf("System does not support SIGKILL")
}
if err := daemon.kill(c, int(sig)); err != nil {
logrus.Errorf("Failed to SIGKILL container %s", c.ID)
}
c.WaitStop(-1 * time.Second)
return err
}
}
// If the container failed to exit within 10 seconds of SIGTERM, force-stop it
if err := daemon.containerStop(c, 10); err != nil {
return fmt.Errorf("Stop container %s with error: %v", c.ID, err)
}
c.WaitStop(-1 * time.Second)
return nil
}
// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
daemon.shutdown = true
if daemon.containers != nil {
logrus.Debug("starting clean shutdown of all containers...")
daemon.containers.ApplyAll(func(c *container.Container) {
if !c.IsRunning() {
return
}
logrus.Debugf("stopping %s", c.ID)
if err := daemon.shutdownContainer(c); err != nil {
logrus.Errorf("Stop container error: %v", err)
return
}
if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
daemon.cleanupMountsByID(mountid)
}
logrus.Debugf("container stopped %s", c.ID)
})
}
// trigger libnetwork Stop only if it's initialized
if daemon.netController != nil {
daemon.netController.Stop()
}
if daemon.layerStore != nil {
if err := daemon.layerStore.Cleanup(); err != nil {
logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
}
}
if err := daemon.cleanupMounts(); err != nil {
return err
}
return nil
}
// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
dir, err := container.RWLayer.Mount(container.GetMountLabel())
if err != nil {
return err
}
logrus.Debugf("container mounted via layerStore: %v", dir)
if container.BaseFS != dir {
// The mount path reported by the graph driver should always be trusted on Windows, since the
// volume path for a given mounted layer may change over time. This should only be an error
// on non-Windows operating systems.
if container.BaseFS != "" && runtime.GOOS != "windows" {
daemon.Unmount(container)
return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
}
}
container.BaseFS = dir // TODO: combine these fields
return nil
}
// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
if err := container.RWLayer.Unmount(); err != nil {
logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
return err
}
return nil
}
func (daemon *Daemon) kill(c *container.Container, sig int) error {
return daemon.containerd.Signal(c.ID, sig)
}
func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} {
return daemon.statsCollector.collect(c)
}
func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) {
daemon.statsCollector.unsubscribe(c, ch)
}
func (daemon *Daemon) changes(container *container.Container) ([]archive.Change, error) {
return container.RWLayer.Changes()
}
func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
operationCancelled := false
for prog := range progressChan {
if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled {
// don't log broken pipe errors as this is the normal case when a client aborts
if isBrokenPipe(err) {
logrus.Info("Pull session cancelled")
} else {
logrus.Errorf("error writing progress to client: %v", err)
}
cancelFunc()
operationCancelled = true
// Don't return, because we need to continue draining
// progressChan until it's closed to avoid a deadlock.
}
}
}
func isBrokenPipe(e error) bool {
if netErr, ok := e.(*net.OpError); ok {
e = netErr.Err
if sysErr, ok := netErr.Err.(*os.SyscallError); ok {
e = sysErr.Err
}
}
return e == syscall.EPIPE
}
// ExportImage exports a list of images to the given output stream. The
// exported images are archived into a tar when written to the output
// stream. All images with the given tag and all versions containing
// the same tag are exported. names is the set of tags to export, and
// outStream is the writer which the images are written to.
func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error {
imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon)
return imageExporter.Save(names, outStream)
}
// LookupImage looks up an image by name and returns it as an ImageInspect
// structure.
func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
img, err := daemon.GetImage(name)
if err != nil {
return nil, fmt.Errorf("No such image: %s", name)
}
refs := daemon.referenceStore.References(img.ID())
repoTags := []string{}
repoDigests := []string{}
for _, ref := range refs {
switch ref.(type) {
case reference.NamedTagged:
repoTags = append(repoTags, ref.String())
case reference.Canonical:
repoDigests = append(repoDigests, ref.String())
}
}
var size int64
var layerMetadata map[string]string
layerID := img.RootFS.ChainID()
if layerID != "" {
l, err := daemon.layerStore.Get(layerID)
if err != nil {
return nil, err
}
defer layer.ReleaseAndLog(daemon.layerStore, l)
size, err = l.Size()
if err != nil {
return nil, err
}
layerMetadata, err = l.Metadata()
if err != nil {
return nil, err
}
}
comment := img.Comment
if len(comment) == 0 && len(img.History) > 0 {
comment = img.History[len(img.History)-1].Comment
}
imageInspect := &types.ImageInspect{
ID: img.ID().String(),
RepoTags: repoTags,
RepoDigests: repoDigests,
Parent: img.Parent.String(),
Comment: comment,
Created: img.Created.Format(time.RFC3339Nano),
Container: img.Container,
ContainerConfig: &img.ContainerConfig,
DockerVersion: img.DockerVersion,
Author: img.Author,
Config: img.Config,
Architecture: img.Architecture,
Os: img.OS,
Size: size,
VirtualSize: size, // TODO: field unused, deprecate
RootFS: rootFSToAPIType(img.RootFS),
}
imageInspect.GraphDriver.Name = daemon.GraphDriverName()
imageInspect.GraphDriver.Data = layerMetadata
return imageInspect, nil
}
// LoadImage uploads a set of images into the repository. This is the
// complement of ImageExport. The input stream is an uncompressed tar
// ball containing images and metadata.
func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon)
return imageExporter.Load(inTar, outStream, quiet)
}
// ImageHistory returns a slice of ImageHistory structures for the specified image
// name by walking the image lineage.
func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) {
img, err := daemon.GetImage(name)
if err != nil {
return nil, err
}
history := []*types.ImageHistory{}
layerCounter := 0
rootFS := *img.RootFS
rootFS.DiffIDs = nil
for _, h := range img.History {
var layerSize int64
if !h.EmptyLayer {
if len(img.RootFS.DiffIDs) <= layerCounter {
return nil, fmt.Errorf("too many non-empty layers in History section")
}
rootFS.Append(img.RootFS.DiffIDs[layerCounter])
l, err := daemon.layerStore.Get(rootFS.ChainID())
if err != nil {
return nil, err
}
layerSize, err = l.DiffSize()
layer.ReleaseAndLog(daemon.layerStore, l)
if err != nil {
return nil, err
}
layerCounter++
}
history = append([]*types.ImageHistory{{
ID: "<missing>",
Created: h.Created.Unix(),
CreatedBy: h.CreatedBy,
Comment: h.Comment,
Size: layerSize,
}}, history...)
}
// Fill in image IDs and tags
histImg := img
id := img.ID()
for _, h := range history {
h.ID = id.String()
var tags []string
for _, r := range daemon.referenceStore.References(id) {
if _, ok := r.(reference.NamedTagged); ok {
tags = append(tags, r.String())
}
}
h.Tags = tags
id = histImg.Parent
if id == "" {
break
}
histImg, err = daemon.GetImage(id.String())
if err != nil {
break
}
}
return history, nil
}
// GetImageID returns an image ID corresponding to the image referred to by
// refOrID.
func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) {
id, ref, err := reference.ParseIDOrReference(refOrID)
if err != nil {
return "", err
}
if id != "" {
if _, err := daemon.imageStore.Get(image.ID(id)); err != nil {
return "", ErrImageDoesNotExist{refOrID}
}
return image.ID(id), nil
}
if id, err := daemon.referenceStore.Get(ref); err == nil {
return id, nil
}
if tagged, ok := ref.(reference.NamedTagged); ok {
if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil {
for _, namedRef := range daemon.referenceStore.References(id) {
if namedRef.Name() == ref.Name() {
return id, nil
}
}
}
}
// Search based on ID
if id, err := daemon.imageStore.Search(refOrID); err == nil {
return id, nil
}
return "", ErrImageDoesNotExist{refOrID}
}
// GetImage returns an image corresponding to the image referred to by refOrID.
func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) {
imgID, err := daemon.GetImageID(refOrID)
if err != nil {
return nil, err
}
return daemon.imageStore.Get(imgID)
}
// GetImageOnBuild looks up a Docker image referenced by `name`.
func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) {
img, err := daemon.GetImage(name)
if err != nil {
return nil, err
}
return img, nil
}
// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName() string {
return daemon.layerStore.DriverName()
}
// GetUIDGIDMaps returns the current daemon's user namespace settings
// for the full uid and gid maps which will be applied to containers
// started in this instance.
func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
return daemon.uidMaps, daemon.gidMaps
}
// GetRemappedUIDGID returns the current daemon's uid and gid values
// if user namespaces are in use for this daemon instance. If not
// this function will return "real" root values of 0, 0.
func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
return uid, gid
}
// GetCachedImage returns the most recent created image that is a child
// of the image with imgID, that had the same config when it was
// created. nil is returned if a child cannot be found. An error is
// returned if the parent image cannot be found.
func (daemon *Daemon) GetCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) {
// Loop on the children of the given image and check the config
getMatch := func(siblings []image.ID) (*image.Image, error) {
var match *image.Image
for _, id := range siblings {
img, err := daemon.imageStore.Get(id)
if err != nil {
return nil, fmt.Errorf("unable to find image %q", id)
}
if runconfig.Compare(&img.ContainerConfig, config) {
// check for the most up to date match
if match == nil || match.Created.Before(img.Created) {
match = img
}
}
}
return match, nil
}
// In this case, this is `FROM scratch`, which isn't an actual image.
if imgID == "" {
images := daemon.imageStore.Map()
var siblings []image.ID
for id, img := range images {
if img.Parent == imgID {
siblings = append(siblings, id)
}
}
return getMatch(siblings)
}
// find match from child images
siblings := daemon.imageStore.Children(imgID)
return getMatch(siblings)
}
// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent`
// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) {
cache, err := daemon.GetCachedImage(image.ID(imgID), cfg)
if cache == nil || err != nil {
return "", err
}
return cache.ID().String(), nil
}
// tempDir returns the default directory to use for temporary files.
func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
var tmpDir string
if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
tmpDir = filepath.Join(rootDir, "tmp")
}
return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
}
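// Illustrative behaviour (not part of the original source; the paths are
// hypothetical): with DOCKER_TMPDIR unset and a root of /var/lib/docker,
// tempDir returns "/var/lib/docker/tmp"; exporting DOCKER_TMPDIR=/mnt/scratch
// makes it return "/mnt/scratch" instead. Either path is created with mode
// 0700 and owned by the remapped root UID/GID.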
func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error {
container.Lock()
defer container.Unlock()
return parseSecurityOpt(container, hostConfig)
}
func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error {
// Do not lock while creating volumes since this could be calling out to external plugins
// Don't want to block other actions, like `docker ps` because we're waiting on an external plugin
if err := daemon.registerMountPoints(container, hostConfig); err != nil {
return err
}
container.Lock()
defer container.Unlock()
// Register any links from the host config before starting the container
if err := daemon.registerLinks(container, hostConfig); err != nil {
return err
}
// make sure links is not nil
// this ensures that on the next daemon restart we don't try to migrate from legacy sqlite links
if hostConfig.Links == nil {
hostConfig.Links = []string{}
}
container.HostConfig = hostConfig
return container.ToDisk()
}
func (daemon *Daemon) setupInitLayer(initPath string) error {
rootUID, rootGID := daemon.GetRemappedUIDGID()
return setupInitLayer(initPath, rootUID, rootGID)
}
func setDefaultMtu(config *Config) {
// do nothing if the MTU was explicitly set (i.e. is not the default 0 value).
if config.Mtu != 0 {
return
}
config.Mtu = defaultNetworkMtu
}
// verifyContainerSettings performs validation of the hostconfig and config
// structures.
func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
// First perform verification of settings common across all platforms.
if config != nil {
if config.WorkingDir != "" {
config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics
if !system.IsAbs(config.WorkingDir) {
return nil, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path", config.WorkingDir)
}
}
if len(config.StopSignal) > 0 {
_, err := signal.ParseSignal(config.StopSignal)
if err != nil {
return nil, err
}
}
// Validate if the given hostname is RFC 1123 (https://tools.ietf.org/html/rfc1123) compliant.
if len(config.Hostname) > 0 {
// RFC1123 specifies that 63 bytes is the maximum length
// Windows has the limitation of 63 bytes in length
// Linux hostname is limited to HOST_NAME_MAX=64, not including the terminating null byte.
// We limit the length to 63 bytes here to match RFC1035 and RFC1123.
matched, _ := regexp.MatchString("^(([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])\\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])$", config.Hostname)
if len(config.Hostname) > 63 || !matched {
return nil, fmt.Errorf("invalid hostname format: %s", config.Hostname)
}
}
}
if hostConfig == nil {
return nil, nil
}
for port := range hostConfig.PortBindings {
_, portStr := nat.SplitProtoPort(string(port))
if _, err := nat.ParsePort(portStr); err != nil {
return nil, fmt.Errorf("Invalid port specification: %q", portStr)
}
for _, pb := range hostConfig.PortBindings[port] {
_, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort))
if err != nil {
return nil, fmt.Errorf("Invalid port specification: %q", pb.HostPort)
}
}
}
// Now do platform-specific verification
return verifyPlatformContainerSettings(daemon, hostConfig, config, update)
}
// Checks if the client set configurations for more than one network while creating a container
func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error {
if nwConfig == nil || len(nwConfig.EndpointsConfig) <= 1 {
return nil
}
l := make([]string, 0, len(nwConfig.EndpointsConfig))
for k := range nwConfig.EndpointsConfig {
l = append(l, k)
}
err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", "))
return errors.NewBadRequestError(err)
}
func configureVolumes(config *Config, rootUID, rootGID int) (*store.VolumeStore, error) {
volumesDriver, err := local.New(config.Root, rootUID, rootGID)
if err != nil {
return nil, err
}
volumedrivers.Register(volumesDriver, volumesDriver.Name())
return store.New(config.Root)
}
// AuthenticateToRegistry checks the validity of credentials in authConfig
func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) {
return daemon.RegistryService.Auth(authConfig, dockerversion.DockerUserAgent(ctx))
}
// SearchRegistryForImages queries the registry for images matching
// term. authConfig is used to login.
func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, term string,
authConfig *types.AuthConfig,
headers map[string][]string) (*registrytypes.SearchResults, error) {
return daemon.RegistryService.Search(term, authConfig, dockerversion.DockerUserAgent(ctx), headers)
}
// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
return daemon.shutdown
}
// GetContainerStats collects all the stats published by a container
func (daemon *Daemon) GetContainerStats(container *container.Container) (*types.StatsJSON, error) {
stats, err := daemon.stats(container)
if err != nil {
return nil, err
}
if stats.Networks, err = daemon.getNetworkStats(container); err != nil {
return nil, err
}
return stats, nil
}
// Resolve Network SandboxID in case the container reuses another container's network stack
func (daemon *Daemon) getNetworkSandboxID(c *container.Container) (string, error) {
curr := c
for curr.HostConfig.NetworkMode.IsContainer() {
containerID := curr.HostConfig.NetworkMode.ConnectedContainer()
connected, err := daemon.GetContainer(containerID)
if err != nil {
return "", fmt.Errorf("Could not get container for %s", containerID)
}
curr = connected
}
return curr.NetworkSettings.SandboxID, nil
}
func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) {
sandboxID, err := daemon.getNetworkSandboxID(c)
if err != nil {
return nil, err
}
sb, err := daemon.netController.SandboxByID(sandboxID)
if err != nil {
return nil, err
}
lnstats, err := sb.Statistics()
if err != nil {
return nil, err
}
stats := make(map[string]types.NetworkStats)
// Convert libnetwork nw stats into engine-api stats
for ifName, ifStats := range lnstats {
stats[ifName] = types.NetworkStats{
RxBytes: ifStats.RxBytes,
RxPackets: ifStats.RxPackets,
RxErrors: ifStats.RxErrors,
RxDropped: ifStats.RxDropped,
TxBytes: ifStats.TxBytes,
TxPackets: ifStats.TxPackets,
TxErrors: ifStats.TxErrors,
TxDropped: ifStats.TxDropped,
}
}
return stats, nil
}
// newBaseContainer creates a new container with its initial
// configuration based on the root storage from the daemon.
func (daemon *Daemon) newBaseContainer(id string) *container.Container {
return container.NewBaseContainer(id, daemon.containerRoot(id))
}
// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(config *Config) error {
advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
if err != nil {
if err == errDiscoveryDisabled {
return nil
}
return err
}
config.ClusterAdvertise = advertise
discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
if err != nil {
return fmt.Errorf("discovery initialization failed (%v)", err)
}
daemon.discoveryWatcher = discoveryWatcher
return nil
}
// Reload reads configuration changes and modifies the
// daemon according to those changes.
// These are the settings that Reload changes:
// - Daemon labels.
// - Daemon debug log level.
// - Cluster discovery (reconfigure and restart).
func (daemon *Daemon) Reload(config *Config) error {
daemon.configStore.reloadLock.Lock()
defer daemon.configStore.reloadLock.Unlock()
if config.IsValueSet("labels") {
daemon.configStore.Labels = config.Labels
}
if config.IsValueSet("debug") {
daemon.configStore.Debug = config.Debug
}
return daemon.reloadClusterDiscovery(config)
}
func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
var err error
newAdvertise := daemon.configStore.ClusterAdvertise
newClusterStore := daemon.configStore.ClusterStore
if config.IsValueSet("cluster-advertise") {
if config.IsValueSet("cluster-store") {
newClusterStore = config.ClusterStore
}
newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise)
if err != nil && err != errDiscoveryDisabled {
return err
}
}
// check discovery modifications
if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) {
return nil
}
// enable discovery for the first time if it was not previously enabled
if daemon.discoveryWatcher == nil {
discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)
if err != nil {
return fmt.Errorf("discovery initialization failed (%v)", err)
}
daemon.discoveryWatcher = discoveryWatcher
} else {
if err == errDiscoveryDisabled {
// disable discovery if it was previously enabled and it's disabled now
daemon.discoveryWatcher.Stop()
} else {
// reload discovery
if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil {
return err
}
}
}
daemon.configStore.ClusterStore = newClusterStore
daemon.configStore.ClusterOpts = config.ClusterOpts
daemon.configStore.ClusterAdvertise = newAdvertise
if daemon.netController == nil {
return nil
}
netOptions, err := daemon.networkOptions(daemon.configStore)
if err != nil {
logrus.Warnf("Failed to reload configuration with network controller: %v", err)
return nil
}
err = daemon.netController.ReloadConfiguration(netOptions...)
if err != nil {
logrus.Warnf("Failed to reload configuration with network controller: %v", err)
}
return nil
}
func validateID(id string) error {
if id == "" {
return fmt.Errorf("Invalid empty id")
}
return nil
}
func isBridgeNetworkDisabled(config *Config) bool {
return config.bridgeConfig.Iface == disableNetworkBridge
}
func (daemon *Daemon) networkOptions(dconfig *Config) ([]nwconfig.Option, error) {
options := []nwconfig.Option{}
if dconfig == nil {
return options, nil
}
options = append(options, nwconfig.OptionDataDir(dconfig.Root))
dd := runconfig.DefaultDaemonNetworkMode()
dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
options = append(options, nwconfig.OptionDefaultNetwork(dn))
if strings.TrimSpace(dconfig.ClusterStore) != "" {
kv := strings.Split(dconfig.ClusterStore, "://")
if len(kv) != 2 {
return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
}
options = append(options, nwconfig.OptionKVProvider(kv[0]))
options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
}
if len(dconfig.ClusterOpts) > 0 {
options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
}
if daemon.discoveryWatcher != nil {
options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
}
if dconfig.ClusterAdvertise != "" {
options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
}
options = append(options, nwconfig.OptionLabels(dconfig.Labels))
options = append(options, driverOptions(dconfig)...)
return options, nil
}
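// Worked example (illustrative, not part of the original source; the consul
// address is hypothetical): a daemon config with ClusterStore
// "consul://127.0.0.1:8500" is split on "://", so libnetwork receives
// OptionKVProvider("consul") and OptionKVProviderURL("127.0.0.1:8500"); a
// value without exactly one "://" is rejected with the
// KV-PROVIDER://KV-URL error above.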
func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
out := make([]types.BlkioStatEntry, len(entries))
for i, re := range entries {
out[i] = types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: re.Op,
Value: re.Value,
}
}
return out
}
|
[
"\"DOCKER_DRIVER\"",
"\"DOCKER_TMPDIR\""
] |
[] |
[
"DOCKER_DRIVER",
"DOCKER_TMPDIR"
] |
[]
|
["DOCKER_DRIVER", "DOCKER_TMPDIR"]
|
go
| 2 | 0 | |
main.go
|
package main
import (
"log"
"net/http"
"os"
"sync"
"yet-another-covid-map-api/casecount"
"yet-another-covid-map-api/requests"
"yet-another-covid-map-api/schedule"
)
var port string
func setupRoutes() {
http.Handle("/", http.FileServer(http.Dir("./static")))
http.HandleFunc("/cases", requests.GetCaseCounts)
http.HandleFunc("/news", requests.GetNewsForCountry)
}
func init() {
port = os.Getenv("PORT")
if port == "" {
port = "8080"
}
}
func main() {
// the Johns Hopkins data is updated at about 23:59 UTC every day, so we will call update at 1am UTC
schedule.CallFunctionDaily(casecount.UpdateCaseCounts, 1)
wg := sync.WaitGroup{}
wg.Add(1)
setupRoutes()
log.Printf("Server started at port %s", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
wg.Wait()
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
lxc/main_aliases.go
|
package main
import (
"fmt"
"os"
"os/exec"
"os/user"
"path"
"path/filepath"
"strings"
"syscall"
"github.com/lxc/lxd/lxc/config"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/i18n"
)
// defaultAliases contains LXC's built-in command line aliases. The built-in
// aliases are checked only if no user-defined alias was found.
var defaultAliases = map[string]string{
"shell": "exec @ARGS@ -- su -l",
}
func findAlias(aliases map[string]string, origArgs []string) ([]string, []string, bool) {
foundAlias := false
aliasKey := []string{}
aliasValue := []string{}
for k, v := range aliases {
foundAlias = true
for i, key := range strings.Split(k, " ") {
if len(origArgs) <= i+1 || origArgs[i+1] != key {
foundAlias = false
break
}
}
if foundAlias {
aliasKey = strings.Split(k, " ")
aliasValue = strings.Split(v, " ")
break
}
}
return aliasKey, aliasValue, foundAlias
}
func expandAlias(conf *config.Config, origArgs []string) ([]string, bool) {
aliasKey, aliasValue, foundAlias := findAlias(conf.Aliases, origArgs)
if !foundAlias {
aliasKey, aliasValue, foundAlias = findAlias(defaultAliases, origArgs)
if !foundAlias {
return []string{}, false
}
}
var newArgs []string
if !strings.HasPrefix(aliasValue[0], "/") {
newArgs = append(newArgs, origArgs[0])
}
hasReplacedArgsVar := false
for i, aliasArg := range aliasValue {
if aliasArg == "@ARGS@" && len(origArgs) > i {
newArgs = append(newArgs, origArgs[i+1:]...)
hasReplacedArgsVar = true
} else {
newArgs = append(newArgs, aliasArg)
}
}
if !hasReplacedArgsVar {
// Add the rest of the arguments
newArgs = append(newArgs, origArgs[len(aliasKey)+1:]...)
}
return newArgs, true
}
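// Worked example (illustrative, not part of the original source; "c1" is a
// hypothetical container name): with the built-in "shell" alias above,
// running `lxc shell c1` gives origArgs = ["lxc", "shell", "c1"] and
// aliasValue = ["exec", "@ARGS@", "--", "su", "-l"], and expandAlias returns
// ["lxc", "exec", "c1", "--", "su", "-l"], i.e. the command is re-executed as
// `lxc exec c1 -- su -l`.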
func execIfAliases() {
args := os.Args
// Avoid loops
if os.Getenv("LXC_ALIASES") == "1" {
return
}
// Figure out the config directory and config path
var configDir string
if os.Getenv("LXD_CONF") != "" {
configDir = os.Getenv("LXD_CONF")
} else if os.Getenv("HOME") != "" {
configDir = path.Join(os.Getenv("HOME"), ".config", "lxc")
} else {
user, err := user.Current()
if err != nil {
return
}
configDir = path.Join(user.HomeDir, ".config", "lxc")
}
confPath := os.ExpandEnv(path.Join(configDir, "config.yml"))
// Load the configuration
var conf *config.Config
var err error
if shared.PathExists(confPath) {
conf, err = config.LoadConfig(confPath)
if err != nil {
return
}
} else {
conf = config.NewConfig(filepath.Dir(confPath), true)
}
// Expand the aliases
newArgs, expanded := expandAlias(conf, args)
if !expanded {
return
}
// Look for the executable
path, err := exec.LookPath(newArgs[0])
if err != nil {
fmt.Fprintf(os.Stderr, i18n.G("Processing aliases failed: %s\n"), err)
os.Exit(1)
}
// Re-exec
environ := syscall.Environ()
environ = append(environ, "LXC_ALIASES=1")
ret := syscall.Exec(path, newArgs, environ)
fmt.Fprintf(os.Stderr, i18n.G("Processing aliases failed: %s\n"), ret)
os.Exit(1)
}
|
[
"\"LXC_ALIASES\"",
"\"LXD_CONF\"",
"\"LXD_CONF\"",
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"LXC_ALIASES",
"HOME",
"LXD_CONF"
] |
[]
|
["LXC_ALIASES", "HOME", "LXD_CONF"]
|
go
| 3 | 0 | |
pkg/network/openshift_sdn.go
|
package network
import (
"net"
"os"
"path/filepath"
"reflect"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
uns "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
netv1 "github.com/openshift/api/network/v1"
operv1 "github.com/openshift/api/operator/v1"
"github.com/openshift/cluster-network-operator/pkg/render"
)
// renderOpenShiftSDN returns the manifests for the openshift-sdn.
// This creates
// - the ClusterNetwork object
// - the sdn namespace
// - the sdn daemonset
// - the openvswitch daemonset
// and some other small things.
func renderOpenShiftSDN(conf *operv1.NetworkSpec, manifestDir string) ([]*uns.Unstructured, error) {
c := conf.DefaultNetwork.OpenShiftSDNConfig
objs := []*uns.Unstructured{}
data := render.MakeRenderData()
data.Data["ReleaseVersion"] = os.Getenv("RELEASE_VERSION")
data.Data["InstallOVS"] = (c.UseExternalOpenvswitch == nil || *c.UseExternalOpenvswitch == false)
data.Data["SDNImage"] = os.Getenv("SDN_IMAGE")
data.Data["CNIPluginsImage"] = os.Getenv("CNI_PLUGINS_IMAGE")
data.Data["KUBERNETES_SERVICE_HOST"] = os.Getenv("KUBERNETES_SERVICE_HOST")
data.Data["KUBERNETES_SERVICE_PORT"] = os.Getenv("KUBERNETES_SERVICE_PORT")
data.Data["Mode"] = c.Mode
data.Data["CNIConfDir"] = pluginCNIConfDir(conf)
data.Data["CNIBinDir"] = CNIBinDir
clusterNetwork, err := clusterNetwork(conf)
if err != nil {
return nil, errors.Wrap(err, "failed to build ClusterNetwork")
}
data.Data["ClusterNetwork"] = clusterNetwork
kpcDefaults := map[string]operv1.ProxyArgumentList{
"metrics-bind-address": {"0.0.0.0"},
"metrics-port": {"9101"},
"healthz-port": {"10256"},
"proxy-mode": {"iptables"},
"iptables-masquerade-bit": {"0"},
}
kpcOverrides := map[string]operv1.ProxyArgumentList{}
if *c.EnableUnidling {
// We already validated that proxy-mode was either unset or iptables.
kpcOverrides["proxy-mode"] = operv1.ProxyArgumentList{"unidling+iptables"}
}
kpc, err := kubeProxyConfiguration(kpcDefaults, conf, kpcOverrides)
if err != nil {
return nil, errors.Wrap(err, "failed to build kube-proxy config")
}
data.Data["KubeProxyConfig"] = kpc
manifests, err := render.RenderDir(filepath.Join(manifestDir, "network/openshift-sdn"), &data)
if err != nil {
return nil, errors.Wrap(err, "failed to render manifests")
}
objs = append(objs, manifests...)
return objs, nil
}
// validateOpenShiftSDN checks that the openshift-sdn specific configuration
// is basically sane.
func validateOpenShiftSDN(conf *operv1.NetworkSpec) []error {
out := []error{}
if len(conf.ClusterNetwork) == 0 {
out = append(out, errors.Errorf("ClusterNetwork cannot be empty"))
}
if len(conf.ServiceNetwork) != 1 {
out = append(out, errors.Errorf("ServiceNetwork must have exactly 1 entry"))
}
sc := conf.DefaultNetwork.OpenShiftSDNConfig
if sc != nil {
if sc.Mode != "" && sdnPluginName(sc.Mode) == "" {
out = append(out, errors.Errorf("invalid openshift-sdn mode %q", sc.Mode))
}
if sc.VXLANPort != nil && (*sc.VXLANPort < 1 || *sc.VXLANPort > 65535) {
out = append(out, errors.Errorf("invalid VXLANPort %d", *sc.VXLANPort))
}
if sc.MTU != nil && (*sc.MTU < 576 || *sc.MTU > 65536) {
out = append(out, errors.Errorf("invalid MTU %d", *sc.MTU))
}
// the proxy mode must be unset or iptables for unidling to work
if (sc.EnableUnidling == nil || *sc.EnableUnidling) &&
conf.KubeProxyConfig != nil && conf.KubeProxyConfig.ProxyArguments != nil &&
len(conf.KubeProxyConfig.ProxyArguments["proxy-mode"]) > 0 &&
conf.KubeProxyConfig.ProxyArguments["proxy-mode"][0] != "iptables" {
out = append(out, errors.Errorf("invalid proxy-mode - when unidling is enabled, proxy-mode must be \"iptables\""))
}
}
proxyErrs := validateKubeProxy(conf)
out = append(out, proxyErrs...)
return out
}
// isOpenShiftSDNChangeSafe ensures no unsafe changes are applied to the running
// network
// It allows changing only useExternalOpenvswitch and enableUnidling.
// In the future, we may support rolling out MTU or external openvswitch alterations.
// As with all is*ChangeSafe functions, defaults have already been applied.
func isOpenShiftSDNChangeSafe(prev, next *operv1.NetworkSpec) []error {
pn := prev.DefaultNetwork.OpenShiftSDNConfig
nn := next.DefaultNetwork.OpenShiftSDNConfig
errs := []error{}
if reflect.DeepEqual(pn, nn) {
return errs
}
if pn.Mode != nn.Mode {
errs = append(errs, errors.Errorf("cannot change openshift-sdn mode"))
}
// deepequal is nil-safe
if !reflect.DeepEqual(pn.VXLANPort, nn.VXLANPort) {
errs = append(errs, errors.Errorf("cannot change openshift-sdn vxlanPort"))
}
if !reflect.DeepEqual(pn.MTU, nn.MTU) {
errs = append(errs, errors.Errorf("cannot change openshift-sdn mtu"))
}
// It is allowed to change useExternalOpenvswitch and enableUnidling
return errs
}
func fillOpenShiftSDNDefaults(conf, previous *operv1.NetworkSpec, hostMTU int) {
// NOTE: If you change any defaults, and it's not a safe change to roll out
// to existing clusters, you MUST use the value from previous instead.
if conf.DeployKubeProxy == nil {
prox := false
conf.DeployKubeProxy = &prox
}
if conf.KubeProxyConfig == nil {
conf.KubeProxyConfig = &operv1.ProxyConfig{}
}
if conf.KubeProxyConfig.BindAddress == "" {
conf.KubeProxyConfig.BindAddress = "0.0.0.0"
}
if conf.KubeProxyConfig.ProxyArguments == nil {
conf.KubeProxyConfig.ProxyArguments = map[string]operv1.ProxyArgumentList{}
}
if conf.DefaultNetwork.OpenShiftSDNConfig == nil {
conf.DefaultNetwork.OpenShiftSDNConfig = &operv1.OpenShiftSDNConfig{}
}
sc := conf.DefaultNetwork.OpenShiftSDNConfig
if sc.VXLANPort == nil {
var port uint32 = 4789
sc.VXLANPort = &port
}
if sc.EnableUnidling == nil {
truth := true
sc.EnableUnidling = &truth
}
// MTU is currently the only field we pull from previous.
// If it's not supplied, we infer it from the node on which we're running.
// However, this can never change, so we always prefer previous.
if sc.MTU == nil {
var mtu uint32 = uint32(hostMTU) - 50 // 50 byte VXLAN header
if previous != nil &&
previous.DefaultNetwork.Type == operv1.NetworkTypeOpenShiftSDN &&
previous.DefaultNetwork.OpenShiftSDNConfig != nil &&
previous.DefaultNetwork.OpenShiftSDNConfig.MTU != nil {
mtu = *previous.DefaultNetwork.OpenShiftSDNConfig.MTU
}
sc.MTU = &mtu
}
if sc.Mode == "" {
sc.Mode = operv1.SDNModeNetworkPolicy
}
}
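// sdnPluginName maps the operator SDN mode to the corresponding openshift-ovs network plugin name, or "" for an unknown mode.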
func sdnPluginName(n operv1.SDNMode) string {
switch n {
case operv1.SDNModeSubnet:
return "redhat/openshift-ovs-subnet"
case operv1.SDNModeMultitenant:
return "redhat/openshift-ovs-multitenant"
case operv1.SDNModeNetworkPolicy:
return "redhat/openshift-ovs-networkpolicy"
}
return ""
}
// clusterNetwork builds the ClusterNetwork used by both the controller and the node
func clusterNetwork(conf *operv1.NetworkSpec) (string, error) {
c := conf.DefaultNetwork.OpenShiftSDNConfig
networks := []netv1.ClusterNetworkEntry{}
for _, entry := range conf.ClusterNetwork {
_, cidr, err := net.ParseCIDR(entry.CIDR) // already validated
if err != nil {
return "", err
}
_, size := cidr.Mask.Size()
hostSubnetLength := uint32(size) - entry.HostPrefix
networks = append(networks, netv1.ClusterNetworkEntry{CIDR: entry.CIDR, HostSubnetLength: hostSubnetLength})
}
cn := netv1.ClusterNetwork{
TypeMeta: metav1.TypeMeta{
APIVersion: "network.openshift.io/v1",
Kind: "ClusterNetwork",
},
ObjectMeta: metav1.ObjectMeta{
Name: netv1.ClusterNetworkDefault,
},
PluginName: sdnPluginName(c.Mode),
Network: networks[0].CIDR,
HostSubnetLength: networks[0].HostSubnetLength,
ClusterNetworks: networks,
ServiceNetwork: conf.ServiceNetwork[0],
VXLANPort: c.VXLANPort,
MTU: c.MTU,
}
cnBuf, err := yaml.Marshal(cn)
if err != nil {
return "", err
}
return string(cnBuf), nil
}
|
[
"\"RELEASE_VERSION\"",
"\"SDN_IMAGE\"",
"\"CNI_PLUGINS_IMAGE\"",
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
] |
[] |
[
"SDN_IMAGE",
"KUBERNETES_SERVICE_HOST",
"RELEASE_VERSION",
"KUBERNETES_SERVICE_PORT",
"CNI_PLUGINS_IMAGE"
] |
[]
|
["SDN_IMAGE", "KUBERNETES_SERVICE_HOST", "RELEASE_VERSION", "KUBERNETES_SERVICE_PORT", "CNI_PLUGINS_IMAGE"]
|
go
| 5 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_example.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
allennlp/__main__.py
|
#!/usr/bin/env python
import logging
import os
import sys
if os.environ.get("ALLENNLP_DEBUG"):
LEVEL = logging.DEBUG
else:
level_name = os.environ.get("ALLENNLP_LOG_LEVEL")
LEVEL = logging._nameToLevel.get(level_name, logging.INFO)
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=LEVEL)
# filelock emits too many messages, so tell it to be quiet unless it has something
# important to say.
_filelock_logger = logging.getLogger("filelock")
_filelock_logger.setLevel(logging.WARNING)
from allennlp.commands import main # noqa
def run():
main(prog="allennlp")
if __name__ == "__main__":
run()
|
[] |
[] |
[
"ALLENNLP_DEBUG",
"ALLENNLP_LOG_LEVEL"
] |
[]
|
["ALLENNLP_DEBUG", "ALLENNLP_LOG_LEVEL"]
|
python
| 2 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
autodoc_mock_imports = ["audioio", "adafruit_waveform", "pulseio"]
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None),'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Adafruit RTTTL Library'
copyright = u'2017 Scott Shawcroft'
author = u'Scott Shawcroft'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.env', 'CODE_OF_CONDUCT.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = '_static/favicon.ico'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AdafruitRTTTLLibrarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AdafruitRTTTLLibrary.tex', u'Adafruit RTTTL Library Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'adafruitRTTTLlibrary', u'Adafruit RTTTL Library Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AdafruitRTTTLLibrary', u'Adafruit RTTTL Library Documentation',
author, 'AdafruitRTTTLLibrary', 'One line description of project.',
'Miscellaneous'),
]
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
plugin/server.go
|
package plugin
import (
"encoding/base64"
"fmt"
"net"
"os"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/services/kms"
"github.com/golang/glog"
"golang.org/x/net/context"
"golang.org/x/sys/unix"
"google.golang.org/grpc"
k8spb "github.com/AliyunContainerService/ack-kms-plugin/v1beta1"
)
const (
// Unix Domain Socket
netProtocol = "unix"
// Version is the current kms api version
Version = "v1beta1"
runtime = "Alibaba Cloud KMS"
runtimeVersion = "0.1.0"
// REGION is region id env
REGION = "REGION"
//KEY_USAGE_ENCRYPT_DECRYPT is the usage of kms key
KEY_USAGE_ENCRYPT_DECRYPT = "ENCRYPT/DECRYPT"
// HTTPS protocol
HTTPS = "https"
)
// KMSServer is the CloudKMS plugin for K8S.
type KMSServer struct {
client *kms.Client
domain string //kms domain
keyID string // *kms.KeyMetadata
pathToUnixSocket string
net.Listener
*grpc.Server
}
// New creates an instance of the KMS Service Server.
func New(pathToUnixSocketFile, keyID string) (*KMSServer, error) {
KMSServer := new(KMSServer)
KMSServer.pathToUnixSocket = pathToUnixSocketFile
KMSServer.keyID = keyID
region := GetMetaData(RegionID)
if region == "" {
return nil, fmt.Errorf("empty region set in env")
}
KMSServer.domain = fmt.Sprintf("kms-vpc.%s.aliyuncs.com", region)
//TODO init kms client with sts token
accessKey := os.Getenv("ACCESS_KEY_ID")
accessSecret := os.Getenv("ACCESS_KEY_SECRET")
if accessKey == "" || accessSecret == "" {
return nil, fmt.Errorf("empty AK env set in env")
}
client, err := kms.NewClientWithAccessKey(region, accessKey, accessSecret)
if err != nil {
return nil, fmt.Errorf("failed to init kms client, err: %v", client)
}
KMSServer.client = client
return KMSServer, nil
}
// getKey generates a new Alibaba Cloud KMS key and returns its key ID.
func (s *KMSServer) getKey(client *kms.Client) (string, error) {
args := &kms.CreateKeyRequest{
KeyUsage: KEY_USAGE_ENCRYPT_DECRYPT,
Description: fmt.Sprintf("kms-plugin-%d", time.Now().Unix()),
}
//args.Domain = s.domain
args.SetScheme(HTTPS)
response, err := client.CreateKey(args)
if err != nil {
glog.Errorf("Failed to generate kms key, err: %++v", err)
return "", err
}
glog.V(4).Infof("Success generate kms key = %++v", response)
return response.KeyMetadata.KeyId, nil
}
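// setupRPCServer removes any stale socket file, listens on the unix domain socket and registers the KMS gRPC service.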
func (s *KMSServer) setupRPCServer() error {
if err := s.cleanSockFile(); err != nil {
return err
}
listener, err := net.Listen(netProtocol, s.pathToUnixSocket)
if err != nil {
return fmt.Errorf("failed to start listener, error: %v", err)
}
s.Listener = listener
glog.Infof("register unix domain socket: %s", s.pathToUnixSocket)
server := grpc.NewServer()
k8spb.RegisterKeyManagementServiceServer(server, s)
s.Server = server
return nil
}
// StartRPCServer starts gRPC server or dies.
func (s *KMSServer) StartRPCServer() (*grpc.Server, chan error) {
errorChan := make(chan error, 1)
if err := s.setupRPCServer(); err != nil {
errorChan <- err
close(errorChan)
return nil, errorChan
}
go func() {
defer close(errorChan)
errorChan <- s.Serve(s.Listener)
}()
glog.V(4).Infof("kms server started successfully.")
return s.Server, errorChan
}
// Version returns the current api version.
func (s *KMSServer) Version(ctx context.Context, request *k8spb.VersionRequest) (*k8spb.VersionResponse, error) {
glog.V(4).Infoln(Version)
return &k8spb.VersionResponse{Version: Version, RuntimeName: runtime, RuntimeVersion: runtimeVersion}, nil
}
// Encrypt executes the encryption operation in the KMS provider.
func (s *KMSServer) Encrypt(ctx context.Context, request *k8spb.EncryptRequest) (*k8spb.EncryptResponse, error) {
glog.V(4).Infoln("Processing EncryptRequest: ")
if s.keyID == "" {
key, err := s.getKey(s.client)
if err != nil {
return nil, err
}
s.keyID = key
}
glog.V(4).Infof("domain %s , key %s", s.domain, s.keyID)
encReq := kms.CreateEncryptRequest()
encReq.KeyId = s.keyID
encReq.Plaintext = base64.StdEncoding.EncodeToString(request.Plain)
encReq.Domain = s.domain
encReq.SetScheme(HTTPS)
encReq.SetHTTPSInsecure(true)
response, err := s.client.Encrypt(encReq)
if err != nil {
glog.Errorf("Failed to encrypt, error: %v", err)
return &k8spb.EncryptResponse{}, err
}
glog.V(4).Infof("Encrypt request %s finish", response.RequestId)
return &k8spb.EncryptResponse{Cipher: []byte(response.CiphertextBlob)}, nil
}
// Decrypt executes the decryption operation in the KMS provider.
func (s *KMSServer) Decrypt(ctx context.Context, request *k8spb.DecryptRequest) (*k8spb.DecryptResponse, error) {
glog.V(4).Infoln("Processing DecryptRequest: ")
if s.keyID == "" {
glog.Errorf("Empty key found to decrypt...")
return &k8spb.DecryptResponse{}, fmt.Errorf("empty key found to decrypt")
}
if s.keyID == "" {
key, err := s.getKey(s.client)
if err != nil {
return nil, err
}
s.keyID = key
}
decReq := kms.CreateDecryptRequest()
decReq.CiphertextBlob = string(request.Cipher)
decReq.Domain = s.domain
decReq.SetScheme(HTTPS)
decReq.SetHTTPSInsecure(true)
response, err := s.client.Decrypt(decReq)
if err != nil {
glog.Errorf("failed to decrypt, error: %v", err)
return &k8spb.DecryptResponse{}, err
}
plain, err := base64.StdEncoding.DecodeString(response.Plaintext)
if err != nil {
glog.Errorf("failed to decode plain text, error: %v", err)
return &k8spb.DecryptResponse{}, err
}
return &k8spb.DecryptResponse{Plain: plain}, nil
}
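// cleanSockFile deletes a leftover unix socket file from a previous run, ignoring the case where it does not exist.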
func (s *KMSServer) cleanSockFile() error {
err := unix.Unlink(s.pathToUnixSocket)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to delete socket file, error: %v", err)
}
return nil
}
|
[
"\"ACCESS_KEY_ID\"",
"\"ACCESS_KEY_SECRET\""
] |
[] |
[
"ACCESS_KEY_SECRET",
"ACCESS_KEY_ID"
] |
[]
|
["ACCESS_KEY_SECRET", "ACCESS_KEY_ID"]
|
go
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# twitch_osu_bot directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, 'twitch_osu_bot'))
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
broadcast-monitoring/src/audio_detect/app/main.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import logging
import json
import os
import sys
# Conditionally add /opt to the PYTHONPATH
if os.getenv('AWS_EXECUTION_ENV') is not None:
sys.path.append('/opt')
from audio_detect import execute_ffmpeg
from common.utils import download_file_from_s3, check_enabled, cleanup_dir
from common.config import LOG_LEVEL
logging.basicConfig()
logger = logging.getLogger('AudioDetection')
logger.setLevel(LOG_LEVEL)
SILENCE_THRESHOLD = os.getenv('SILENCE_THRESHOLD', '-60dB')
SILENCE_DURATION = os.getenv('SILENCE_DURATION', 1)
@check_enabled("audio_check_enabled")
@cleanup_dir()
def lambda_handler(event, context):
"""Download a transport stream file and process the audio to detect
silence and the mean volume
:param event:
{
"s3Bucket": "aws-rnd-broadcast-maas-video-processing-dev",
"s3Key": "live/test_video_single_pipeline/test_1.m3u8",
"s3VersionId": "KJCfz6c8Il5E_23jbzwYuFhGIpvMnrJE",
"parsed": {
{
"isMasterManifest": false,
"streamId": "test_1",
"lastSegment": {
"s3Key": "live/test_video_single_pipeline/test_1_00039.ts", # only if isMasterManifest = false
"startDateTime": "2020-01-23T21:36:35.290000Z", # only returned if isMasterManifest = false
"durationSec": 6
},
"expectedProgram"{
...
}
}
}
}
:param context: lambda context object
:return: A dict with representations of volume and silence chunks
{
"volume": {
"mean": -22.0,
"max": -4.3
},
"silence_chunks": [
{ "start": 1.33494, "end": 1.84523 },
{ "start": 3.52498, "end": 3.85456 }
]
}
"""
logger.info('Received event: %s', json.dumps(event, indent=2))
s3_bucket = event['s3Bucket']
segment_s3_key = event['parsed']['lastSegment']['s3Key']
# the cleanup_dir decorator will ensure the tmp/ working directory gets cleaned up if lambda container is reused
input_stream = download_file_from_s3(s3_bucket, segment_s3_key)
raw_results = execute_ffmpeg(
input_stream, threshold=SILENCE_THRESHOLD, duration=SILENCE_DURATION
)
logger.info(f'raw results:{raw_results}')
results = {
'silence_chunks': [convert_silence_to_dict(seg) for seg in raw_results['silencedetect']]
}
if raw_results['volumedetect']:
results['volume'] = convert_volume_to_dict(raw_results['volumedetect'])
return results
def convert_volume_to_dict(vol):
mean_vol, max_vol = vol
return {'mean': mean_vol, 'max': max_vol}
def convert_silence_to_dict(silence):
start, end = silence
return {'start': start, 'end': end}
|
[] |
[] |
[
"AWS_EXECUTION_ENV",
"SILENCE_DURATION",
"SILENCE_THRESHOLD"
] |
[]
|
["AWS_EXECUTION_ENV", "SILENCE_DURATION", "SILENCE_THRESHOLD"]
|
python
| 3 | 0 | |
storage/objects/objects_test.go
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package objects
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"testing"
"time"
"cloud.google.com/go/storage"
"github.com/GoogleCloudPlatform/golang-samples/internal/testutil"
"google.golang.org/api/iterator"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// TestObjects runs all samples tests of the package.
func TestObjects(t *testing.T) {
tc := testutil.SystemTest(t)
ctx := context.Background()
client, err := storage.NewClient(ctx)
if err != nil {
t.Fatalf("storage.NewClient: %v", err)
}
defer client.Close()
var (
bucket = tc.ProjectID + "-samples-object-bucket-1"
dstBucket = tc.ProjectID + "-samples-object-bucket-2"
object1 = "foo.txt"
object2 = "foo/a.txt"
allAuthenticatedUsers = storage.AllAuthenticatedUsers
roleReader = storage.RoleReader
)
cleanBucket(t, ctx, client, tc.ProjectID, bucket)
cleanBucket(t, ctx, client, tc.ProjectID, dstBucket)
if err := uploadFile(ioutil.Discard, bucket, object1); err != nil {
t.Fatalf("uploadFile(%q): %v", object1, err)
}
if err := uploadFile(ioutil.Discard, bucket, object2); err != nil {
t.Fatalf("uploadFile(%q): %v", object2, err)
}
{
// Should show both "foo.txt" and "foo/a.txt"
var buf bytes.Buffer
if err := listFiles(&buf, bucket); err != nil {
t.Fatalf("listFiles: %v", err)
}
if got, want := buf.String(), object1; !strings.Contains(got, want) {
t.Errorf("List() got %q; want to contain %q", got, want)
}
if got, want := buf.String(), object2; !strings.Contains(got, want) {
t.Errorf("List() got %q; want to contain %q", got, want)
}
}
{
// Should only show "foo/a.txt", not "foo.txt"
const prefix = "foo/"
var buf bytes.Buffer
if err := listFilesWithPrefix(&buf, bucket, prefix, ""); err != nil {
t.Fatalf("listFilesWithPrefix: %v", err)
}
if got, want := buf.String(), object1; strings.Contains(got, want) {
t.Errorf("List(%q) got %q; want NOT to contain %q", prefix, got, want)
}
if got, want := buf.String(), object2; !strings.Contains(got, want) {
t.Errorf("List(%q) got %q; want to contain %q", prefix, got, want)
}
}
{
if err := downloadUsingRequesterPays(ioutil.Discard, bucket, object1, tc.ProjectID); err != nil {
t.Errorf("downloadUsingRequesterPays: %v", err)
}
}
data, err := downloadFile(ioutil.Discard, bucket, object1)
if err != nil {
t.Fatalf("downloadFile: %v", err)
}
if got, want := string(data), "Hello\nworld"; got != want {
t.Errorf("contents = %q; want %q", got, want)
}
_, err = getMetadata(ioutil.Discard, bucket, object1)
if err != nil {
t.Errorf("getMetadata: %v", err)
}
if err := makePublic(ioutil.Discard, bucket, object1, allAuthenticatedUsers, roleReader); err != nil {
t.Errorf("makePublic: %v", err)
}
err = moveFile(ioutil.Discard, bucket, object1)
if err != nil {
t.Fatalf("moveFile: %v", err)
}
// object1's new name.
object1 = object1 + "-rename"
if err := copyFile(ioutil.Discard, dstBucket, bucket, object1); err != nil {
t.Errorf("copyFile: %v", err)
}
key := []byte("my-secret-AES-256-encryption-key")
newKey := []byte("My-secret-AES-256-encryption-key")
if err := uploadEncryptedFile(ioutil.Discard, bucket, object1, key); err != nil {
t.Errorf("uploadEncryptedFile: %v", err)
}
data, err = downloadEncryptedFile(ioutil.Discard, bucket, object1, key)
if err != nil {
t.Errorf("downloadEncryptedFile: %v", err)
}
if got, want := string(data), "top secret"; got != want {
t.Errorf("object content = %q; want %q", got, want)
}
if err := rotateEncryptionKey(ioutil.Discard, bucket, object1, key, newKey); err != nil {
t.Errorf("rotateEncryptionKey: %v", err)
}
if err := deleteFile(ioutil.Discard, bucket, object1); err != nil {
t.Errorf("deleteFile: %v", err)
}
if err := deleteFile(ioutil.Discard, bucket, object2); err != nil {
t.Errorf("deleteFile: %v", err)
}
testutil.Retry(t, 10, time.Second, func(r *testutil.R) {
// Cleanup, this part won't be executed if Fatal happens.
// TODO(jbd): Implement garbage cleaning.
if err := client.Bucket(bucket).Delete(ctx); err != nil {
r.Errorf("Bucket(%q).Delete: %v", bucket, err)
}
})
testutil.Retry(t, 10, time.Second, func(r *testutil.R) {
if err := deleteFile(ioutil.Discard, dstBucket, object1+"-copy"); err != nil {
r.Errorf("deleteFile: %v", err)
}
})
testutil.Retry(t, 10, time.Second, func(r *testutil.R) {
if err := client.Bucket(dstBucket).Delete(ctx); err != nil {
r.Errorf("Bucket(%q).Delete: %v", dstBucket, err)
}
})
}
func TestKMSObjects(t *testing.T) {
tc := testutil.SystemTest(t)
ctx := context.Background()
client, err := storage.NewClient(ctx)
if err != nil {
t.Fatalf("storage.NewClient: %v", err)
}
defer client.Close()
keyRingID := os.Getenv("GOLANG_SAMPLES_KMS_KEYRING")
cryptoKeyID := os.Getenv("GOLANG_SAMPLES_KMS_CRYPTOKEY")
if keyRingID == "" || cryptoKeyID == "" {
t.Skip("GOLANG_SAMPLES_KMS_KEYRING and GOLANG_SAMPLES_KMS_CRYPTOKEY must be set")
}
var (
bucket = tc.ProjectID + "-samples-object-bucket-1"
dstBucket = tc.ProjectID + "-samples-object-bucket-2"
object1 = "foo.txt"
)
cleanBucket(t, ctx, client, tc.ProjectID, bucket)
cleanBucket(t, ctx, client, tc.ProjectID, dstBucket)
kmsKeyName := fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", tc.ProjectID, "global", keyRingID, cryptoKeyID)
if err := uploadWithKMSKey(ioutil.Discard, bucket, object1, kmsKeyName); err != nil {
t.Errorf("uploadWithKMSKey: %v", err)
}
}
func TestV4SignedURL(t *testing.T) {
tc := testutil.SystemTest(t)
ctx := context.Background()
client, err := storage.NewClient(ctx)
if err != nil {
t.Fatalf("storage.NewClient: %v", err)
}
defer client.Close()
bucketName := tc.ProjectID + "-signed-url-bucket-name"
objectName := "foo.txt"
serviceAccount := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
cleanBucket(t, ctx, client, tc.ProjectID, bucketName)
putBuf := new(bytes.Buffer)
putURL, err := generateV4PutObjectSignedURL(putBuf, bucketName, objectName, serviceAccount)
if err != nil {
t.Errorf("generateV4PutObjectSignedURL: %v", err)
}
got := putBuf.String()
if want := "Generated PUT signed URL:"; !strings.Contains(got, want) {
t.Errorf("got %q, want %q", got, want)
}
httpClient := &http.Client{}
request, err := http.NewRequest("PUT", putURL, strings.NewReader("hello world"))
request.ContentLength = 11
request.Header.Set("Content-Type", "application/octet-stream")
response, err := httpClient.Do(request)
if err != nil {
t.Errorf("httpClient.Do: %v", err)
}
getBuf := new(bytes.Buffer)
getURL, err := generateV4GetObjectSignedURL(getBuf, bucketName, objectName, serviceAccount)
if err != nil {
t.Errorf("generateV4GetObjectSignedURL: %v", err)
}
got = getBuf.String()
if want := "Generated GET signed URL:"; !strings.Contains(got, want) {
t.Errorf("got %q, want %q", got, want)
}
response, err = http.Get(getURL)
if err != nil {
t.Errorf("http.Get: %v", err)
}
defer response.Body.Close()
body, err := ioutil.ReadAll(response.Body)
if err != nil {
t.Errorf("ioutil.ReadAll: %v", err)
}
if got, want := string(body), "hello world"; got != want {
t.Errorf("object content = %q; want %q", got, want)
}
}
func TestObjectBucketLock(t *testing.T) {
tc := testutil.SystemTest(t)
ctx := context.Background()
client, err := storage.NewClient(ctx)
if err != nil {
t.Fatalf("storage.NewClient: %v", err)
}
defer client.Close()
var (
bucketName = tc.ProjectID + "-retent-samples-object-bucket"
objectName = "foo.txt"
retentionPeriod = 5 * time.Second
)
cleanBucket(t, ctx, client, tc.ProjectID, bucketName)
bucket := client.Bucket(bucketName)
if err := uploadFile(ioutil.Discard, bucketName, objectName); err != nil {
t.Fatalf("uploadFile(%q): %v", objectName, err)
}
if _, err := bucket.Update(ctx, storage.BucketAttrsToUpdate{
RetentionPolicy: &storage.RetentionPolicy{
RetentionPeriod: retentionPeriod,
},
}); err != nil {
t.Errorf("Bucket(%q).Update: %v", bucketName, err)
}
if err := setEventBasedHold(ioutil.Discard, bucketName, objectName); err != nil {
t.Errorf("setEventBasedHold(%q, %q): %v", bucketName, objectName, err)
}
oAttrs, err := getMetadata(ioutil.Discard, bucketName, objectName)
if err != nil {
t.Errorf("getMetadata: %v", err)
}
if !oAttrs.EventBasedHold {
t.Errorf("event-based hold is not enabled")
}
if err := releaseEventBasedHold(ioutil.Discard, bucketName, objectName); err != nil {
t.Errorf("releaseEventBasedHold(%q, %q): %v", bucketName, objectName, err)
}
oAttrs, err = getMetadata(ioutil.Discard, bucketName, objectName)
if err != nil {
t.Errorf("getMetadata: %v", err)
}
if oAttrs.EventBasedHold {
t.Errorf("event-based hold is not disabled")
}
if _, err := bucket.Update(ctx, storage.BucketAttrsToUpdate{
RetentionPolicy: &storage.RetentionPolicy{},
}); err != nil {
t.Errorf("Bucket(%q).Update: %v", bucketName, err)
}
if err := setTemporaryHold(ioutil.Discard, bucketName, objectName); err != nil {
t.Errorf("setTemporaryHold(%q, %q): %v", bucketName, objectName, err)
}
oAttrs, err = getMetadata(ioutil.Discard, bucketName, objectName)
if err != nil {
t.Errorf("getMetadata: %v", err)
}
if !oAttrs.TemporaryHold {
t.Errorf("temporary hold is not disabled")
}
if err := releaseTemporaryHold(ioutil.Discard, bucketName, objectName); err != nil {
t.Errorf("releaseTemporaryHold(%q, %q): %v", bucketName, objectName, err)
}
oAttrs, err = getMetadata(ioutil.Discard, bucketName, objectName)
if err != nil {
t.Errorf("getMetadata: %v", err)
}
if oAttrs.TemporaryHold {
t.Errorf("temporary hold is not disabled")
}
}
// cleanBucket ensures there's a fresh bucket with a given name, deleting the existing bucket if it already exists.
func cleanBucket(t *testing.T, ctx context.Context, client *storage.Client, projectID, bucket string) {
b := client.Bucket(bucket)
_, err := b.Attrs(ctx)
if err == nil {
it := b.Objects(ctx, nil)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
t.Fatalf("Bucket(%q).Objects: %v", bucket, err)
}
if attrs.EventBasedHold || attrs.TemporaryHold {
if _, err := b.Object(attrs.Name).Update(ctx, storage.ObjectAttrsToUpdate{
TemporaryHold: false,
EventBasedHold: false,
}); err != nil {
t.Fatalf("Bucket(%q).Object(%q).Update: %v", bucket, attrs.Name, err)
}
}
if err := b.Object(attrs.Name).Delete(ctx); err != nil {
t.Fatalf("Bucket(%q).Object(%q).Delete: %v", bucket, attrs.Name, err)
}
}
if err := b.Delete(ctx); err != nil {
t.Fatalf("Bucket(%q).Delete: %v", bucket, err)
}
}
if err := b.Create(ctx, projectID, nil); err != nil && status.Code(err) != codes.AlreadyExists {
t.Fatalf("Bucket(%q).Create: %v", bucket, err)
}
}
|
[
"\"GOLANG_SAMPLES_KMS_KEYRING\"",
"\"GOLANG_SAMPLES_KMS_CRYPTOKEY\"",
"\"GOOGLE_APPLICATION_CREDENTIALS\""
] |
[] |
[
"GOLANG_SAMPLES_KMS_CRYPTOKEY",
"GOOGLE_APPLICATION_CREDENTIALS",
"GOLANG_SAMPLES_KMS_KEYRING"
] |
[]
|
["GOLANG_SAMPLES_KMS_CRYPTOKEY", "GOOGLE_APPLICATION_CREDENTIALS", "GOLANG_SAMPLES_KMS_KEYRING"]
|
go
| 3 | 0 | |
api/cmd/portainer/main.go
|
package main
import (
"encoding/json"
"os"
"strings"
"time"
portainer "github.com/hazik1024/portainer/api"
"github.com/hazik1024/portainer/api/bolt"
"github.com/hazik1024/portainer/api/cli"
"github.com/hazik1024/portainer/api/cron"
"github.com/hazik1024/portainer/api/crypto"
"github.com/hazik1024/portainer/api/custom/build"
"github.com/hazik1024/portainer/api/custom/mysqldb"
"github.com/hazik1024/portainer/api/custom/stackbackup"
"github.com/hazik1024/portainer/api/docker"
"github.com/hazik1024/portainer/api/exec"
"github.com/hazik1024/portainer/api/filesystem"
"github.com/hazik1024/portainer/api/git"
"github.com/hazik1024/portainer/api/http"
"github.com/hazik1024/portainer/api/http/client"
"github.com/hazik1024/portainer/api/jwt"
"github.com/hazik1024/portainer/api/ldap"
"github.com/hazik1024/portainer/api/libcompose"
"log"
_ "github.com/go-sql-driver/mysql"
)
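// initCLI parses and validates the command line flags.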
func initCLI() *portainer.CLIFlags {
var cli portainer.CLIService = &cli.Service{}
flags, err := cli.ParseFlags(portainer.APIVersion)
if err != nil {
log.Fatal(err)
}
err = cli.ValidateFlags(flags)
if err != nil {
log.Fatal(err)
}
return flags
}
func initFileService(dataStorePath string) portainer.FileService {
fileService, err := filesystem.NewService(dataStorePath, "")
if err != nil {
log.Fatal(err)
}
return fileService
}
func initStore(dataStorePath string, fileService portainer.FileService) *bolt.Store {
store, err := bolt.NewStore(dataStorePath, fileService)
if err != nil {
log.Fatal(err)
}
err = store.Open()
if err != nil {
log.Fatal(err)
}
err = store.Init()
if err != nil {
log.Fatal(err)
}
err = store.MigrateData()
if err != nil {
log.Fatal(err)
}
return store
}
func initComposeStackManager(dataStorePath string) portainer.ComposeStackManager {
return libcompose.NewComposeStackManager(dataStorePath)
}
func initSwarmStackManager(assetsPath string, dataStorePath string, signatureService portainer.DigitalSignatureService, fileService portainer.FileService) (portainer.SwarmStackManager, error) {
return exec.NewSwarmStackManager(assetsPath, dataStorePath, signatureService, fileService)
}
func initJWTService(authenticationEnabled bool) portainer.JWTService {
if authenticationEnabled {
jwtService, err := jwt.NewService()
if err != nil {
log.Fatal(err)
}
return jwtService
}
return nil
}
func initDigitalSignatureService() portainer.DigitalSignatureService {
return crypto.NewECDSAService(os.Getenv("AGENT_SECRET"))
}
func initCryptoService() portainer.CryptoService {
return &crypto.Service{}
}
func initLDAPService() portainer.LDAPService {
return &ldap.Service{}
}
func initGitService() portainer.GitService {
return &git.Service{}
}
func initClientFactory(signatureService portainer.DigitalSignatureService) *docker.ClientFactory {
return docker.NewClientFactory(signatureService)
}
func initSnapshotter(clientFactory *docker.ClientFactory) portainer.Snapshotter {
return docker.NewSnapshotter(clientFactory)
}
func initJobScheduler() portainer.JobScheduler {
return cron.NewJobScheduler()
}
func loadSnapshotSystemSchedule(jobScheduler portainer.JobScheduler, snapshotter portainer.Snapshotter, scheduleService portainer.ScheduleService, endpointService portainer.EndpointService, settingsService portainer.SettingsService) error {
settings, err := settingsService.Settings()
if err != nil {
return err
}
schedules, err := scheduleService.SchedulesByJobType(portainer.SnapshotJobType)
if err != nil {
return err
}
var snapshotSchedule *portainer.Schedule
if len(schedules) == 0 {
snapshotJob := &portainer.SnapshotJob{}
snapshotSchedule = &portainer.Schedule{
ID: portainer.ScheduleID(scheduleService.GetNextIdentifier()),
Name: "system_snapshot",
CronExpression: "@every " + settings.SnapshotInterval,
Recurring: true,
JobType: portainer.SnapshotJobType,
SnapshotJob: snapshotJob,
Created: time.Now().Unix(),
}
} else {
snapshotSchedule = &schedules[0]
}
snapshotJobContext := cron.NewSnapshotJobContext(endpointService, snapshotter)
snapshotJobRunner := cron.NewSnapshotJobRunner(snapshotSchedule, snapshotJobContext)
err = jobScheduler.ScheduleJob(snapshotJobRunner)
if err != nil {
return err
}
if len(schedules) == 0 {
return scheduleService.CreateSchedule(snapshotSchedule)
}
return nil
}
func loadEndpointSyncSystemSchedule(jobScheduler portainer.JobScheduler, scheduleService portainer.ScheduleService, endpointService portainer.EndpointService, flags *portainer.CLIFlags) error {
if *flags.ExternalEndpoints == "" {
return nil
}
log.Println("Using external endpoint definition. Endpoint management via the API will be disabled.")
schedules, err := scheduleService.SchedulesByJobType(portainer.EndpointSyncJobType)
if err != nil {
return err
}
if len(schedules) != 0 {
return nil
}
endpointSyncJob := &portainer.EndpointSyncJob{}
endpointSyncSchedule := &portainer.Schedule{
ID: portainer.ScheduleID(scheduleService.GetNextIdentifier()),
Name: "system_endpointsync",
CronExpression: "@every " + *flags.SyncInterval,
Recurring: true,
JobType: portainer.EndpointSyncJobType,
EndpointSyncJob: endpointSyncJob,
Created: time.Now().Unix(),
}
endpointSyncJobContext := cron.NewEndpointSyncJobContext(endpointService, *flags.ExternalEndpoints)
endpointSyncJobRunner := cron.NewEndpointSyncJobRunner(endpointSyncSchedule, endpointSyncJobContext)
err = jobScheduler.ScheduleJob(endpointSyncJobRunner)
if err != nil {
return err
}
return scheduleService.CreateSchedule(endpointSyncSchedule)
}
func loadSchedulesFromDatabase(jobScheduler portainer.JobScheduler, jobService portainer.JobService, scheduleService portainer.ScheduleService, endpointService portainer.EndpointService, fileService portainer.FileService) error {
schedules, err := scheduleService.Schedules()
if err != nil {
return err
}
for _, schedule := range schedules {
if schedule.JobType == portainer.ScriptExecutionJobType {
jobContext := cron.NewScriptExecutionJobContext(jobService, endpointService, fileService)
jobRunner := cron.NewScriptExecutionJobRunner(&schedule, jobContext)
err = jobScheduler.ScheduleJob(jobRunner)
if err != nil {
return err
}
}
}
return nil
}
func initStatus(endpointManagement, snapshot bool, flags *portainer.CLIFlags) *portainer.Status {
return &portainer.Status{
Analytics: !*flags.NoAnalytics,
Authentication: !*flags.NoAuth,
EndpointManagement: endpointManagement,
Snapshot: snapshot,
Version: portainer.APIVersion,
}
}
func initDockerHub(dockerHubService portainer.DockerHubService) error {
_, err := dockerHubService.DockerHub()
if err == portainer.ErrObjectNotFound {
dockerhub := &portainer.DockerHub{
Authentication: false,
Username: "",
Password: "",
}
return dockerHubService.UpdateDockerHub(dockerhub)
} else if err != nil {
return err
}
return nil
}
func initSettings(settingsService portainer.SettingsService, flags *portainer.CLIFlags) error {
_, err := settingsService.Settings()
if err == portainer.ErrObjectNotFound {
settings := &portainer.Settings{
LogoURL: *flags.Logo,
AuthenticationMethod: portainer.AuthenticationInternal,
LDAPSettings: portainer.LDAPSettings{
AutoCreateUsers: true,
TLSConfig: portainer.TLSConfiguration{},
SearchSettings: []portainer.LDAPSearchSettings{
portainer.LDAPSearchSettings{},
},
GroupSearchSettings: []portainer.LDAPGroupSearchSettings{
portainer.LDAPGroupSearchSettings{},
},
},
OAuthSettings: portainer.OAuthSettings{},
AllowBindMountsForRegularUsers: true,
AllowPrivilegedModeForRegularUsers: true,
EnableHostManagementFeatures: false,
SnapshotInterval: *flags.SnapshotInterval,
}
if *flags.Templates != "" {
settings.TemplatesURL = *flags.Templates
}
if *flags.Labels != nil {
settings.BlackListedLabels = *flags.Labels
} else {
settings.BlackListedLabels = make([]portainer.Pair, 0)
}
return settingsService.UpdateSettings(settings)
} else if err != nil {
return err
}
return nil
}
func initTemplates(templateService portainer.TemplateService, fileService portainer.FileService, templateURL, templateFile string) error {
if templateURL != "" {
log.Printf("Portainer started with the --templates flag. Using external templates, template management will be disabled.")
return nil
}
existingTemplates, err := templateService.Templates()
if err != nil {
return err
}
if len(existingTemplates) != 0 {
log.Printf("Templates already registered inside the database. Skipping template import.")
return nil
}
templatesJSON, err := fileService.GetFileContent(templateFile)
if err != nil {
log.Println("Unable to retrieve template definitions via filesystem")
return err
}
var templates []portainer.Template
err = json.Unmarshal(templatesJSON, &templates)
if err != nil {
log.Println("Unable to parse templates file. Please review your template definition file.")
return err
}
for _, template := range templates {
err := templateService.CreateTemplate(&template)
if err != nil {
return err
}
}
return nil
}
func retrieveFirstEndpointFromDatabase(endpointService portainer.EndpointService) *portainer.Endpoint {
endpoints, err := endpointService.Endpoints()
if err != nil {
log.Fatal(err)
}
return &endpoints[0]
}
func loadAndParseKeyPair(fileService portainer.FileService, signatureService portainer.DigitalSignatureService) error {
private, public, err := fileService.LoadKeyPair()
if err != nil {
return err
}
return signatureService.ParseKeyPair(private, public)
}
func generateAndStoreKeyPair(fileService portainer.FileService, signatureService portainer.DigitalSignatureService) error {
private, public, err := signatureService.GenerateKeyPair()
if err != nil {
return err
}
privateHeader, publicHeader := signatureService.PEMHeaders()
return fileService.StoreKeyPair(private, public, privateHeader, publicHeader)
}
func initKeyPair(fileService portainer.FileService, signatureService portainer.DigitalSignatureService) error {
existingKeyPair, err := fileService.KeyPairFilesExist()
if err != nil {
log.Fatal(err)
}
if existingKeyPair {
return loadAndParseKeyPair(fileService, signatureService)
}
return generateAndStoreKeyPair(fileService, signatureService)
}
func createTLSSecuredEndpoint(flags *portainer.CLIFlags, endpointService portainer.EndpointService, snapshotter portainer.Snapshotter) error {
tlsConfiguration := portainer.TLSConfiguration{
TLS: *flags.TLS,
TLSSkipVerify: *flags.TLSSkipVerify,
}
if *flags.TLS {
tlsConfiguration.TLSCACertPath = *flags.TLSCacert
tlsConfiguration.TLSCertPath = *flags.TLSCert
tlsConfiguration.TLSKeyPath = *flags.TLSKey
} else if !*flags.TLS && *flags.TLSSkipVerify {
tlsConfiguration.TLS = true
}
endpointID := endpointService.GetNextIdentifier()
endpoint := &portainer.Endpoint{
ID: portainer.EndpointID(endpointID),
Name: "primary",
URL: *flags.EndpointURL,
GroupID: portainer.EndpointGroupID(1),
Type: portainer.DockerEnvironment,
TLSConfig: tlsConfiguration,
UserAccessPolicies: portainer.UserAccessPolicies{},
TeamAccessPolicies: portainer.TeamAccessPolicies{},
Extensions: []portainer.EndpointExtension{},
Tags: []string{},
Status: portainer.EndpointStatusUp,
Snapshots: []portainer.Snapshot{},
}
if strings.HasPrefix(endpoint.URL, "tcp://") {
tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(tlsConfiguration.TLSCACertPath, tlsConfiguration.TLSCertPath, tlsConfiguration.TLSKeyPath, tlsConfiguration.TLSSkipVerify)
if err != nil {
return err
}
agentOnDockerEnvironment, err := client.ExecutePingOperation(endpoint.URL, tlsConfig)
if err != nil {
return err
}
if agentOnDockerEnvironment {
endpoint.Type = portainer.AgentOnDockerEnvironment
}
}
return snapshotAndPersistEndpoint(endpoint, endpointService, snapshotter)
}
func createUnsecuredEndpoint(endpointURL string, endpointService portainer.EndpointService, snapshotter portainer.Snapshotter) error {
if strings.HasPrefix(endpointURL, "tcp://") {
_, err := client.ExecutePingOperation(endpointURL, nil)
if err != nil {
return err
}
}
endpointID := endpointService.GetNextIdentifier()
endpoint := &portainer.Endpoint{
ID: portainer.EndpointID(endpointID),
Name: "primary",
URL: endpointURL,
GroupID: portainer.EndpointGroupID(1),
Type: portainer.DockerEnvironment,
TLSConfig: portainer.TLSConfiguration{},
UserAccessPolicies: portainer.UserAccessPolicies{},
TeamAccessPolicies: portainer.TeamAccessPolicies{},
Extensions: []portainer.EndpointExtension{},
Tags: []string{},
Status: portainer.EndpointStatusUp,
Snapshots: []portainer.Snapshot{},
}
return snapshotAndPersistEndpoint(endpoint, endpointService, snapshotter)
}
func snapshotAndPersistEndpoint(endpoint *portainer.Endpoint, endpointService portainer.EndpointService, snapshotter portainer.Snapshotter) error {
snapshot, err := snapshotter.CreateSnapshot(endpoint)
endpoint.Status = portainer.EndpointStatusUp
if err != nil {
log.Printf("http error: endpoint snapshot error (endpoint=%s, URL=%s) (err=%s)\n", endpoint.Name, endpoint.URL, err)
}
if snapshot != nil {
endpoint.Snapshots = []portainer.Snapshot{*snapshot}
}
return endpointService.CreateEndpoint(endpoint)
}
func initEndpoint(flags *portainer.CLIFlags, endpointService portainer.EndpointService, snapshotter portainer.Snapshotter) error {
if *flags.EndpointURL == "" {
return nil
}
endpoints, err := endpointService.Endpoints()
if err != nil {
return err
}
if len(endpoints) > 0 {
log.Println("Instance already has defined endpoints. Skipping the endpoint defined via CLI.")
return nil
}
if *flags.TLS || *flags.TLSSkipVerify {
return createTLSSecuredEndpoint(flags, endpointService, snapshotter)
}
return createUnsecuredEndpoint(*flags.EndpointURL, endpointService, snapshotter)
}
func initJobService(dockerClientFactory *docker.ClientFactory) portainer.JobService {
return docker.NewJobService(dockerClientFactory)
}
func initExtensionManager(fileService portainer.FileService, extensionService portainer.ExtensionService) (portainer.ExtensionManager, error) {
extensionManager := exec.NewExtensionManager(fileService, extensionService)
extensions, err := extensionService.Extensions()
if err != nil {
return nil, err
}
for _, extension := range extensions {
err := extensionManager.EnableExtension(&extension, extension.License.LicenseKey)
if err != nil {
log.Printf("Unable to enable extension: %s [extension: %s]", err.Error(), extension.Name)
extension.Enabled = false
extension.License.Valid = false
extensionService.Persist(&extension)
}
}
return extensionManager, nil
}
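// terminateIfNoAdminCreated shuts the instance down if no administrator account exists five minutes after startup.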
func terminateIfNoAdminCreated(userService portainer.UserService) {
timer1 := time.NewTimer(5 * time.Minute)
<-timer1.C
users, err := userService.UsersByRole(portainer.AdministratorRole)
if err != nil {
log.Fatal(err)
}
if len(users) == 0 {
log.Fatal("No administrator account was created after 5 min. Shutting down the Portainer instance for security reasons.")
return
}
}
func main() {
flags := initCLI()
fileService := initFileService(*flags.Data)
store := initStore(*flags.Data, fileService)
defer store.Close()
jwtService := initJWTService(!*flags.NoAuth)
ldapService := initLDAPService()
gitService := initGitService()
cryptoService := initCryptoService()
digitalSignatureService := initDigitalSignatureService()
err := initKeyPair(fileService, digitalSignatureService)
if err != nil {
log.Fatal(err)
}
extensionManager, err := initExtensionManager(fileService, store.ExtensionService)
if err != nil {
log.Fatal(err)
}
clientFactory := initClientFactory(digitalSignatureService)
jobService := initJobService(clientFactory)
snapshotter := initSnapshotter(clientFactory)
endpointManagement := true
if *flags.ExternalEndpoints != "" {
endpointManagement = false
}
swarmStackManager, err := initSwarmStackManager(*flags.Assets, *flags.Data, digitalSignatureService, fileService)
if err != nil {
log.Fatal(err)
}
composeStackManager := initComposeStackManager(*flags.Data)
err = initTemplates(store.TemplateService, fileService, *flags.Templates, *flags.TemplateFile)
if err != nil {
log.Fatal(err)
}
err = initSettings(store.SettingsService, flags)
if err != nil {
log.Fatal(err)
}
jobScheduler := initJobScheduler()
err = loadSchedulesFromDatabase(jobScheduler, jobService, store.ScheduleService, store.EndpointService, fileService)
if err != nil {
log.Fatal(err)
}
err = loadEndpointSyncSystemSchedule(jobScheduler, store.ScheduleService, store.EndpointService, flags)
if err != nil {
log.Fatal(err)
}
if *flags.Snapshot {
err = loadSnapshotSystemSchedule(jobScheduler, snapshotter, store.ScheduleService, store.EndpointService, store.SettingsService)
if err != nil {
log.Fatal(err)
}
}
jobScheduler.Start()
err = initDockerHub(store.DockerHubService)
if err != nil {
log.Fatal(err)
}
applicationStatus := initStatus(endpointManagement, *flags.Snapshot, flags)
err = initEndpoint(flags, store.EndpointService, snapshotter)
if err != nil {
log.Fatal(err)
}
adminPasswordHash := ""
if *flags.AdminPasswordFile != "" {
content, err := fileService.GetFileContent(*flags.AdminPasswordFile)
if err != nil {
log.Fatal(err)
}
adminPasswordHash, err = cryptoService.Hash(string(content))
if err != nil {
log.Fatal(err)
}
} else if *flags.AdminPassword != "" {
adminPasswordHash = *flags.AdminPassword
}
if adminPasswordHash != "" {
users, err := store.UserService.UsersByRole(portainer.AdministratorRole)
if err != nil {
log.Fatal(err)
}
if len(users) == 0 {
log.Printf("Creating admin user with password hash %s", adminPasswordHash)
user := &portainer.User{
Username: "admin",
Role: portainer.AdministratorRole,
Password: adminPasswordHash,
PortainerAuthorizations: map[portainer.Authorization]bool{
portainer.OperationPortainerDockerHubInspect: true,
portainer.OperationPortainerEndpointGroupList: true,
portainer.OperationPortainerEndpointList: true,
portainer.OperationPortainerEndpointInspect: true,
portainer.OperationPortainerEndpointExtensionAdd: true,
portainer.OperationPortainerEndpointExtensionRemove: true,
portainer.OperationPortainerExtensionList: true,
portainer.OperationPortainerMOTD: true,
portainer.OperationPortainerRegistryList: true,
portainer.OperationPortainerRegistryInspect: true,
portainer.OperationPortainerTeamList: true,
portainer.OperationPortainerTemplateList: true,
portainer.OperationPortainerTemplateInspect: true,
portainer.OperationPortainerUserList: true,
portainer.OperationPortainerUserMemberships: true,
},
}
err := store.UserService.CreateUser(user)
if err != nil {
log.Fatal(err)
}
} else {
log.Println("Instance already has an administrator user defined. Skipping admin password related flags.")
}
}
if !*flags.NoAuth {
go terminateIfNoAdminCreated(store.UserService)
}
// custom mysql/build/stackbackup
mysqlDb := mysqldb.NewMySQLDb()
defer mysqlDb.Close()
buildService := build.NewService(mysqlDb, store.RegistryService)
stackbackupService := stackbackup.NewBackupService()
var server portainer.Server = &http.Server{
Status: applicationStatus,
BindAddress: *flags.Addr,
AssetsPath: *flags.Assets,
AuthDisabled: *flags.NoAuth,
EndpointManagement: endpointManagement,
RoleService: store.RoleService,
UserService: store.UserService,
TeamService: store.TeamService,
TeamMembershipService: store.TeamMembershipService,
EndpointService: store.EndpointService,
EndpointGroupService: store.EndpointGroupService,
ExtensionService: store.ExtensionService,
ResourceControlService: store.ResourceControlService,
SettingsService: store.SettingsService,
RegistryService: store.RegistryService,
DockerHubService: store.DockerHubService,
StackService: store.StackService,
ScheduleService: store.ScheduleService,
TagService: store.TagService,
TemplateService: store.TemplateService,
WebhookService: store.WebhookService,
SwarmStackManager: swarmStackManager,
ComposeStackManager: composeStackManager,
ExtensionManager: extensionManager,
CryptoService: cryptoService,
JWTService: jwtService,
FileService: fileService,
LDAPService: ldapService,
GitService: gitService,
SignatureService: digitalSignatureService,
JobScheduler: jobScheduler,
Snapshotter: snapshotter,
SSL: *flags.SSL,
SSLCert: *flags.SSLCert,
SSLKey: *flags.SSLKey,
DockerClientFactory: clientFactory,
JobService: jobService,
BuildService: buildService,
StackbackupService: stackbackupService,
}
log.Printf("Starting Portainer %s on %s", portainer.APIVersion, *flags.Addr)
err = server.Start()
if err != nil {
log.Fatal(err)
}
}
|
[
"\"AGENT_SECRET\""
] |
[] |
[
"AGENT_SECRET"
] |
[]
|
["AGENT_SECRET"]
|
go
| 1 | 0 | |
src/testcases/CWE129_Improper_Validation_of_Array_Index/s02/CWE129_Improper_Validation_of_Array_Index__Environment_array_size_41.java
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE129_Improper_Validation_of_Array_Index__Environment_array_size_41.java
Label Definition File: CWE129_Improper_Validation_of_Array_Index.label.xml
Template File: sources-sinks-41.tmpl.java
*/
/*
* @description
* CWE: 129 Improper Validation of Array Index
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* Sinks: array_size
* GoodSink: data is used to set the size of the array and it must be greater than 0
* BadSink : data is used to set the size of the array, but it could be set to 0
* Flow Variant: 41 Data flow: data passed as an argument from one method to another in the same class
*
* */
package testcases.CWE129_Improper_Validation_of_Array_Index.s02;
import testcasesupport.*;
import javax.servlet.http.*;
import java.util.logging.Level;
public class CWE129_Improper_Validation_of_Array_Index__Environment_array_size_41 extends AbstractTestCase
{
private void badSink(int data ) throws Throwable
{
int array[] = null;
/* POTENTIAL FLAW: Verify that data is non-negative, but still allow it to be 0 */
if (data >= 0)
{
array = new int[data];
}
else
{
IO.writeLine("Array size is negative");
}
/* do something with the array */
array[0] = 5;
IO.writeLine(array[0]);
}
public void bad() throws Throwable
{
int data;
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
badSink(data );
}
public void good() throws Throwable
{
goodG2B();
goodB2G();
}
private void goodG2BSink(int data ) throws Throwable
{
int array[] = null;
/* POTENTIAL FLAW: Verify that data is non-negative, but still allow it to be 0 */
if (data >= 0)
{
array = new int[data];
}
else
{
IO.writeLine("Array size is negative");
}
/* do something with the array */
array[0] = 5;
IO.writeLine(array[0]);
}
/* goodG2B() - use goodsource and badsink */
private void goodG2B() throws Throwable
{
int data;
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
goodG2BSink(data );
}
private void goodB2GSink(int data ) throws Throwable
{
/* Need to ensure that the array is of size > 3 and < 101 due to the GoodSource and the large_fixed BadSource */
int array[] = null;
/* FIX: Verify that data is non-negative AND greater than 0 */
if (data > 0)
{
array = new int[data];
}
else
{
IO.writeLine("Array size is negative");
}
/* do something with the array */
array[0] = 5;
IO.writeLine(array[0]);
}
/* goodB2G() - use badsource and goodsink */
private void goodB2G() throws Throwable
{
int data;
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
goodB2GSink(data );
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
|
[
"\"ADD\"",
"\"ADD\""
] |
[] |
[
"ADD"
] |
[]
|
["ADD"]
|
java
| 1 | 0 | |
actions/ses.go
|
package actions
import (
"fmt"
"net/http"
"net/url"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ses"
"github.com/aws/aws-sdk-go/service/sns"
"github.com/gin-gonic/gin"
"github.com/mailbadger/app/emails"
"github.com/mailbadger/app/entities"
"github.com/mailbadger/app/entities/params"
"github.com/mailbadger/app/events"
"github.com/mailbadger/app/logger"
"github.com/mailbadger/app/routes/middleware"
"github.com/mailbadger/app/storage"
"github.com/mailbadger/app/validator"
)
func GetSESKeys(c *gin.Context) {
u := middleware.GetUser(c)
keys, err := storage.GetSesKeys(c, u.ID)
if err != nil {
c.JSON(http.StatusNotFound, gin.H{
"message": "AWS Ses keys not set.",
})
return
}
keys.SecretKey = "" //do not return the secret
c.JSON(http.StatusOK, keys)
}
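// PostSESKeys validates and stores a user's AWS SES credentials. The keys are
// verified by constructing SES and SNS clients, and the required AWS resources
// (SNS topic, webhook subscription and SES configuration set) are provisioned
// asynchronously before the keys are persisted.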
func PostSESKeys(c *gin.Context) {
u := middleware.GetUser(c)
_, err := storage.GetSesKeys(c, u.ID)
if err == nil {
c.JSON(http.StatusBadRequest, gin.H{
"message": "AWS Ses keys are already set.",
})
return
}
body := &params.PostSESKeys{}
if err := c.ShouldBindJSON(body); err != nil {
logger.From(c).WithError(err).Error("Unable to bind ses keys params.")
c.JSON(http.StatusBadRequest, gin.H{
"message": "Invalid parameters, please try again",
})
return
}
if err := validator.Validate(body); err != nil {
logger.From(c).WithError(err).Error("Invalid ses keys params.")
c.JSON(http.StatusBadRequest, err)
return
}
keys := &entities.SesKeys{
AccessKey: body.AccessKey,
SecretKey: body.SecretKey,
Region: body.Region,
UserID: u.ID,
}
sender, err := emails.NewSesSender(keys.AccessKey, keys.SecretKey, keys.Region)
if err != nil {
logger.From(c).WithError(err).Warn("Unable to create SES sender.")
c.JSON(http.StatusBadRequest, gin.H{
"message": "SES keys are incorrect.",
})
return
}
snsClient, err := events.NewEventsClient(keys.AccessKey, keys.SecretKey, keys.Region)
if err != nil {
logger.From(c).WithError(err).Warn("Unable to create SNS client.")
c.JSON(http.StatusBadRequest, gin.H{
"message": "SES keys are incorrect.",
})
return
}
//TODO: Move this to a work queue to be more robust.
//createAWSResources is a slow process and could fail periodically.
go func(
c *gin.Context,
sender emails.Sender,
snsClient events.EventsClient,
keys *entities.SesKeys,
uuid string,
) {
err := createAWSResources(sender, snsClient, uuid)
if err != nil {
logger.From(c).WithError(err).Warn("Unable to create AWS resources.")
return
}
err = storage.CreateSesKeys(c, keys)
if err != nil {
logger.From(c).WithError(err).Warn("Unable to create SES keys.")
}
}(c.Copy(), sender, snsClient, keys, u.UUID)
c.JSON(http.StatusOK, gin.H{
"message": "We are currently processing the request.",
})
}
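// createAWSResources provisions the AWS resources needed for email event
// tracking: it creates an SNS topic, subscribes the application's webhook URL
// (built from the APP_URL environment variable and the user's UUID) to it, and
// ensures an SES configuration set exists with an SNS event destination for
// send, open, click, bounce, reject, delivery, complaint and renderingFailure
// events.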
func createAWSResources(
sender emails.Sender,
snsClient events.EventsClient,
uuid string,
) error {
hookURLStr := fmt.Sprintf("%s/api/hooks/%s", os.Getenv("APP_URL"), uuid)
hookURL, err := url.Parse(hookURLStr)
if err != nil {
return fmt.Errorf("ses keys: unable to parse hook URL: %w", err)
}
snsRes, err := snsClient.CreateTopic(&sns.CreateTopicInput{
Name: aws.String(events.SNSTopicName),
})
if err != nil {
return fmt.Errorf("ses keys: unable to create SNS topic: %w", err)
}
topicArn := *snsRes.TopicArn
_, err = snsClient.Subscribe(&sns.SubscribeInput{
Protocol: aws.String(hookURL.Scheme),
Endpoint: aws.String(hookURLStr),
TopicArn: aws.String(topicArn),
})
if err != nil {
return fmt.Errorf("ses keys: unable to subscribe to topic: %w", err)
}
// Check if the configuration set is already created
cs, err := sender.DescribeConfigurationSet(&ses.DescribeConfigurationSetInput{
ConfigurationSetName: aws.String(emails.ConfigurationSetName),
ConfigurationSetAttributeNames: []*string{
aws.String("eventDestinations"),
},
})
if err != nil {
_, err = sender.CreateConfigurationSet(&ses.CreateConfigurationSetInput{
ConfigurationSet: &ses.ConfigurationSet{
Name: aws.String(emails.ConfigurationSetName),
},
})
if err != nil {
return fmt.Errorf("ses keys: unable to create configuration set: %w", err)
}
}
// Check if the event destination is set
eventFound := false
for _, e := range cs.EventDestinations {
if e.Name != nil && *e.Name == events.SNSTopicName {
eventFound = true
}
}
if eventFound {
return nil
}
_, err = sender.CreateConfigurationSetEventDestination(&ses.CreateConfigurationSetEventDestinationInput{
ConfigurationSetName: aws.String(emails.ConfigurationSetName),
EventDestination: &ses.EventDestination{
Name: aws.String(events.SNSTopicName),
Enabled: aws.Bool(true),
MatchingEventTypes: []*string{
aws.String("send"),
aws.String("open"),
aws.String("click"),
aws.String("bounce"),
aws.String("reject"),
aws.String("delivery"),
aws.String("complaint"),
aws.String("renderingFailure"),
},
SNSDestination: &ses.SNSDestination{
TopicARN: aws.String(topicArn),
},
},
})
if err != nil {
return fmt.Errorf("ses keys: unable to set event destination: %w", err)
}
return nil
}
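// DeleteSESKeys removes a user's SES credentials. It also attempts to clean up
// the associated AWS resources by deleting the SNS topic referenced by the SES
// configuration set's event destination and the configuration set itself;
// cleanup failures are logged but do not block deletion of the stored keys.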
func DeleteSESKeys(c *gin.Context) {
u := middleware.GetUser(c)
keys, err := storage.GetSesKeys(c, u.ID)
if err != nil {
c.Status(http.StatusNoContent)
return
}
sender, err := emails.NewSesSender(keys.AccessKey, keys.SecretKey, keys.Region)
if err != nil {
logger.From(c).WithError(err).Warn("Unable to create SES sender.")
c.JSON(http.StatusBadRequest, gin.H{
"message": "SES keys are incorrect.",
})
return
}
snsClient, err := events.NewEventsClient(keys.AccessKey, keys.SecretKey, keys.Region)
if err != nil {
logger.From(c).WithError(err).Warn("Unable to create SNS client.")
c.JSON(http.StatusBadRequest, gin.H{
"message": "SES keys are incorrect.",
})
return
}
cs, err := sender.DescribeConfigurationSet(&ses.DescribeConfigurationSetInput{
ConfigurationSetName: aws.String(emails.ConfigurationSetName),
ConfigurationSetAttributeNames: []*string{
aws.String("eventDestinations"),
},
})
if err == nil {
// find and delete topic
for _, e := range cs.EventDestinations {
if e.Name != nil && *e.Name == events.SNSTopicName {
_, err := snsClient.DeleteTopic(&sns.DeleteTopicInput{
TopicArn: e.SNSDestination.TopicARN,
})
if err != nil {
logger.From(c).WithError(err).Warn("Unable to delete topic.")
}
break
}
}
_, err = sender.DeleteConfigurationSet(&ses.DeleteConfigurationSetInput{
ConfigurationSetName: aws.String(emails.ConfigurationSetName),
})
if err != nil {
logger.From(c).WithError(err).Warn("Unable to delete configuration set.")
}
}
err = storage.DeleteSesKeys(c, u.ID)
if err != nil {
logger.From(c).WithError(err).Warn("Unable to delete SES keys.")
c.JSON(http.StatusBadRequest, gin.H{
"message": "Unable to delete SES keys.",
})
return
}
c.Status(http.StatusNoContent)
}
func GetSESQuota(c *gin.Context) {
u := middleware.GetUser(c)
keys, err := storage.GetSesKeys(c, u.ID)
if err != nil {
c.JSON(http.StatusNotFound, gin.H{
"message": "AWS Ses keys not set.",
})
return
}
sender, err := emails.NewSesSender(keys.AccessKey, keys.SecretKey, keys.Region)
if err != nil {
logger.From(c).WithError(err).Warn("Unable to create SES sender.")
c.JSON(http.StatusBadRequest, gin.H{
"message": "SES keys are incorrect.",
})
return
}
res, err := sender.GetSendQuota(&ses.GetSendQuotaInput{})
if err != nil {
logger.From(c).WithError(err).Warn("Unable to fetch send quota.")
c.JSON(http.StatusBadRequest, gin.H{
"message": "Unable to fetch send quota.",
})
return
}
c.JSON(http.StatusOK, entities.SendQuota{
Max24HourSend: *res.Max24HourSend,
MaxSendRate: *res.MaxSendRate,
SentLast24Hours: *res.SentLast24Hours,
})
}
|
[
"\"APP_URL\""
] |
[] |
[
"APP_URL"
] |
[]
|
["APP_URL"]
|
go
| 1 | 0 |