filename (string) | content (string) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string) | constargjson (string) | lang (string) | constargcount (float64) | variableargcount (float64) | sentence (string) |
---|---|---|---|---|---|---|---|---|---|---|
code/main.py
|
# Copyright 2018 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the entrypoint to the rest of the code"""
from __future__ import absolute_import
from __future__ import division
import os
import io
import json
import sys
import logging
import tensorflow as tf
from qa_model import QAModel, QATransformerModel
from vocab import get_glove
from official_eval_helper import get_json_data, generate_answers
print('setting up logging')
logging.basicConfig(level=logging.INFO)
print(logging, logging.getLogger())
MAIN_DIR = os.path.relpath(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # relative path of the main directory
DEFAULT_DATA_DIR = os.path.join(MAIN_DIR, "data") # relative path of data dir
EXPERIMENTS_DIR = os.path.join(MAIN_DIR, "experiments") # relative path of experiments dir
# High-level options
tf.app.flags.DEFINE_integer("gpu", 0, "Which GPU to use, if you have multiple.")
tf.app.flags.DEFINE_string("mode", "train", "Available modes: train / show_examples / official_eval")
tf.app.flags.DEFINE_string("experiment_name", "", "Unique name for your experiment. This will create a directory by this name in the experiments/ directory, which will hold all data related to this experiment")
tf.app.flags.DEFINE_integer("num_epochs", 0, "Number of epochs to train. 0 means train indefinitely")
# Hyperparameters
#tf.app.flags.DEFINE_float("learning_rate", 0.001, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate", 0.2, "Learning rate.")
tf.app.flags.DEFINE_float("max_gradient_norm", 8.0, "Clip gradients to this norm.")
tf.app.flags.DEFINE_float("dropout", 0.1, "Fraction of units randomly dropped on non-recurrent connections.")
tf.app.flags.DEFINE_integer("batch_size", 100, "Batch size to use")
tf.app.flags.DEFINE_integer("hidden_size", 100, "Size of the hidden states")
# the long tail is between 300 and 390 TODO change to 390 at test time
tf.app.flags.DEFINE_integer("context_len", 300, "The maximum context length of your model")
# the long tail is between 22 and 30
tf.app.flags.DEFINE_integer("question_len", 30, "The maximum question length of your model")
tf.app.flags.DEFINE_integer("embedding_size", 100, "Size of the pretrained word vectors. This needs to be one of the available GloVe dimensions: 50/100/200/300")
# Transformer Network Hparams
# TODO change to 6
tf.app.flags.DEFINE_integer("num_hidden_layers", 4, "number of transformer blocks")
tf.app.flags.DEFINE_integer("num_heads", 10, "number of heads for attention")
# How often to print, save, eval
tf.app.flags.DEFINE_integer("print_every", 1, "How many iterations to do per print.")
tf.app.flags.DEFINE_integer("save_every", 500, "How many iterations to do per save.")
tf.app.flags.DEFINE_integer("eval_every", 500, "How many iterations to do per calculating loss/f1/em on dev set. Warning: this is fairly time-consuming so don't do it too often.")
tf.app.flags.DEFINE_integer("keep", 1, "How many checkpoints to keep. 0 indicates keep all (you shouldn't need to do keep all though - it's very storage intensive).")
# Reading and saving data
tf.app.flags.DEFINE_string("train_dir", "", "Training directory to save the model parameters and other info. Defaults to experiments/{experiment_name}")
tf.app.flags.DEFINE_string("glove_path", "", "Path to glove .txt file. Defaults to data/glove.6B.{embedding_size}d.txt")
tf.app.flags.DEFINE_string("data_dir", DEFAULT_DATA_DIR, "Where to find preprocessed SQuAD data for training. Defaults to data/")
tf.app.flags.DEFINE_string("ckpt_load_dir", "", "For official_eval mode, which directory to load the checkpoint fron. You need to specify this for official_eval mode.")
tf.app.flags.DEFINE_string("json_in_path", "", "For official_eval mode, path to JSON input file. You need to specify this for official_eval_mode.")
tf.app.flags.DEFINE_string("json_out_path", "predictions.json", "Output path for official_eval mode. Defaults to predictions.json")
FLAGS = tf.app.flags.FLAGS
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
def initialize_model(session, model, train_dir, expect_exists):
"""
Initializes model from train_dir.
Inputs:
session: TensorFlow session
model: QAModel
train_dir: path to directory where we'll look for checkpoint
expect_exists: If True, throw an error if no checkpoint is found.
If False, initialize fresh model if no checkpoint is found.
"""
print "Looking for model at %s..." % train_dir
ckpt = tf.train.get_checkpoint_state(train_dir)
v2_path = ckpt.model_checkpoint_path + ".index" if ckpt else ""
if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):
print "Reading model parameters from %s" % ckpt.model_checkpoint_path
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
if expect_exists:
raise Exception("There is no saved checkpoint at %s" % train_dir)
else:
print "There is no saved checkpoint at %s. Creating model with fresh parameters." % train_dir
session.run(tf.global_variables_initializer())
print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())
def main(unused_argv):
# Print an error message if you've entered flags incorrectly
if len(unused_argv) != 1:
raise Exception("There is a problem with how you entered flags: %s" % unused_argv)
# Check for Python 2
if sys.version_info[0] != 2:
raise Exception("ERROR: You must use Python 2 but you are running Python %i" % sys.version_info[0])
# Print out Tensorflow version
print "This code was developed and tested on TensorFlow 1.4.1. Your TensorFlow version: %s" % tf.__version__
# Define train_dir
if not FLAGS.experiment_name and not FLAGS.train_dir and FLAGS.mode != "official_eval":
raise Exception("You need to specify either --experiment_name or --train_dir")
FLAGS.train_dir = FLAGS.train_dir or os.path.join(EXPERIMENTS_DIR, FLAGS.experiment_name)
# Initialize bestmodel directory
bestmodel_dir = os.path.join(FLAGS.train_dir, "best_checkpoint")
# Define path for glove vecs
FLAGS.glove_path = FLAGS.glove_path or os.path.join(DEFAULT_DATA_DIR, "glove.6B.{}d.txt".format(FLAGS.embedding_size))
# Load embedding matrix and vocab mappings
emb_matrix, word2id, id2word = get_glove(FLAGS.glove_path, FLAGS.embedding_size)
# Get filepaths to train/dev datafiles for tokenized queries, contexts and answers
train_context_path = os.path.join(FLAGS.data_dir, "train.context")
train_qn_path = os.path.join(FLAGS.data_dir, "train.question")
train_ans_path = os.path.join(FLAGS.data_dir, "train.span")
dev_context_path = os.path.join(FLAGS.data_dir, "dev.context")
dev_qn_path = os.path.join(FLAGS.data_dir, "dev.question")
dev_ans_path = os.path.join(FLAGS.data_dir, "dev.span")
# Initialize model
# qa_model = QAModel(FLAGS, id2word, word2id, emb_matrix)
qa_model = QATransformerModel(FLAGS, id2word, word2id, emb_matrix)
print('qa_model created:', qa_model)
# Some GPU settings
config=tf.ConfigProto()
config.gpu_options.allow_growth = True
# Split by mode
if FLAGS.mode == "train":
# Setup train dir and logfile
if not os.path.exists(FLAGS.train_dir):
os.makedirs(FLAGS.train_dir)
file_handler = logging.FileHandler(os.path.join(FLAGS.train_dir, "log.txt"))
logging.getLogger().addHandler(file_handler)
print('logger-file created', logging.getLogger())
logging.info('logger info works????')
# Save a record of flags as a .json file in train_dir
with open(os.path.join(FLAGS.train_dir, "flags.json"), 'w') as fout:
json.dump(FLAGS.__flags, fout)
# Make bestmodel dir if necessary
if not os.path.exists(bestmodel_dir):
os.makedirs(bestmodel_dir)
with tf.Session(config=config) as sess:
# Load most recent model
initialize_model(sess, qa_model, FLAGS.train_dir, expect_exists=False)
# Train
qa_model.train(sess, train_context_path, train_qn_path, train_ans_path, dev_qn_path, dev_context_path, dev_ans_path)
elif FLAGS.mode == "show_examples":
with tf.Session(config=config) as sess:
# Load best model
initialize_model(sess, qa_model, bestmodel_dir, expect_exists=True)
# Show examples with F1/EM scores
_, _ = qa_model.check_f1_em(sess, dev_context_path, dev_qn_path, dev_ans_path, "dev", num_samples=10, print_to_screen=True)
elif FLAGS.mode == "official_eval":
if FLAGS.json_in_path == "":
raise Exception("For official_eval mode, you need to specify --json_in_path")
if FLAGS.ckpt_load_dir == "":
raise Exception("For official_eval mode, you need to specify --ckpt_load_dir")
# Read the JSON data from file
qn_uuid_data, context_token_data, qn_token_data = get_json_data(FLAGS.json_in_path)
with tf.Session(config=config) as sess:
# Load model from ckpt_load_dir
initialize_model(sess, qa_model, FLAGS.ckpt_load_dir, expect_exists=True)
# Get a predicted answer for each example in the data
# Return a mapping answers_dict from uuid to answer
answers_dict = generate_answers(sess, qa_model, word2id, qn_uuid_data, context_token_data, qn_token_data)
# Write the uuid->answer mapping a to json file in root dir
print "Writing predictions to %s..." % FLAGS.json_out_path
with io.open(FLAGS.json_out_path, 'w', encoding='utf-8') as f:
f.write(unicode(json.dumps(answers_dict, ensure_ascii=False)))
print "Wrote predictions to %s" % FLAGS.json_out_path
else:
raise Exception("Unexpected value of FLAGS.mode: %s" % FLAGS.mode)
if __name__ == "__main__":
tf.app.run()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
altcore/core/wsgi.py
|
"""
WSGI config for altcore project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "altcore.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
functions/source/KubeManifest/lambda_function.py
|
import json
import logging
import boto3
import subprocess
import shlex
import os
import re
from ruamel import yaml
from datetime import date, datetime
from crhelper import CfnResource
from time import sleep
logger = logging.getLogger(__name__)
helper = CfnResource(json_logging=True, log_level='DEBUG')
try:
s3_client = boto3.client('s3')
kms_client = boto3.client('kms')
except Exception as init_exception:
helper.init_failure(init_exception)
def run_command(command):
retries = 0
while True:
try:
try:
logger.debug("executing command: %s" % command)
output = subprocess.check_output(shlex.split(command), stderr=subprocess.STDOUT).decode("utf-8")
logger.debug(output)
except subprocess.CalledProcessError as exc:
logger.error("Command failed with exit code %s, stderr: %s" % (exc.returncode,
exc.output.decode("utf-8")))
raise Exception(exc.output.decode("utf-8"))
return output
except Exception as e:
if 'Unable to connect to the server' not in str(e) or retries >= 5:
raise
logger.debug("{}, retrying in 5 seconds").format(e)
sleep(5)
retries += 1
def create_kubeconfig(bucket, key, kms_context):
try:
os.mkdir("/tmp/.kube/")
except FileExistsError:
pass
try:
retries = 10
while True:
try:
enc_config = s3_client.get_object(Bucket=bucket, Key=key)['Body'].read()
break
except Exception as e:
logger.error(str(e), exc_info=True)
if retries == 0:
raise
sleep(10)
retries -= 1
except Exception as e:
raise Exception("Failed to fetch KubeConfig from S3: %s" % str(e))
kubeconf = kms_client.decrypt(
CiphertextBlob=enc_config,
EncryptionContext=kms_context
)['Plaintext'].decode('utf8')
f = open("/tmp/.kube/config", "w")
f.write(kubeconf)
f.close()
os.environ["KUBECONFIG"] = "/tmp/.kube/config"
def json_serial(o):
if isinstance(o, (datetime, date)):
return o.strftime('%Y-%m-%dT%H:%M:%SZ')
raise TypeError("Object of type '%s' is not JSON serializable" % type(o))
def write_manifest(manifest, path):
f = open(path, "w")
f.write(json.dumps(manifest, default=json_serial))
f.close()
def generate_name(event, physical_resource_id):
manifest = event['ResourceProperties']['Manifest']
if type(manifest) == str:
manifest = yaml.safe_load(manifest)
stack_name = event['StackId'].split('/')[1]
if "metadata" in manifest.keys():
if 'name' not in manifest["metadata"].keys() and 'generateName' not in manifest["metadata"].keys():
if physical_resource_id:
manifest["metadata"]["name"] = physical_resource_id.split('/')[-1]
else:
manifest["metadata"]["generateName"] = "cfn-%s-" % stack_name.lower()
return manifest
def build_output(kube_response):
outp = {}
for key in ["uid", "selfLink", "resourceVersion", "namespace", "name"]:
if key in kube_response["metadata"].keys():
outp[key] = kube_response["metadata"][key]
return outp
def get_config_details(event):
s3_uri_parts = event['ResourceProperties']['KubeConfigPath'].split('/')
if len(s3_uri_parts) < 4 or s3_uri_parts[0:2] != ['s3:', '']:
raise Exception("Invalid KubeConfigPath, must be in the format s3://bucket-name/path/to/config")
bucket = s3_uri_parts[2]
key = "/".join(s3_uri_parts[3:])
kms_context = {"QSContext": event['ResourceProperties']['KubeConfigKmsContext']}
return bucket, key, kms_context
def traverse(obj, path=None, callback=None):
if path is None:
path = []
if isinstance(obj, dict):
value = {k: traverse(v, path + [k], callback)
for k, v in obj.items()}
elif isinstance(obj, list):
value = [traverse(obj[idx], path + [[idx]], callback)
for idx in range(len(obj))]
else:
value = obj
if callback is None:
return value
else:
return callback(path, value)
def traverse_modify(obj, target_path, action):
target_path = to_path(target_path)
def transformer(path, value):
if path == target_path:
return action(value)
else:
return value
return traverse(obj, callback=transformer)
def traverse_modify_all(obj, action):
def transformer(_, value):
return action(value)
return traverse(obj, callback=transformer)
def to_path(path):
if isinstance(path, list):
return path # already in list format
def _iter_path(inner_path):
indexes = [[int(i[1:-1])] for i in re.findall(r'\[[0-9]+\]', inner_path)]
lists = re.split(r'\[[0-9]+\]', inner_path)
for parts in range(len(lists)):
for part in lists[parts].strip('.').split('.'):
yield part
if parts < len(indexes):
yield indexes[parts]
else:
yield []
return list(_iter_path(path))[:-1]
def set_type(input_str):
if type(input_str) == str:
if input_str.lower() == 'false':
return False
if input_str.lower() == 'true':
return True
if input_str.isdigit():
return int(input_str)
return input_str
def fix_types(manifest):
return traverse_modify_all(manifest, set_type)
def aws_auth_configmap(arns, groups, username=None, delete=False):
new = False
outp = ''
try:
outp = run_command("kubectl get configmap/aws-auth -n kube-system -o yaml")
except Exception as e:
if 'configmaps "aws-auth" not found' not in str(e):
raise
new = True
if new:
aws_auth = {
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {"name": "aws-auth", "namespace": "kube-system"},
"data": {}
}
else:
aws_auth = yaml.safe_load(outp)
maps = {"role": [], "user": []}
if 'mapRoles' in aws_auth['data'].keys():
maps['role'] = yaml.safe_load(aws_auth['data']['mapRoles'])
if 'mapUsers' in aws_auth['data'].keys():
maps['user'] = yaml.safe_load(aws_auth['data']['mapUsers'])
for arn in arns:
if arn != 'NotFound':
iam_type = arn.split(':')[5].split("/")[0]
entry = {
"%sarn" % iam_type: arn,
"username": username if username else arn,
"groups": groups
}
if not delete:
maps[iam_type].append(entry)
else:
maps[iam_type] = [value for value in maps[iam_type] if value != entry]
if maps['role']:
aws_auth['data']['mapRoles'] = yaml.dump(maps['role'], default_flow_style=False)
if maps['user']:
aws_auth['data']['mapUsers'] = yaml.dump(maps['user'], default_flow_style=False)
logger.debug(yaml.dump(aws_auth, default_flow_style=False))
write_manifest(aws_auth, '/tmp/aws-auth.json')
kw = 'create' if new else 'replace'
outp = run_command("kubectl %s -f /tmp/aws-auth.json --save-config" % kw)
logger.debug(outp)
def handler_init(event):
logger.debug('Received event: %s' % json.dumps(event, default=json_serial))
physical_resource_id = None
manifest_file = None
if not event['ResourceProperties']['KubeConfigPath'].startswith("s3://"):
raise Exception("KubeConfigPath must be a valid s3 URI (eg.: s3://my-bucket/my-key.txt")
bucket, key, kms_context = get_config_details(event)
create_kubeconfig(bucket, key, kms_context)
if 'Users' in event['ResourceProperties'].keys():
username = None
if 'Username' in event['ResourceProperties']['Users'].keys():
username = event['ResourceProperties']['Users']['Username']
if event['RequestType'] == 'Delete':
aws_auth_configmap(
event['ResourceProperties']['Users']['Arns'],
event['ResourceProperties']['Users']['Groups'],
username,
delete=True
)
else:
aws_auth_configmap(
event['ResourceProperties']['Users']['Arns'],
event['ResourceProperties']['Users']['Groups'],
username
)
if 'Manifest' in event['ResourceProperties'].keys():
manifest_file = '/tmp/manifest.json'
if "PhysicalResourceId" in event.keys():
physical_resource_id = event["PhysicalResourceId"]
if type(event['ResourceProperties']['Manifest']) == str:
manifest = generate_name(event, physical_resource_id)
else:
manifest = fix_types(generate_name(event, physical_resource_id))
write_manifest(manifest, manifest_file)
logger.debug("Applying manifest: %s" % json.dumps(manifest, default=json_serial))
return physical_resource_id, manifest_file
@helper.create
def create_handler(event, _):
physical_resource_id, manifest_file = handler_init(event)
if not manifest_file:
return physical_resource_id
outp = run_command("kubectl create --save-config -o json -f %s" % manifest_file)
helper.Data = build_output(json.loads(outp))
return helper.Data["selfLink"]
@helper.update
def update_handler(event, _):
physical_resource_id, manifest_file = handler_init(event)
if not manifest_file:
return physical_resource_id
outp = run_command("kubectl apply -o json -f %s" % manifest_file)
helper.Data = build_output(json.loads(outp))
return helper.Data["selfLink"]
@helper.delete
def delete_handler(event, _):
physical_resource_id, manifest_file = handler_init(event)
if not manifest_file:
return physical_resource_id
run_command("kubectl delete -f %s" % manifest_file)
def lambda_handler(event, context):
helper(event, context)
|
[] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
python
| 1 | 0 | |
binance/auth.go
|
package binance
import (
"os"
"time"
"strconv"
"net/http"
"net/url"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
)
func AuthorizedRequest(method string, path string, params interface{}, sign bool) (resp *http.Response, err error){
// init
base_url := "https://api.binance.com"
// prepare params
var totalParams string
values := url.Values{}
if params_cast, ok := params.(map[string]string); ok{
if len(params_cast)>0{
for key, value := range params_cast{
values.Add(key, value)
}
}
}
if sign {
timestamp := strconv.FormatInt(time.Now().UnixNano()/int64(1000000), 10)
values.Add("timestamp", timestamp)
values.Add("recvWindow", "5000")
}
totalParams = values.Encode()
// sign by hmac-sha256
if sign {
api_secret := os.Getenv("BN_API_SECRET")
mac := hmac.New(sha256.New, []byte(api_secret))
mac.Write([]byte(totalParams))
sign := hex.EncodeToString(mac.Sum(nil))
values.Add("signature", sign)
}
// prepare request
req, _ := http.NewRequest(method, base_url + path + "?" + values.Encode(), nil)
//set header
api_key := os.Getenv("BN_API_KEY")
req.Header.Add("X-MBX-APIKEY", api_key)
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
// do request
client := new(http.Client)
resp, err = client.Do(req)
return
}
|
[
"\"BN_API_SECRET\"",
"\"BN_API_KEY\""
] |
[] |
[
"BN_API_KEY",
"BN_API_SECRET"
] |
[]
|
["BN_API_KEY", "BN_API_SECRET"]
|
go
| 2 | 0 | |
runtime/ibm_cloud_functions/pythonrunner.py
|
"""Executable Python script for running Python actions.
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
import os
import sys
import codecs
import traceback
sys.path.append('../actionProxy')
from actionproxy import ActionRunner, main, setRunner
class PythonRunner(ActionRunner):
def __init__(self):
ActionRunner.__init__(self, '/action/__main__.py')
self.fn = None
self.mainFn = 'main'
self.global_context = {}
def initCodeFromString(self, message):
# do nothing, defer to build step
return True
def build(self, message):
binary = message['binary'] if 'binary' in message else False
if not binary:
code = message['code']
filename = 'action'
elif os.path.isfile(self.source):
with codecs.open(self.source, 'r', 'utf-8') as m:
code = m.read()
workdir = os.path.dirname(self.source)
sys.path.insert(0, workdir)
os.chdir(workdir)
else:
sys.stderr.write('Zip file does not include ' + os.path.basename(self.source) + '\n')
return False
try:
filename = os.path.basename(self.source)
self.fn = compile(code, filename=filename, mode='exec')
if 'main' in message:
self.mainFn = message['main']
# if the directory 'virtualenv' is extracted out of a zip file
path_to_virtualenv = os.path.dirname(self.source) + '/virtualenv'
if os.path.isdir(path_to_virtualenv):
# activate the virtualenv using activate_this.py contained in the virtualenv
activate_this_file = path_to_virtualenv + '/bin/activate_this.py'
if os.path.exists(activate_this_file):
with open(activate_this_file) as f:
code = compile(f.read(), activate_this_file, 'exec')
exec(code, dict(__file__=activate_this_file))
else:
sys.stderr.write('Invalid virtualenv. Zip file does not include /virtualenv/bin/' + os.path.basename(activate_this_file) + '\n')
return False
return True
except Exception:
traceback.print_exc(file=sys.stderr, limit=0)
return False
def verify(self):
return self.fn is not None
def run(self, args, env):
result = None
try:
os.environ = env
self.global_context['param'] = args
exec(self.fn, self.global_context)
exec('fun = %s(param)' % self.mainFn, self.global_context)
result = self.global_context['fun']
except SystemExit:
sys.stderr.write('Exiting function...')
result = {'status': 'exited'}
except Exception:
traceback.print_exc(file=sys.stderr)
if result and isinstance(result, dict):
return (200, result)
else:
return (502, {'error': 'The action did not return a dictionary.'})
if __name__ == '__main__':
setRunner(PythonRunner())
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/test_wrapper.py
|
import os
import unittest
try:
from unittest.mock import patch, call, ANY, MagicMock
except ImportError:
from mock import patch, call, ANY, MagicMock
from datadog_lambda.wrapper import datadog_lambda_wrapper
from datadog_lambda.metric import lambda_metric
from datadog_lambda.thread_stats_writer import ThreadStatsWriter
def get_mock_context(
aws_request_id="request-id-1",
memory_limit_in_mb="256",
invoked_function_arn="arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:1",
function_version="1",
client_context={},
):
lambda_context = MagicMock()
lambda_context.aws_request_id = aws_request_id
lambda_context.memory_limit_in_mb = memory_limit_in_mb
lambda_context.invoked_function_arn = invoked_function_arn
lambda_context.function_version = function_version
lambda_context.client_context = client_context
return lambda_context
class TestDatadogLambdaWrapper(unittest.TestCase):
def setUp(self):
# Force @datadog_lambda_wrapper to always create a real
# (not no-op) wrapper.
datadog_lambda_wrapper._force_wrap = True
patcher = patch(
"datadog.threadstats.reporters.HttpReporter.flush_distributions"
)
self.mock_threadstats_flush_distributions = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.extract_dd_trace_context")
self.mock_extract_dd_trace_context = patcher.start()
self.mock_extract_dd_trace_context.return_value = ({}, None)
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.set_correlation_ids")
self.mock_set_correlation_ids = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.inject_correlation_ids")
self.mock_inject_correlation_ids = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.patch_all")
self.mock_patch_all = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.cold_start.is_cold_start")
self.mock_is_cold_start = patcher.start()
self.mock_is_cold_start.return_value = True
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.tags.python_version_tuple")
self.mock_python_version_tuple = patcher.start()
self.mock_python_version_tuple.return_value = ("2", "7", "10")
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.metric.write_metric_point_to_stdout")
self.mock_write_metric_point_to_stdout = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.tags.get_library_version_tag")
self.mock_format_dd_lambda_layer_tag = patcher.start()
# Mock the layer version so we don't have to update tests on every version bump
self.mock_format_dd_lambda_layer_tag.return_value = "datadog_lambda:v6.6.6"
patcher = patch("datadog_lambda.tags._format_dd_lambda_layer_tag")
self.mock_format_dd_lambda_layer_tag = patcher.start()
# Mock the layer version so we don't have to update tests on every version bump
self.mock_format_dd_lambda_layer_tag.return_value = (
"dd_lambda_layer:datadog-python27_0.1.0"
)
self.addCleanup(patcher.stop)
def test_datadog_lambda_wrapper(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_context = get_mock_context()
lambda_handler(lambda_event, lambda_context)
self.mock_threadstats_flush_distributions.assert_has_calls(
[
call(
[
{
"metric": "test.metric",
"points": [[ANY, [100]]],
"type": "distribution",
"host": None,
"device": None,
"tags": ANY,
"interval": 10,
}
]
)
]
)
self.mock_extract_dd_trace_context.assert_called_with(
lambda_event, lambda_context, extractor=None
)
self.mock_set_correlation_ids.assert_called()
self.mock_inject_correlation_ids.assert_called()
self.mock_patch_all.assert_called()
def test_datadog_lambda_wrapper_flush_to_log(self):
os.environ["DD_FLUSH_TO_LOG"] = "True"
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_threadstats_flush_distributions.assert_not_called()
del os.environ["DD_FLUSH_TO_LOG"]
def test_datadog_lambda_wrapper_flush_in_thread(self):
# force ThreadStats to flush in thread
import datadog_lambda.metric as metric_module
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(True)
@datadog_lambda_wrapper
def lambda_handler(event, context):
import time
lambda_metric("test.metric", 100)
time.sleep(11)
# assert flushing in the thread
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 1)
lambda_metric("test.metric", 200)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
# assert another flushing in the end
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 2)
# reset ThreadStats
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(False)
def test_datadog_lambda_wrapper_not_flush_in_thread(self):
# force ThreadStats to not flush in thread
import datadog_lambda.metric as metric_module
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(False)
@datadog_lambda_wrapper
def lambda_handler(event, context):
import time
lambda_metric("test.metric", 100)
time.sleep(11)
# assert no flushing in the thread
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 0)
lambda_metric("test.metric", 200)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
# assert flushing in the end
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 1)
# reset ThreadStats
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(False)
def test_datadog_lambda_wrapper_inject_correlation_ids(self):
os.environ["DD_LOGS_INJECTION"] = "True"
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_set_correlation_ids.assert_called()
self.mock_inject_correlation_ids.assert_called()
del os.environ["DD_LOGS_INJECTION"]
def test_invocations_metric(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
)
]
)
def test_errors_metric(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
raise RuntimeError()
lambda_event = {}
with self.assertRaises(RuntimeError):
lambda_handler(lambda_event, get_mock_context())
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
call(
"aws.lambda.enhanced.errors",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
]
)
def test_enhanced_metrics_cold_start_tag(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_is_cold_start.return_value = False
lambda_handler(
lambda_event, get_mock_context(aws_request_id="second-request-id")
)
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:false",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
]
)
def test_enhanced_metrics_latest(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_context = get_mock_context()
lambda_context.invoked_function_arn = (
"arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:$Latest"
)
lambda_handler(lambda_event, lambda_context)
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:Latest",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
)
]
)
def test_enhanced_metrics_alias(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_context = get_mock_context()
# keep the ARN in a variable so the line length stays within the linter limit
alias_arn = "arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:My_alias-1"
lambda_context.invoked_function_arn = alias_arn
lambda_handler(lambda_event, lambda_context)
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"executedversion:1",
"resource:python-layer-test:My_alias-1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
)
]
)
def test_no_enhanced_metrics_without_env_var(self):
os.environ["DD_ENHANCED_METRICS"] = "false"
@datadog_lambda_wrapper
def lambda_handler(event, context):
raise RuntimeError()
lambda_event = {}
with self.assertRaises(RuntimeError):
lambda_handler(lambda_event, get_mock_context())
self.mock_write_metric_point_to_stdout.assert_not_called()
del os.environ["DD_ENHANCED_METRICS"]
def test_only_one_wrapper_in_use(self):
patcher = patch("datadog_lambda.wrapper.submit_invocations_metric")
self.mock_submit_invocations_metric = patcher.start()
self.addCleanup(patcher.stop)
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
# Turn off _force_wrap to emulate the nested wrapper scenario,
# the second @datadog_lambda_wrapper should actually be no-op.
datadog_lambda_wrapper._force_wrap = False
lambda_handler_double_wrapped = datadog_lambda_wrapper(lambda_handler)
lambda_event = {}
lambda_handler_double_wrapped(lambda_event, get_mock_context())
self.mock_patch_all.assert_called_once()
self.mock_submit_invocations_metric.assert_called_once()
|
[] |
[] |
[
"DD_LOGS_INJECTION",
"DD_FLUSH_TO_LOG",
"DD_ENHANCED_METRICS"
] |
[]
|
["DD_LOGS_INJECTION", "DD_FLUSH_TO_LOG", "DD_ENHANCED_METRICS"]
|
python
| 3 | 0 | |
vendor/code.cloudfoundry.org/cli/command/v2/delete_quota_command.go
|
package v2
import (
"os"
"code.cloudfoundry.org/cli/cf/cmd"
"code.cloudfoundry.org/cli/command"
"code.cloudfoundry.org/cli/command/flag"
)
type DeleteQuotaCommand struct {
RequiredArgs flag.Quota `positional-args:"yes"`
Force bool `short:"f" description:"Force deletion without confirmation"`
usage interface{} `usage:"CF_NAME delete-quota QUOTA [-f]"`
relatedCommands interface{} `related_commands:"quotas"`
}
func (_ DeleteQuotaCommand) Setup(config command.Config, ui command.UI) error {
return nil
}
func (_ DeleteQuotaCommand) Execute(args []string) error {
cmd.Main(os.Getenv("CF_TRACE"), os.Args)
return nil
}
|
[
"\"CF_TRACE\""
] |
[] |
[
"CF_TRACE"
] |
[]
|
["CF_TRACE"]
|
go
| 1 | 0 | |
test/typecheck_test.py
|
import unittest
import subprocess
import os
import integration_helpers
reset_color = '\x1b[0m'
class TypecheckTest(unittest.TestCase):
def test_project_typechecks(self):
mypy = integration_helpers.find_bin('mypy')
assert mypy, 'Could not find mypy executable'
project_path = integration_helpers.get_project_path()
os.environ['MYPY_FORCE_COLOR'] = '1'
result = subprocess.run([mypy, project_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
if result.returncode != 0:
raise RuntimeError('`$ mypy ' + project_path + '` failed:\n\n' + reset_color + result.stdout)
|
[] |
[] |
[
"MYPY_FORCE_COLOR"
] |
[]
|
["MYPY_FORCE_COLOR"]
|
python
| 1 | 0 | |
_tests/e2e/web/main.go
|
package main
import (
"context"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httputil"
"os"
"strings"
"time"
)
func main() {
mux := http.NewServeMux()
mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("pong"))
})
mux.HandleFunc("/echo", func(w http.ResponseWriter, r *http.Request) {
if msg, ok := r.URL.Query()["message"]; ok {
w.Write([]byte(msg[0]))
return
}
w.WriteHeader(http.StatusBadRequest)
})
mux.HandleFunc("/delegate", func(w http.ResponseWriter, r *http.Request) {
if url, ok := r.URL.Query()["url"]; ok {
resp, err := httpClient().Get(url[0])
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
w.Write(data)
return
}
w.WriteHeader(http.StatusBadRequest)
})
h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
dump, err := httputil.DumpRequest(r, false)
if err == nil {
log.Println(string(dump))
}
mux.ServeHTTP(w, r)
})
if err := http.ListenAndServe(":"+os.Getenv("PORT"), h); err != nil {
log.Fatal(err.Error())
}
}
func httpClient() *http.Client {
dialerFunc := func(ctx context.Context, network, address string) (net.Conn, error) {
d := net.Dialer{}
port := "53"
if v, ok := os.LookupEnv("DNS_PORT"); ok {
port = v
}
return d.DialContext(ctx, "udp", strings.Replace(address, ":53", fmt.Sprintf(":%s", port), 1))
}
resolver := &net.Resolver{PreferGo: true, Dial: dialerFunc}
dialer := net.Dialer{Resolver: resolver}
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: dialer.Dial,
DialContext: dialer.DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
return &http.Client{Transport: transport}
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
cmd/load_airtable/main.go
|
package main
import (
"bytes"
"encoding/json"
"os"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/City-Bureau/chicovidchat/pkg/directory"
)
func handler(request events.CloudWatchEvent) error {
airtableBase := os.Getenv("AIRTABLE_BASE")
airtableTable := os.Getenv("AIRTABLE_TABLE")
airtableKey := os.Getenv("AIRTABLE_KEY")
records, err := directory.LoadAirtableResources(airtableBase, airtableTable, airtableKey)
if err != nil {
return err
}
recordsJSON, jsonErr := json.Marshal(records)
if jsonErr != nil {
return jsonErr
}
client, _ := session.NewSession()
_, err = s3.New(client).PutObject(&s3.PutObjectInput{
Bucket: aws.String(os.Getenv("S3_BUCKET")),
Key: aws.String("latest.json"),
ACL: aws.String("private"),
Body: bytes.NewReader(recordsJSON),
ContentType: aws.String("application/json"),
})
return err
}
func main() {
lambda.Start(handler)
}
|
[
"\"AIRTABLE_BASE\"",
"\"AIRTABLE_TABLE\"",
"\"AIRTABLE_KEY\"",
"\"S3_BUCKET\""
] |
[] |
[
"AIRTABLE_BASE",
"S3_BUCKET",
"AIRTABLE_KEY",
"AIRTABLE_TABLE"
] |
[]
|
["AIRTABLE_BASE", "S3_BUCKET", "AIRTABLE_KEY", "AIRTABLE_TABLE"]
|
go
| 4 | 0 | |
diff/diff_test.go
|
package diff
import (
"github.com/k0kubun/pp"
"io/ioutil"
"os"
"testing"
"github.com/walf443/mgr/sqlparser/mysql"
)
func init() {
if os.Getenv("DEBUG") == "" {
pp.SetDefaultOutput(ioutil.Discard)
}
}
func TestDiffDatabase(t *testing.T) {
before := "CREATE TABLE hoge (id int unsigned not null AUTO_INCREMENT); CREATE TABLE foo (id int unsigned not null AUTO_INCREMENT);"
after := "CREATE TABLE hoge (id int unsigned not null AUTO_INCREMENT); CREATE TABLE bar (id int unsigned not null AUTO_INCREMENT);"
beforeStmt := parseSQL(t, before)
afterStmt := parseSQL(t, after)
result := Extract(beforeStmt, afterStmt)
pp.Print(result)
if !checkTable(result.Added[0], "`bar`") {
t.Errorf("bar should be added")
}
if !checkTable(result.Removed[0], "`foo`") {
t.Errorf("foo should be added")
}
}
func TestDiffTable(t *testing.T) {
testDiffTable(
t,
"general case",
"CREATE TABLE hoge (id int unsigned not null AUTO_INCREMENT, foo int(10) unsigned not null, key foo (foo))",
"CREATE TABLE hoge (id int unsigned not null AUTO_INCREMENT, bar int(10) unsigned not null, key bar (bar))",
"ALTER TABLE `hoge` DROP `foo`, DROP INDEX `foo`, ADD `bar` INT(10) UNSIGNED NOT NULL , ADD INDEX `bar` (`bar`);",
)
testDiffTable(
t,
"same case",
"CREATE TABLE hoge (id int unsigned not null AUTO_INCREMENT, foo int(10) unsigned not null, key foo (foo))",
"CREATE TABLE hoge (id int unsigned not null AUTO_INCREMENT, foo int(10) unsigned not null, key foo (foo))",
"",
)
testDiffTable(
t,
"unique key",
"CREATE TABLE hoge (id int unsigned not null AUTO_INCREMENT, foo int(10) unsigned not null, unique key foo (foo))",
"CREATE TABLE hoge (id int unsigned not null AUTO_INCREMENT, bar int(10) unsigned not null, unique key bar (bar))",
"ALTER TABLE `hoge` DROP `foo`, DROP INDEX `foo`, ADD `bar` INT(10) UNSIGNED NOT NULL , ADD UNIQUE INDEX `bar` (`bar`);",
)
}
func testDiffTable(t *testing.T, name, before string, after string, expected string) {
beforeStmt := parseCreateTableStatement(t, before)
afterStmt := parseCreateTableStatement(t, after)
result := ExtractTableSchemaDifference(beforeStmt, afterStmt)
sql := result.ToQuery()
if sql != expected {
t.Errorf("failed to testDiffTable \"%s\":\nBefore schema:\n%s\nAfter schema:\n%s\nExpected diff: \t%s\nBut got: \t%s", name, before, after, expected, sql);
}
}
func parseSQL(t *testing.T, sql string) []mysql.Statement {
s := new(mysql.Scanner)
s.Init(sql)
stmt, err := mysql.Parse(s)
if err != nil {
t.Errorf("Faied to parse SQL: %s, error: %q", sql, err)
}
return stmt
}
func parseCreateTableStatement(t *testing.T, sql string) *mysql.CreateTableStatement {
stmt := parseSQL(t, sql)
v, ok := stmt[0].(*mysql.CreateTableStatement)
if !ok {
t.Errorf("Faied to extract CreateTableStatement")
}
return v
}
func checkTable(target mysql.Statement, tableName string) bool {
if v, ok := target.(*mysql.CreateTableStatement); ok {
return v.TableName.ToQuery() == tableName
}
return false
}
func checkColumn(target mysql.CreateDefinition, columnName string) bool {
if v, ok := target.(*mysql.CreateDefinitionColumn); ok {
return v.ColumnName.ToQuery() == columnName
}
return false
}
func checkIndex(target mysql.CreateDefinition, indexName string) bool {
if v, ok := target.(*mysql.CreateDefinitionIndex); ok {
return v.Name.ToQuery() == indexName
} else if v, ok := target.(*mysql.CreateDefinitionUniqueIndex); ok {
return v.Name.ToQuery() == indexName
}
return false
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
mywb/wsgi.py
|
"""
WSGI config for mywb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mywb.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
main.go
|
package main
import (
"crypto/tls"
"flag"
"fmt"
"log"
"net/http"
"os"
"strings"
)
type server struct {
site string
synoHost string
user string
pass string
}
func main() {
port := flag.Uint("port", 8080, "HTTP server will listen on this port")
site := flag.String("prefix", "http://www.openoffice.org/distribution/p2p/magnet.html?", "URL prefix for search terms")
dsmHost := flag.String("dsm-host", "192.168.1.2:5001", "host:port for Synology DSM API")
user := flag.String("user", "mytv", "Username for connection to Synology DownloadStation")
pass := flag.String("pass", "", "Password for connection to Synology DownloadStation (overrides env DS_PASS)")
flag.Parse()
if *pass == "" {
*pass = os.Getenv("DS_PASS")
}
s := &server{
site: *site,
synoHost: *dsmHost,
user: *user,
pass: *pass,
}
mux := http.NewServeMux()
mux.HandleFunc("/", s.handleSearch)
mux.HandleFunc("/add/", s.handleAdd)
mux.HandleFunc("/assets/", handleAssets)
srv := http.Server{
Addr: fmt.Sprintf(":%d", *port),
Handler: mux,
TLSConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
log.Fatal(srv.ListenAndServe())
}
func (s *server) handleAdd(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()
m := q.Get("magnet")
if m == "" {
w.WriteHeader(http.StatusBadRequest)
return
}
q.Del("magnet")
err := synoAddMagnet(s.synoHost, m+"&"+q.Encode(), s.user, s.pass)
if err != nil {
log.Print(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusCreated)
}
func (s *server) handleSearch(w http.ResponseWriter, r *http.Request) {
var magnets []magnet
// get query from request
query := r.URL.Query().Get("q")
resp := new(strings.Builder)
resp.WriteString(`<html>
<head>
<title>Magnet Search</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no" />
<link href="assets/magnet.css" rel="stylesheet" type="text/css" />
<script type="text/javascript" src="assets/magnet.js"></script>
</head>
<body>
<div><b>Torrent magnets available from %s%s:</b></div><br />`)
magnets, err := getMagnets(s.site + query)
if err != nil {
log.Printf("failed to get magnets: %v", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
for _, m := range magnets {
resp.WriteString(fmt.Sprintf(`<a href="" onClick="if(confirm('Download %s?'))sendRequest('add/?magnet=%s'); return false;">%s</a><br />`, m.Name, m.URL, m.Name))
}
resp.WriteString(`</body></html>`)
w.Write([]byte(fmt.Sprintf(resp.String(), s.site, query)))
}
func handleAssets(w http.ResponseWriter, r *http.Request) {
path := strings.TrimPrefix(r.URL.Path, "/assets/")
a, ok := assets[path]
if !ok {
w.WriteHeader(http.StatusNotFound)
return
}
w.Header().Set("Content-Type", a.contentType)
w.Write(a.content)
}
|
[
"\"DS_PASS\""
] |
[] |
[
"DS_PASS"
] |
[]
|
["DS_PASS"]
|
go
| 1 | 0 | |
contrib/document_cleanup/light_weight_document_cleanup_ICDAR2021/infer.py
|
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.models import model_from_json
from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
import os
from model import convert2gray
from utils import GetOverlappingBlocks, CombineToImage,load_tf_img,getListOfFiles
from tqdm import tqdm
import cv2
import numpy as np
#os.environ["CUDA_VISIBLE_DEVICES"]= '0'
#gpu_devices = tf.config.experimental.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(gpu_devices[0], True)
def prepare_data_blocks(blocks,size):
data = []
for block in blocks:
data.append(load_tf_img(block,size))
#blocks = []
return data
def infer(model_name,model_weight,target_dir,save_out_dir,block_size=(256,256),batch_size=1):
json_file = open(model_name, 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json,custom_objects={'relu6': tf.nn.relu6, 'convert2gray': convert2gray})
model.summary()
#exit(0)
model.compile(optimizer='adam', loss = 'mean_squared_error')
model.load_weights(model_weight)
if not os.path.exists(save_out_dir):
os.makedirs(save_out_dir)
M = block_size[0]
N = block_size[1]
part = 8
filelists = getListOfFiles(target_dir)
for filename in tqdm(filelists):
initial_filename = os.path.splitext(filename)[0]
in1_filename = os.path.join(target_dir,filename)
in_clr = cv2.imread(in1_filename,1)
in1_image = cv2.cvtColor(in_clr, cv2.COLOR_BGR2RGB)
in1_img = GetOverlappingBlocks(in1_image.copy(),M,N,part)
prepared_data_blocks = prepare_data_blocks(in1_img,M)
in1_img = []
out_img1 = model.predict(tf.convert_to_tensor(prepared_data_blocks), batch_size=batch_size)
num_img,ht,wd,ch_out = out_img1.shape
h,w,ch = in_clr.shape
if(ch_out>1):
c_image = cv2.cvtColor(CombineToImage(out_img1,h,w,ch_out), cv2.COLOR_RGB2BGR,part)
out_image_name = initial_filename + '.png'
name_fig = os.path.join(save_out_dir, out_image_name)
cv2.imwrite(name_fig,c_image)
else:
c_image = CombineToImage(out_img1,h,w,ch_out,part)
out_image_name = initial_filename + '.png'
name_fig = os.path.join(save_out_dir, out_image_name)
cv2.imwrite(name_fig,c_image)
def infer_image(model_name,model_weight,target_image,out_image_name,block_size=(256,256),batch_size=1):
json_file = open(model_name, 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json,custom_objects={'relu6': tf.nn.relu6})
#model = model_from_json(loaded_model_json,custom_objects={'HeNormal':tf.keras.initializers.he_normal(),'relu6': tf.nn.relu6, 'convert2gray': convert2gray,'Functional':tf.keras.models.Model})
model.summary()
#exit(0)
model.compile(optimizer='adam', loss = 'mean_squared_error')
model.load_weights(model_weight)
#if not os.path.exists(save_out_dir):
# os.makedirs(save_out_dir)
M = block_size[0]
N = block_size[1]
#print(M,N)
part = 8
in_clr = cv2.imread(target_image,1)
in1_image = cv2.cvtColor(in_clr, cv2.COLOR_BGR2RGB)
in1_img = GetOverlappingBlocks(in1_image.copy(),M,N,part)
#print(len(in1_img))
prepared_data_blocks = prepare_data_blocks(in1_img,M)
in1_img = []
#prepared_data_blocks = NewGetOverlappingBlocks(in_clr.copy(),M,N,part)
out_img1 = model.predict(tf.convert_to_tensor(prepared_data_blocks), batch_size=batch_size)
num_img,ht,wd,ch_out = out_img1.shape
h,w,ch = in_clr.shape
#print(num_img)
if(ch_out>1):
c_image = cv2.cvtColor(CombineToImage(out_img1,h,w,ch_out), cv2.COLOR_RGB2BGR,part)
cv2.imwrite(out_image_name,c_image)
else:
c_image = CombineToImage(out_img1,h,w,ch_out,part)
cv2.imwrite(out_image_name,c_image)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
vvhgvs/dataproviders/seqfetcher.py
|
# -*- coding: utf-8 -*-
"""provides sequencing fetching from NCBI and Ensembl
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import re
import bioutils.seqfetcher
from ..exceptions import HGVSDataNotAvailableError
_logger = logging.getLogger(__name__)
class SeqFetcher(object):
"""This class is intended primarily as a mixin for HGVS data providers
that doen't otherwise have access to sequence data. It uses the
fetch_seq() function in this module to fetch sequences from
several sources; see that function for details.
>> sf = SeqFetcher()
>> sf.fetch_seq('NP_056374.2',0,10)
'MESRETLSSS'
"""
def __init__(self):
# If HGVS_SEQREPO_DIR is defined, we use seqrepo for *all* sequences
# Otherwise, we fall back to remote sequence fetching
seqrepo_dir = os.environ.get("HGVS_SEQREPO_DIR")
if seqrepo_dir:
from biocommons.seqrepo import SeqRepo
sr = SeqRepo(seqrepo_dir, check_same_thread=False)
def _fetch_seq_seqrepo(ac, start_i=None, end_i=None):
return sr.fetch(ac, start_i, end_i)
self.fetcher = _fetch_seq_seqrepo
self.source = "SeqRepo ({})".format(seqrepo_dir)
else:
quit("""
V.V. usage can be quite heavy; the variant validator "test_configuration.py" asserts that
we should at least explicitly choose the location. Therefore, for vvhgvs, disable the silent
public fallback and explicitly set an external seqrepo location if remote data is needed.
""")
self.fetcher = bioutils.seqfetcher.fetch_seq
self.source = "bioutils.seqfetcher"
_logger.info("Fetching sequences with " + self.source)
def fetch_seq(self, ac, start_i=None, end_i=None):
try:
return self.fetcher(ac, start_i, end_i)
except Exception as ex:
raise HGVSDataNotAvailableError("Failed to fetch {ac} from {self.source} ({ex})".format(
ac=ac, ex=ex, self=self))
# <LICENSE>
# Copyright 2018 HGVS Contributors (https://github.com/biocommons/hgvs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
|
[] |
[] |
[
"HGVS_SEQREPO_DIR"
] |
[]
|
["HGVS_SEQREPO_DIR"]
|
python
| 1 | 0 | |
tests/unit/streamalert/apps/test_apps/test_gsuite.py
|
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
import socket
import ssl
from datetime import datetime, timedelta
import googleapiclient
from google.auth import exceptions
from mock import Mock, mock_open, patch
from moto import mock_ssm
from nose.tools import assert_equal, assert_false, assert_count_equal, assert_true, raises
from streamalert.apps._apps.gsuite import GSuiteReportsApp
from tests.unit.streamalert.apps.test_helpers import get_event, put_mock_params
from tests.unit.streamalert.shared.test_config import get_mock_lambda_context
@mock_ssm
@patch.object(GSuiteReportsApp, '_type', Mock(return_value='admin'))
@patch.object(GSuiteReportsApp, 'type', Mock(return_value='type'))
class TestGSuiteReportsApp:
"""Test class for the GSuiteReportsApp"""
# pylint: disable=protected-access
@patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
def setup(self):
"""Setup before each method"""
# pylint: disable=attribute-defined-outside-init
self._test_app_name = 'gsuite_admin'
put_mock_params(self._test_app_name)
self._event = get_event(self._test_app_name)
self._context = get_mock_lambda_context(self._test_app_name)
self._app = GSuiteReportsApp(self._event, self._context)
def test_sleep(self):
"""GSuiteReportsApp - Sleep Seconds"""
assert_equal(self._app._sleep_seconds(), 0)
def test_required_auth_info(self):
"""GSuiteReportsApp - Required Auth Info"""
assert_count_equal(list(self._app.required_auth_info().keys()),
{'delegation_email', 'keyfile'})
@patch('google.oauth2.service_account.Credentials.from_service_account_info',
Mock(return_value=True))
def test_keyfile_validator(self):
"""GSuiteReportsApp - Keyfile Validation, Success"""
validation_function = self._app.required_auth_info()['keyfile']['format']
data = {'test': 'keydata'}
mocker = mock_open(read_data=json.dumps(data))
with patch('builtins.open', mocker):
loaded_keydata = validation_function('fakepath')
assert_equal(loaded_keydata, data)
@patch('google.oauth2.service_account.Credentials.from_service_account_info')
def test_keyfile_validator_failure(self, cred_mock):
"""GSuiteReportsApp - Keyfile Validation, Failure"""
validation_function = self._app.required_auth_info()['keyfile']['format']
cred_mock.return_value = False
mocker = mock_open(read_data=json.dumps({'test': 'keydata'}))
with patch('builtins.open', mocker):
assert_false(validation_function('fakepath'))
cred_mock.assert_called()
@patch('google.oauth2.service_account.Credentials.from_service_account_info')
def test_keyfile_validator_bad_json(self, cred_mock):
"""GSuiteReportsApp - Keyfile Validation, Bad JSON"""
validation_function = self._app.required_auth_info()['keyfile']['format']
mocker = mock_open(read_data='invalid json')
with patch('builtins.open', mocker):
assert_false(validation_function('fakepath'))
cred_mock.assert_not_called()
@patch('google.oauth2.service_account.Credentials.from_service_account_info',
Mock(return_value=True))
def test_load_credentials(self):
"""GSuiteReportsApp - Load Credentials, Success"""
assert_true(self._app._load_credentials('fakedata'))
@patch('google.oauth2.service_account.Credentials.from_service_account_info')
def test_load_credentials_bad(self, cred_mock):
"""GSuiteReportsApp - Load Credentials, ValueError"""
cred_mock.side_effect = ValueError('Bad things happened')
assert_false(self._app._load_credentials('fakedata'))
@patch('streamalert.apps._apps.gsuite.GSuiteReportsApp._load_credentials', Mock())
@patch('streamalert.apps._apps.gsuite.googleapiclient.discovery.build')
def test_create_service(self, build_mock):
"""GSuiteReportsApp - Create Service, Success"""
build_mock.return_value.activities.return_value = True
assert_true(self._app._create_service())
@patch('logging.Logger.debug')
def test_create_service_exists(self, log_mock):
"""GSuiteReportsApp - Create Service, Exists"""
self._app._activities_service = True
assert_true(self._app._create_service())
log_mock.assert_called_with('[%s] Service already instantiated', self._app)
@patch('streamalert.apps._apps.gsuite.GSuiteReportsApp._load_credentials',
Mock(return_value=False))
def test_create_service_fail_creds(self):
"""GSuiteReportsApp - Create Service, Credential Failure"""
assert_false(self._app._create_service())
@patch('streamalert.apps._apps.gsuite.GSuiteReportsApp._load_credentials', Mock())
@patch('logging.Logger.exception')
@patch('streamalert.apps._apps.gsuite.googleapiclient.discovery.build')
def test_create_service_api_error(self, build_mock, log_mock):
"""GSuiteReportsApp - Create Service, Google API Error"""
build_mock.side_effect = googleapiclient.errors.Error('This is bad')
assert_false(self._app._create_service())
log_mock.assert_called_with('[%s] Failed to build discovery service', self._app)
@patch('streamalert.apps._apps.gsuite.GSuiteReportsApp._load_credentials', Mock())
@patch('logging.Logger.exception')
@patch('streamalert.apps._apps.gsuite.googleapiclient.discovery.build')
def test_create_service_ssl_error(self, build_mock, log_mock):
"""GSuiteReportsApp - Create Service, SSL Handshake Error"""
build_mock.side_effect = ssl.SSLError('_ssl.c:574: The handshake operation timed out')
assert_false(self._app._create_service())
log_mock.assert_called_with('[%s] Failed to build discovery service', self._app)
@patch('streamalert.apps._apps.gsuite.GSuiteReportsApp._load_credentials', Mock())
@patch('logging.Logger.exception')
@patch('streamalert.apps._apps.gsuite.googleapiclient.discovery.build')
def test_create_service_socket_error(self, build_mock, log_mock):
"""GSuiteReportsApp - Create Service, Socket Timeout"""
build_mock.side_effect = socket.timeout('timeout: timed out')
assert_false(self._app._create_service())
log_mock.assert_called_with('[%s] Failed to build discovery service', self._app)
def test_gather_logs(self):
"""GSuiteReportsApp - Gather Logs, Success"""
with patch.object(self._app, '_activities_service') as service_mock:
payload = {
'kind': 'reports#auditActivities',
'nextPageToken': 'the next page\'s token',
'items': self._get_sample_logs(10)
}
service_mock.list.return_value.execute.return_value = payload
assert_equal(len(self._app._gather_logs()), 10)
assert_equal(self._app._last_timestamp, '2011-06-17T15:39:18.460000Z')
assert_equal(self._app._context['last_event_ids'], [-12345678901234567890])
@patch('streamalert.apps._apps.gsuite.GSuiteReportsApp._create_service',
Mock(return_value=True))
@patch('logging.Logger.exception')
def test_gather_logs_http_error(self, log_mock):
"""GSuiteReportsApp - Gather Logs, Google API HTTP Error"""
with patch.object(self._app, '_activities_service') as service_mock:
error = googleapiclient.errors.HttpError('response', 'bad'.encode())
service_mock.list.return_value.execute.side_effect = error
assert_false(self._app._gather_logs())
log_mock.assert_called_with('[%s] Failed to execute activities listing', self._app)
@patch('streamalert.apps._apps.gsuite.GSuiteReportsApp._create_service',
Mock(return_value=True))
@patch('logging.Logger.exception')
def test_gather_logs_token_error(self, log_mock):
"""GSuiteReportsApp - Gather Logs, Google API Token Error"""
with patch.object(self._app, '_activities_service') as service_mock:
error = exceptions.RefreshError('bad')
service_mock.list.return_value.execute.side_effect = error
assert_false(self._app._gather_logs())
log_mock.assert_called_with('[%s] Failed to execute activities listing', self._app)
@patch('streamalert.apps._apps.gsuite.GSuiteReportsApp._create_service',
Mock(return_value=True))
@patch('logging.Logger.exception')
def test_gather_logs_ssl_error(self, log_mock):
"""GSuiteReportsApp - Gather Logs, SSL Handshake Error"""
with patch.object(self._app, '_activities_service') as service_mock:
error = ssl.SSLError('_ssl.c:574: The handshake operation timed out')
service_mock.list.return_value.execute.side_effect = error
assert_false(self._app._gather_logs())
log_mock.assert_called_with('[%s] Failed to execute activities listing', self._app)
@patch('streamalert.apps._apps.gsuite.GSuiteReportsApp._create_service',
Mock(return_value=True))
@patch('logging.Logger.exception')
def test_gather_logs_socket_error(self, log_mock):
"""GSuiteReportsApp - Gather Logs, Socket Timeout"""
with patch.object(self._app, '_activities_service') as service_mock:
error = socket.timeout('timeout: timed out')
service_mock.list.return_value.execute.side_effect = error
assert_false(self._app._gather_logs())
log_mock.assert_called_with('[%s] Failed to execute activities listing', self._app)
@patch('streamalert.apps._apps.gsuite.GSuiteReportsApp._load_credentials',
Mock(return_value=False))
def test_gather_logs_no_service(self):
"""GSuiteReportsApp - Gather Logs, No Service"""
with patch.object(self._app, '_activities_service') as service_mock:
self._app._activities_service = False
assert_false(self._app._gather_logs())
service_mock.list.assert_not_called()
@patch('streamalert.apps._apps.gsuite.GSuiteReportsApp._create_service',
Mock(return_value=True))
@patch('logging.Logger.error')
def test_gather_logs_no_results(self, log_mock):
"""GSuiteReportsApp - Gather Logs, No Results From API"""
with patch.object(self._app, '_activities_service') as service_mock:
service_mock.list.return_value.execute.return_value = None
assert_false(self._app._gather_logs())
log_mock.assert_called_with(
'[%s] No results received from the G Suite API request', self._app
)
@patch('streamalert.apps._apps.gsuite.GSuiteReportsApp._create_service',
Mock(return_value=True))
@patch('logging.Logger.info')
def test_gather_logs_empty_items(self, log_mock):
"""GSuiteReportsApp - Gather Logs, Empty Activities List"""
with patch.object(self._app, '_activities_service') as service_mock:
payload = {
'kind': 'reports#auditActivities',
'nextPageToken': 'the next page\'s token',
'items': []
}
service_mock.list.return_value.execute.return_value = payload
assert_false(self._app._gather_logs())
log_mock.assert_called_with(
'[%s] No logs in response from G Suite API request', self._app
)
def test_gather_logs_remove_duplicate_events(self):
"""GSuiteReportsApp - Gather Logs, Remove duplicate events"""
with patch.object(self._app, '_activities_service') as service_mock:
payload = {
'kind': 'reports#auditActivities',
'nextPageToken': None,
'items': self._get_sample_logs(10)
}
service_mock.list.return_value.execute.return_value = payload
self._app._context['last_event_ids'] = [
-12345678901234567890 + 9,
-12345678901234567890 + 8
]
assert_equal(len(self._app._gather_logs()), 8)
assert_equal(self._app._last_timestamp, '2011-06-17T15:39:18.460000Z')
assert_equal(self._app._more_to_poll, False)
assert_equal(self._app._context['last_event_ids'], [-12345678901234567890])
@staticmethod
def _get_sample_logs(count):
"""Helper function for returning sample gsuite (admin) logs"""
def _get_timestamp(start_timestamp, subtract_seconds):
timestamp = datetime.strptime(start_timestamp, GSuiteReportsApp.date_formatter())
timestamp -= timedelta(seconds=subtract_seconds)
return timestamp.strftime(GSuiteReportsApp.date_formatter())
return [{
'kind': 'audit#activity',
'id': {
'time': _get_timestamp('2011-06-17T15:39:18.460000Z', index),
'uniqueQualifier': -12345678901234567890 + index,
'applicationName': 'admin',
'customerId': 'C03az79cb'
},
'actor': {
'callerType': 'USER',
'email': '[email protected]',
'profileId': 'user\'s unique G Suite profile ID',
'key': 'consumer key of requestor in OAuth 2LO requests'
},
'ownerDomain': 'example.com',
'ipAddress': 'user\'s IP address',
'events': [
{
'type': 'GROUP_SETTINGS',
'name': 'CHANGE_GROUP_SETTING',
'parameters': [
{
'name': 'SETTING_NAME',
'value': 'WHO_CAN_JOIN',
'intValue': 'integer value of parameter',
'boolValue': 'boolean value of parameter'
}
]
}
]
} for index in range(count)]
@raises(NotImplementedError)
def test_type_not_implemented():
"""GSuiteReportsApp - Subclass Type Not Implemented"""
# pylint: disable=protected-access,abstract-method
class GSuiteFakeApp(GSuiteReportsApp):
"""Fake GSuiteReports app that should raise a NotImplementedError"""
GSuiteFakeApp._type()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
k8sclient/k8sclient.go
|
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8sclient
import (
"encoding/json"
"fmt"
"net"
"os"
"regexp"
"strings"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/skel"
cnitypes "github.com/containernetworking/cni/pkg/types"
"github.com/intel/multus-cni/kubeletclient"
"github.com/intel/multus-cni/logging"
"github.com/intel/multus-cni/types"
nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
netclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1"
netutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils"
)
const (
resourceNameAnnot = "k8s.v1.cni.cncf.io/resourceName"
defaultNetAnnot = "v1.multus-cni.io/default-network"
networkAttachmentAnnot = "k8s.v1.cni.cncf.io/networks"
)
// NoK8sNetworkError indicates error, no network in kubernetes
type NoK8sNetworkError struct {
message string
}
// ClientInfo contains information given from k8s client
type ClientInfo struct {
Client kubernetes.Interface
NetClient netclient.K8sCniCncfIoV1Interface
EventBroadcaster record.EventBroadcaster
EventRecorder record.EventRecorder
}
// AddPod adds pod into kubernetes
func (c *ClientInfo) AddPod(pod *v1.Pod) (*v1.Pod, error) {
return c.Client.Core().Pods(pod.ObjectMeta.Namespace).Create(pod)
}
// GetPod gets pod from kubernetes
func (c *ClientInfo) GetPod(namespace, name string) (*v1.Pod, error) {
return c.Client.Core().Pods(namespace).Get(name, metav1.GetOptions{})
}
// DeletePod deletes a pod from kubernetes
func (c *ClientInfo) DeletePod(namespace, name string) error {
return c.Client.Core().Pods(namespace).Delete(name, &metav1.DeleteOptions{})
}
// AddNetAttachDef adds net-attach-def into kubernetes
func (c *ClientInfo) AddNetAttachDef(netattach *nettypes.NetworkAttachmentDefinition) (*nettypes.NetworkAttachmentDefinition, error) {
return c.NetClient.NetworkAttachmentDefinitions(netattach.ObjectMeta.Namespace).Create(netattach)
}
// Eventf puts event into kubernetes events
func (c *ClientInfo) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
if c != nil && c.EventRecorder != nil {
c.EventRecorder.Eventf(object, eventtype, reason, messageFmt, args...)
}
}
func (e *NoK8sNetworkError) Error() string { return string(e.message) }
// SetNetworkStatus sets network status into Pod annotation
func SetNetworkStatus(client *ClientInfo, k8sArgs *types.K8sArgs, netStatus []nettypes.NetworkStatus, conf *types.NetConf) error {
var err error
logging.Debugf("SetNetworkStatus: %v, %v, %v, %v", client, k8sArgs, netStatus, conf)
client, err = GetK8sClient(conf.Kubeconfig, client)
if err != nil {
return logging.Errorf("SetNetworkStatus: %v", err)
}
if client == nil || client.Client == nil {
if len(conf.Delegates) == 0 {
// No available kube client and no delegates, we can't do anything
return logging.Errorf("SetNetworkStatus: must have either Kubernetes config or delegates")
}
logging.Debugf("SetNetworkStatus: kube client info is not defined, skip network status setup")
return nil
}
podName := string(k8sArgs.K8S_POD_NAME)
podNamespace := string(k8sArgs.K8S_POD_NAMESPACE)
pod, err := client.GetPod(podNamespace, podName)
if err != nil {
return logging.Errorf("SetNetworkStatus: failed to query the pod %v in out of cluster comm: %v", podName, err)
}
if netStatus != nil {
err = netutils.SetNetworkStatus(client.Client, pod, netStatus)
if err != nil {
return logging.Errorf("SetNetworkStatus: failed to update the pod %v in out of cluster comm: %v", podName, err)
}
}
return nil
}
func parsePodNetworkObjectName(podnetwork string) (string, string, string, error) {
var netNsName string
var netIfName string
var networkName string
logging.Debugf("parsePodNetworkObjectName: %s", podnetwork)
slashItems := strings.Split(podnetwork, "/")
if len(slashItems) == 2 {
netNsName = strings.TrimSpace(slashItems[0])
networkName = slashItems[1]
} else if len(slashItems) == 1 {
networkName = slashItems[0]
} else {
return "", "", "", logging.Errorf("parsePodNetworkObjectName: Invalid network object (failed at '/')")
}
atItems := strings.Split(networkName, "@")
networkName = strings.TrimSpace(atItems[0])
if len(atItems) == 2 {
netIfName = strings.TrimSpace(atItems[1])
} else if len(atItems) != 1 {
return "", "", "", logging.Errorf("parsePodNetworkObjectName: Invalid network object (failed at '@')")
}
// Check and see if each item matches the specification for valid attachment name.
// "Valid attachment names must be comprised of units of the DNS-1123 label format"
// [a-z0-9]([-a-z0-9]*[a-z0-9])?
// And we allow at (@), and forward slash (/) (units separated by commas)
// It must start and end alphanumerically.
allItems := []string{netNsName, networkName, netIfName}
for i := range allItems {
matched, _ := regexp.MatchString("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$", allItems[i])
if !matched && len([]rune(allItems[i])) > 0 {
return "", "", "", logging.Errorf(fmt.Sprintf("parsePodNetworkObjectName: Failed to parse: one or more items did not match comma-delimited format (must consist of lower case alphanumeric characters). Must start and end with an alphanumeric character), mismatch @ '%v'", allItems[i]))
}
}
logging.Debugf("parsePodNetworkObjectName: parsed: %s, %s, %s", netNsName, networkName, netIfName)
return netNsName, networkName, netIfName, nil
}
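// Illustrative inputs for the parser above (hypothetical names, shown only to
// document the accepted forms):
//   "macvlan-conf"                  -> ("", "macvlan-conf", "")
//   "kube-system/macvlan-conf"      -> ("kube-system", "macvlan-conf", "")
//   "kube-system/macvlan-conf@eth1" -> ("kube-system", "macvlan-conf", "eth1")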
func parsePodNetworkAnnotation(podNetworks, defaultNamespace string) ([]*types.NetworkSelectionElement, error) {
var networks []*types.NetworkSelectionElement
logging.Debugf("parsePodNetworkAnnotation: %s, %s", podNetworks, defaultNamespace)
if podNetworks == "" {
return nil, logging.Errorf("parsePodNetworkAnnotation: pod annotation does not have \"network\" as key")
}
if strings.IndexAny(podNetworks, "[{\"") >= 0 {
if err := json.Unmarshal([]byte(podNetworks), &networks); err != nil {
return nil, logging.Errorf("parsePodNetworkAnnotation: failed to parse pod Network Attachment Selection Annotation JSON format: %v", err)
}
} else {
// Comma-delimited list of network attachment object names
for _, item := range strings.Split(podNetworks, ",") {
// Remove leading and trailing whitespace.
item = strings.TrimSpace(item)
// Parse network name (i.e. <namespace>/<network name>@<ifname>)
netNsName, networkName, netIfName, err := parsePodNetworkObjectName(item)
if err != nil {
return nil, logging.Errorf("parsePodNetworkAnnotation: %v", err)
}
networks = append(networks, &types.NetworkSelectionElement{
Name: networkName,
Namespace: netNsName,
InterfaceRequest: netIfName,
})
}
}
for _, n := range networks {
if n.Namespace == "" {
n.Namespace = defaultNamespace
}
if n.MacRequest != "" {
// validate MAC address
if _, err := net.ParseMAC(n.MacRequest); err != nil {
return nil, logging.Errorf("parsePodNetworkAnnotation: failed to mac: %v", err)
}
}
if n.IPRequest != nil {
for _, ip := range n.IPRequest {
// validate IP address
if strings.Contains(ip, "/") {
if _, _, err := net.ParseCIDR(ip); err != nil {
return nil, logging.Errorf("failed to parse CIDR %q: %v", ip, err)
}
} else if net.ParseIP(ip) == nil {
return nil, logging.Errorf("failed to parse IP address %q", ip)
}
}
}
// compatibility pre v3.2, will be removed in v4.0
if n.DeprecatedInterfaceRequest != "" && n.InterfaceRequest == "" {
n.InterfaceRequest = n.DeprecatedInterfaceRequest
}
}
return networks, nil
}
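// For reference, the annotation values handled above come in two shapes
// (names and values are illustrative only):
//   comma-delimited: "macvlan-conf-1, kube-system/macvlan-conf-2@eth1"
//   JSON list:       [{"name": "macvlan-conf-1"}, {"name": "macvlan-conf-2", "namespace": "kube-system", "interface": "eth1"}]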
func getKubernetesDelegate(client *ClientInfo, net *types.NetworkSelectionElement, confdir string, pod *v1.Pod, resourceMap map[string]*types.ResourceInfo) (*types.DelegateNetConf, map[string]*types.ResourceInfo, error) {
logging.Debugf("getKubernetesDelegate: %v, %v, %s, %v, %v", client, net, confdir, pod, resourceMap)
customResource, err := client.NetClient.NetworkAttachmentDefinitions(net.Namespace).Get(net.Name, metav1.GetOptions{})
if err != nil {
errMsg := fmt.Sprintf("cannot find a network-attachment-definition (%s) in namespace (%s): %v", net.Name, net.Namespace, err)
if client != nil {
client.Eventf(pod, v1.EventTypeWarning, "NoNetworkFound", errMsg)
}
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: " + errMsg)
}
// Get resourceName annotation from NetworkAttachmentDefinition
deviceID := ""
resourceName, ok := customResource.GetAnnotations()[resourceNameAnnot]
if ok && pod.Name != "" && pod.Namespace != "" {
// ResourceName annotation is found; try to get device info from resourceMap
logging.Debugf("getKubernetesDelegate: found resourceName annotation : %s", resourceName)
if resourceMap == nil {
ck, err := kubeletclient.GetResourceClient()
if err != nil {
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: failed to get a ResourceClient instance: %v", err)
}
resourceMap, err = ck.GetPodResourceMap(pod)
if err != nil {
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: failed to get resourceMap from ResourceClient: %v", err)
}
logging.Debugf("getKubernetesDelegate: resourceMap instance: %+v", resourceMap)
}
entry, ok := resourceMap[resourceName]
if ok {
if idCount := len(entry.DeviceIDs); idCount > 0 && idCount > entry.Index {
deviceID = entry.DeviceIDs[entry.Index]
logging.Debugf("getKubernetesDelegate: podName: %s deviceID: %s", pod.Name, deviceID)
entry.Index++ // increment Index for next delegate
}
}
}
configBytes, err := netutils.GetCNIConfig(customResource, confdir)
if err != nil {
return nil, resourceMap, err
}
delegate, err := types.LoadDelegateNetConf(configBytes, net, deviceID)
if err != nil {
return nil, resourceMap, err
}
return delegate, resourceMap, nil
}
// GetK8sArgs gets k8s related args from CNI args
func GetK8sArgs(args *skel.CmdArgs) (*types.K8sArgs, error) {
k8sArgs := &types.K8sArgs{}
logging.Debugf("GetK8sArgs: %v", args)
err := cnitypes.LoadArgs(args.Args, k8sArgs)
if err != nil {
return nil, err
}
return k8sArgs, nil
}
// TryLoadPodDelegates attempts to load Kubernetes-defined delegates and add them to the Multus config.
// Returns the number of Kubernetes-defined delegates added or an error.
func TryLoadPodDelegates(k8sArgs *types.K8sArgs, conf *types.NetConf, clientInfo *ClientInfo) (int, *v1.Pod, *ClientInfo, error) {
var err error
logging.Debugf("TryLoadPodDelegates: %v, %v, %v", k8sArgs, conf, clientInfo)
clientInfo, err = GetK8sClient(conf.Kubeconfig, clientInfo)
if err != nil {
return 0, nil, nil, err
}
if clientInfo == nil {
if len(conf.Delegates) == 0 {
// No available kube client and no delegates, we can't do anything
return 0, nil, nil, logging.Errorf("TryLoadPodDelegates: must have either Kubernetes config or delegates")
}
return 0, nil, nil, nil
}
// Get the pod info. If we cannot get it, use the cached delegates
pod, err := clientInfo.GetPod(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
if err != nil {
logging.Debugf("TryLoadPodDelegates: Err in loading K8s cluster default network from pod annotation: %v, use cached delegates", err)
return 0, nil, nil, nil
}
delegate, err := tryLoadK8sPodDefaultNetwork(clientInfo, pod, conf)
if err != nil {
return 0, nil, nil, logging.Errorf("TryLoadPodDelegates: error in loading K8s cluster default network from pod annotation: %v", err)
}
if delegate != nil {
logging.Debugf("TryLoadPodDelegates: Overwrite the cluster default network with %v from pod annotations", delegate)
conf.Delegates[0] = delegate
}
networks, err := GetPodNetwork(pod)
if networks != nil {
delegates, err := GetNetworkDelegates(clientInfo, pod, networks, conf.ConfDir, conf.NamespaceIsolation)
if err != nil {
if _, ok := err.(*NoK8sNetworkError); ok {
return 0, nil, clientInfo, nil
}
return 0, nil, nil, logging.Errorf("TryLoadPodDelegates: error in getting k8s network for pod: %v", err)
}
if err = conf.AddDelegates(delegates); err != nil {
return 0, nil, nil, err
}
// Check gatewayRequest is configured in delegates
// and mark its config if gateway filter is required
isGatewayConfigured := false
for _, delegate := range conf.Delegates {
if delegate.GatewayRequest != nil {
isGatewayConfigured = true
break
}
}
if isGatewayConfigured {
types.CheckGatewayConfig(conf.Delegates)
}
return len(delegates), pod, clientInfo, nil
}
return 0, pod, clientInfo, nil
}
// GetK8sClient gets client info from kubeconfig
func GetK8sClient(kubeconfig string, kubeClient *ClientInfo) (*ClientInfo, error) {
logging.Debugf("GetK8sClient: %s, %v", kubeconfig, kubeClient)
// If we get a valid kubeClient (eg from testcases) just return that
// one.
if kubeClient != nil {
return kubeClient, nil
}
var err error
var config *rest.Config
// Otherwise try to create a kubeClient from a given kubeConfig
if kubeconfig != "" {
// uses the current context in kubeconfig
config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, logging.Errorf("GetK8sClient: failed to get context for the kubeconfig %v: %v", kubeconfig, err)
}
} else if os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "" {
// Try in-cluster config where multus might be running in a kubernetes pod
config, err = rest.InClusterConfig()
if err != nil {
return nil, logging.Errorf("GetK8sClient: failed to get context for in-cluster kube config: %v", err)
}
} else {
// No kubernetes config; assume we shouldn't talk to Kube at all
return nil, nil
}
// Prefer the protobuf wire format for API requests (falling back to JSON)
config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
config.ContentType = "application/vnd.kubernetes.protobuf"
// Set the config timeout to one minute.
config.Timeout = time.Minute
// creates the clientset
client, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
netclient, err := netclient.NewForConfig(config)
if err != nil {
return nil, err
}
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(klog.Infof)
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "multus"})
return &ClientInfo{
Client: client,
NetClient: netclient,
EventBroadcaster: broadcaster,
EventRecorder: recorder,
}, nil
}
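// Resolution order implemented above, summarised: an explicit kubeconfig path
// wins; otherwise in-cluster config is used when KUBERNETES_SERVICE_HOST and
// KUBERNETES_SERVICE_PORT are set; otherwise no client is created and a nil
// ClientInfo is returned.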
// GetPodNetwork gets net-attach-def annotation from pod
func GetPodNetwork(pod *v1.Pod) ([]*types.NetworkSelectionElement, error) {
logging.Debugf("GetPodNetwork: %v", pod)
netAnnot := pod.Annotations[networkAttachmentAnnot]
defaultNamespace := pod.ObjectMeta.Namespace
if len(netAnnot) == 0 {
return nil, &NoK8sNetworkError{"no kubernetes network found"}
}
networks, err := parsePodNetworkAnnotation(netAnnot, defaultNamespace)
if err != nil {
return nil, err
}
return networks, nil
}
// GetNetworkDelegates returns delegatenetconf from net-attach-def annotation in pod
func GetNetworkDelegates(k8sclient *ClientInfo, pod *v1.Pod, networks []*types.NetworkSelectionElement, confdir string, confnamespaceIsolation bool) ([]*types.DelegateNetConf, error) {
logging.Debugf("GetNetworkDelegates: %v, %v, %v, %v, %v", k8sclient, pod, networks, confdir, confnamespaceIsolation)
// resourceMap holds Pod device allocation information; only initialized if the CRD contains the 'resourceName' annotation.
// This will only be initialized once and all delegate objects can reference this to look up device info.
var resourceMap map[string]*types.ResourceInfo
// Read all network objects referenced by 'networks'
var delegates []*types.DelegateNetConf
defaultNamespace := pod.ObjectMeta.Namespace
for _, net := range networks {
// The pod's namespace (stored as defaultNamespace) may not equal the annotation's target namespace in net.Namespace.
// When namespace isolation is enabled, such a mismatch should be an error.
if confnamespaceIsolation {
if defaultNamespace != net.Namespace {
// There is an exception however, we always allow a reference to the default namespace.
if net.Namespace != "default" {
return nil, logging.Errorf("GetNetworkDelegates: namespace isolation enabled, annotation violates permission, pod is in namespace %v but refers to target namespace %v", defaultNamespace, net.Namespace)
}
}
}
delegate, updatedResourceMap, err := getKubernetesDelegate(k8sclient, net, confdir, pod, resourceMap)
if err != nil {
return nil, logging.Errorf("GetNetworkDelegates: failed getting the delegate: %v", err)
}
delegates = append(delegates, delegate)
resourceMap = updatedResourceMap
}
return delegates, nil
}
func getDefaultNetDelegateCRD(client *ClientInfo, net, confdir, namespace string) (*types.DelegateNetConf, error) {
logging.Debugf("getDefaultNetDelegateCRD: %v, %v, %s, %s", client, net, confdir, namespace)
customResource, err := client.NetClient.NetworkAttachmentDefinitions(namespace).Get(net, metav1.GetOptions{})
if err != nil {
return nil, logging.Errorf("getDefaultNetDelegateCRD: failed to get network resource: %v", err)
}
configBytes, err := netutils.GetCNIConfig(customResource, confdir)
if err != nil {
return nil, err
}
delegate, err := types.LoadDelegateNetConf(configBytes, nil, "")
if err != nil {
return nil, err
}
return delegate, nil
}
func getNetDelegate(client *ClientInfo, netname, confdir, namespace string) (*types.DelegateNetConf, error) {
logging.Debugf("getNetDelegate: %v, %v, %v, %s", client, netname, confdir, namespace)
// option1) search CRD object for the network
delegate, err := getDefaultNetDelegateCRD(client, netname, confdir, namespace)
if err == nil {
return delegate, nil
}
// option2) search CNI json config file
var configBytes []byte
configBytes, err = netutils.GetCNIConfigFromFile(netname, confdir)
if err == nil {
delegate, err := types.LoadDelegateNetConf(configBytes, nil, "")
if err != nil {
return nil, err
}
return delegate, nil
}
// option3) search directory
fInfo, err := os.Stat(netname)
if err == nil {
if fInfo.IsDir() {
files, err := libcni.ConfFiles(netname, []string{".conf", ".conflist"})
if err != nil {
return nil, err
}
if len(files) > 0 {
var configBytes []byte
configBytes, err = netutils.GetCNIConfigFromFile("", netname)
if err == nil {
delegate, err := types.LoadDelegateNetConf(configBytes, nil, "")
if err != nil {
return nil, err
}
return delegate, nil
}
return nil, err
}
}
}
return nil, logging.Errorf("getNetDelegate: cannot find network: %v", netname)
}
// GetDefaultNetworks parses 'defaultNetwork' config, gets network json and put it into netconf.Delegates.
func GetDefaultNetworks(k8sArgs *types.K8sArgs, conf *types.NetConf, kubeClient *ClientInfo) error {
logging.Debugf("GetDefaultNetworks: %v, %v, %v", k8sArgs, conf, kubeClient)
var delegates []*types.DelegateNetConf
kubeClient, err := GetK8sClient(conf.Kubeconfig, kubeClient)
if err != nil {
return err
}
if kubeClient == nil {
if len(conf.Delegates) == 0 {
// No available kube client and no delegates, we can't do anything
return logging.Errorf("GetDefaultNetworks: must have either Kubernetes config or delegates")
}
return nil
}
delegate, err := getNetDelegate(kubeClient, conf.ClusterNetwork, conf.ConfDir, conf.MultusNamespace)
if err != nil {
return err
}
delegate.MasterPlugin = true
delegates = append(delegates, delegate)
// Pods in system namespaces do not get the additional default networks for now.
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAMESPACE), conf.SystemNamespaces) {
for _, netname := range conf.DefaultNetworks {
delegate, err := getNetDelegate(kubeClient, netname, conf.ConfDir, conf.MultusNamespace)
if err != nil {
return err
}
delegates = append(delegates, delegate)
}
}
if err = conf.AddDelegates(delegates); err != nil {
return err
}
return nil
}
// tryLoadK8sPodDefaultNetwork get pod default network from annotations
func tryLoadK8sPodDefaultNetwork(kubeClient *ClientInfo, pod *v1.Pod, conf *types.NetConf) (*types.DelegateNetConf, error) {
var netAnnot string
logging.Debugf("tryLoadK8sPodDefaultNetwork: %v, %v, %v", kubeClient, pod, conf)
netAnnot, ok := pod.Annotations[defaultNetAnnot]
if !ok {
logging.Debugf("tryLoadK8sPodDefaultNetwork: Pod default network annotation is not defined")
return nil, nil
}
// The CRD object of default network should only be defined in multusNamespace
networks, err := parsePodNetworkAnnotation(netAnnot, conf.MultusNamespace)
if err != nil {
return nil, logging.Errorf("tryLoadK8sPodDefaultNetwork: failed to parse CRD object: %v", err)
}
if len(networks) > 1 {
return nil, logging.Errorf("tryLoadK8sPodDefaultNetwork: more than one default network is specified: %s", netAnnot)
}
delegate, _, err := getKubernetesDelegate(kubeClient, networks[0], conf.ConfDir, pod, nil)
if err != nil {
return nil, logging.Errorf("tryLoadK8sPodDefaultNetwork: failed getting the delegate: %v", err)
}
delegate.MasterPlugin = true
return delegate, nil
}
| ["\"KUBERNETES_SERVICE_HOST\"", "\"KUBERNETES_SERVICE_PORT\""] | [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | go | 2 | 0 | |
website/virtch/settings/base.py
|
"""Base settings"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
SITE_NAME = 'Virtch.io'
SECRET_KEY = os.environ['SECRET_KEY']
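# SECRET_KEY is read straight from the environment; importing this settings
# module raises KeyError if it is unset, e.g. set `export SECRET_KEY=...`
# (value deployment-specific) before running manage.py.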
# ======== MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Dylan', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# ======== END MANAGER CONFIGURATION
# Application definition
INSTALLED_APPS = [
# django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# tailwind
'tailwind',
'theme',
'crispy_forms',
'crispy_tailwind',
# virtch
'virtch',
'authentication',
'world',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware', # after session and cache, before common
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'virtch.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'virtch.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
# TIME_ZONE = 'America/Toronto'  # overridden by TIME_ZONE = 'UTC' below
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-ca'
# LANGUAGES = [
# ('en', _('English')),
# ('fr', _('French')),
# ]
# LOCALE_PATHS = ([
# 'locale'
# ])
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Tailwind styles
TAILWIND_APP_NAME = 'theme'
CRISPY_ALLOWED_TEMPLATE_PACKS = 'tailwind'
CRISPY_TEMPLATE_PACK = 'tailwind'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/srv/http/virtch-static'
# Media files (User uploaded)
MEDIA_URL = '/media/'
MEDIA_ROOT = '/srv/http/virtch-media'
# Rate limit error page
#RATELIMIT_VIEW = 'authentication.views.rate_limit_view'
# User model
AUTH_USER_MODEL = 'world.User'
# Hardening
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
CSRF_COOKIE_SAMESITE = 'Strict'
# Content Security Policy Reports
#CSP_REPORT_URI = reverse_lazy('csp-report')
#CSP_REPORTS_EMAIL_ADMINS = False
# ======== LOGGING CONFIGURATION (Lift)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'filters': {
# 'require_debug_false': {
# '()': 'django.utils.log.RequireDebugFalse'
# }
# },
# 'handlers': {
# 'mail_admins': {
# 'level': 'ERROR',
# 'filters': ['require_debug_false'],
# 'class': 'django.utils.log.AdminEmailHandler'
# },
# },
# }
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'formatters': {
'standard': {
'format': '[%(levelname)s] %(asctime)s PID %(process)d: %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
'syslog': {
'format': '%(process)-5d %(thread)d %(name)-50s %(levelname)-8s %(message)s'
},
},
'handlers': {
'null': {
'class': 'logging.NullHandler',
},
'syslog': {
'level': 'DEBUG',
'class': 'logging.handlers.SysLogHandler',
'facility': 'local7',
'address': '/dev/log',
'formatter': 'syslog'
},
# 'debug': {
# 'level': 'DEBUG',
# 'filters': ['require_debug_true'],
# 'class': 'logging.handlers.TimedRotatingFileHandler',
# 'filename': '/var/log/uwsgi/virtch/debug.log',
# 'when': 'midnight',
# 'backupCount': 7,
# 'formatter': 'standard',
# },
# 'sql': {
# 'level': 'DEBUG',
# 'class': 'logging.handlers.TimedRotatingFileHandler',
# 'filename': '/var/log/uwsgi/virtch/sql.log',
# 'when': 'midnight',
# 'backupCount': 7,
# 'formatter': 'standard',
# },
'request_error': {
'level': 'ERROR',
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': '/var/log/uwsgi/virtch/5xx.log',
'when': 'midnight',
'backupCount': 7,
'formatter': 'standard',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
}
},
'loggers': {
# 'django': {
# 'handlers': ['debug'],
# 'level': 'DEBUG',
# 'propagate': True,
# },
# 'django.request': {
# 'handlers': ['request_warning', 'request_error', 'mail_admins' ],
# 'level': 'WARNING',
# 'propagate': True,
# },
#'django.db.backends': {
# 'handlers': ['sql',],
# 'level': 'DEBUG',
# 'propagate': False,
#},
# 'django.template': {
# 'handlers': ['template',],
# 'level': 'WARNING',
# 'propagate': False,
# },
# 'post_office': {
# 'handlers': ['post_office',],
# 'level': 'INFO'
# },
},
}
# ======== END LOGGING CONFIGURATION
| [] | [] | ["SECRET_KEY"] | [] | ["SECRET_KEY"] | python | 1 | 0 | |
packet-tool/main.go
|
package main
import (
"fmt"
"math"
"os"
"os/signal"
"strings"
"syscall"
"time"
)
func main() {
appType := os.Getenv("TYPE")
protocol := os.Getenv("PROTOCOL")
if strings.ToUpper(appType) == "SEND" {
send()
} else if strings.ToUpper(protocol) == "TCP" &&
strings.ToUpper(appType) == "LISTEN" {
tcpListen()
} else if strings.ToUpper(protocol) == "UDP" &&
strings.ToUpper(appType) == "LISTEN" {
udpListen()
} else {
fmt.Printf("\x1b[31m[FATAL]\x1b[0m, $TYPE should be \"SEND\" or \"LISTEN\" \n")
awaitInterrupt()
}
}
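// Usage sketch (binary name illustrative; send/tcpListen/udpListen are assumed
// to be defined elsewhere in this package): behaviour is selected entirely
// through the TYPE and PROTOCOL environment variables, e.g.
//   TYPE=SEND ./packet-tool
//   TYPE=LISTEN PROTOCOL=TCP ./packet-tool
//   TYPE=LISTEN PROTOCOL=UDP ./packet-tool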
func awaitInterrupt() {
fmt.Printf("\x1b[32m[WAITING FOR INTERRUPT]\x1b[0m\n")
c := make(chan os.Signal, 1) // buffered channel, as signal.Notify requires
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
<-c
os.Exit(1)
}()
for {
time.Sleep(math.MaxInt64)
}
}
| ["\"TYPE\"", "\"PROTOCOL\""] | [] | ["TYPE", "PROTOCOL"] | [] | ["TYPE", "PROTOCOL"] | go | 2 | 0 | |
example/githubv4dev/main.go
|
// githubv4dev is a test program currently being used for developing githubv4 package.
//
// Warning: It performs some queries and mutations against real GitHub API.
//
// It's not meant to be a clean or readable example. But it's functional.
// Better, actual examples will be created in the future.
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"log"
"os"
"github.com/brejoc/githubv4"
"golang.org/x/oauth2"
)
func main() {
flag.Parse()
err := run()
if err != nil {
log.Println(err)
}
}
func run() error {
src := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: os.Getenv("GITHUB_GRAPHQL_TEST_TOKEN")},
)
httpClient := oauth2.NewClient(context.Background(), src)
client := githubv4.NewClient(httpClient)
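// The client above authenticates with the token taken from the
// GITHUB_GRAPHQL_TEST_TOKEN environment variable; with an empty or invalid
// token the queries and mutations below will fail against the real GitHub API
// (see the warning at the top of this file).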
// Query some details about a repository, an issue in it, and its comments.
{
type githubV4Actor struct {
Login githubv4.String
AvatarURL githubv4.URI `graphql:"avatarUrl(size:72)"`
URL githubv4.URI
}
var q struct {
Repository struct {
DatabaseID githubv4.Int
URL githubv4.URI
Issue struct {
Author githubV4Actor
PublishedAt githubv4.DateTime
LastEditedAt *githubv4.DateTime
Editor *githubV4Actor
Body githubv4.String
ReactionGroups []struct {
Content githubv4.ReactionContent
Users struct {
Nodes []struct {
Login githubv4.String
}
TotalCount githubv4.Int
} `graphql:"users(first:10)"`
ViewerHasReacted githubv4.Boolean
}
ViewerCanUpdate githubv4.Boolean
Comments struct {
Nodes []struct {
Body githubv4.String
Author struct {
Login githubv4.String
}
Editor struct {
Login githubv4.String
}
}
PageInfo struct {
EndCursor githubv4.String
HasNextPage githubv4.Boolean
}
} `graphql:"comments(first:$commentsFirst,after:$commentsAfter)"`
} `graphql:"issue(number:$issueNumber)"`
} `graphql:"repository(owner:$repositoryOwner,name:$repositoryName)"`
Viewer struct {
Login githubv4.String
CreatedAt githubv4.DateTime
ID githubv4.ID
DatabaseID githubv4.Int
}
RateLimit struct {
Cost githubv4.Int
Limit githubv4.Int
Remaining githubv4.Int
ResetAt githubv4.DateTime
}
}
variables := map[string]interface{}{
"repositoryOwner": githubv4.String("shurcooL-test"),
"repositoryName": githubv4.String("test-repo"),
"issueNumber": githubv4.Int(1),
"commentsFirst": githubv4.NewInt(1),
"commentsAfter": githubv4.NewString("Y3Vyc29yOjE5NTE4NDI1Ng=="),
}
err := client.Query(context.Background(), &q, variables)
if err != nil {
return err
}
printJSON(q)
//goon.Dump(out)
//fmt.Println(github.Stringify(out))
}
// Toggle a 👍 reaction on an issue.
//
// That involves first doing a query (and determining whether the reaction already exists),
// then either adding or removing it.
{
var q struct {
Repository struct {
Issue struct {
ID githubv4.ID
Reactions struct {
ViewerHasReacted githubv4.Boolean
} `graphql:"reactions(content:$reactionContent)"`
} `graphql:"issue(number:$issueNumber)"`
} `graphql:"repository(owner:$repositoryOwner,name:$repositoryName)"`
}
variables := map[string]interface{}{
"repositoryOwner": githubv4.String("shurcooL-test"),
"repositoryName": githubv4.String("test-repo"),
"issueNumber": githubv4.Int(2),
"reactionContent": githubv4.ReactionContentThumbsUp,
}
err := client.Query(context.Background(), &q, variables)
if err != nil {
return err
}
fmt.Println("already reacted:", q.Repository.Issue.Reactions.ViewerHasReacted)
if !q.Repository.Issue.Reactions.ViewerHasReacted {
// Add reaction.
var m struct {
AddReaction struct {
Subject struct {
ReactionGroups []struct {
Content githubv4.ReactionContent
Users struct {
TotalCount githubv4.Int
}
}
}
} `graphql:"addReaction(input:$input)"`
}
input := githubv4.AddReactionInput{
SubjectID: q.Repository.Issue.ID,
Content: githubv4.ReactionContentThumbsUp,
}
err := client.Mutate(context.Background(), &m, input, nil)
if err != nil {
return err
}
printJSON(m)
fmt.Println("Successfully added reaction.")
} else {
// Remove reaction.
var m struct {
RemoveReaction struct {
Subject struct {
ReactionGroups []struct {
Content githubv4.ReactionContent
Users struct {
TotalCount githubv4.Int
}
}
}
} `graphql:"removeReaction(input:$input)"`
}
input := githubv4.RemoveReactionInput{
SubjectID: q.Repository.Issue.ID,
Content: githubv4.ReactionContentThumbsUp,
}
err := client.Mutate(context.Background(), &m, input, nil)
if err != nil {
return err
}
printJSON(m)
fmt.Println("Successfully removed reaction.")
}
}
return nil
}
// printJSON prints v as JSON encoded with indent to stdout. It panics on any error.
func printJSON(v interface{}) {
w := json.NewEncoder(os.Stdout)
w.SetIndent("", "\t")
err := w.Encode(v)
if err != nil {
panic(err)
}
}
| ["\"GITHUB_GRAPHQL_TEST_TOKEN\""] | [] | ["GITHUB_GRAPHQL_TEST_TOKEN"] | [] | ["GITHUB_GRAPHQL_TEST_TOKEN"] | go | 1 | 0 | |
appengine/components/components/utils.py
|
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Mixed bag of utilities."""
import binascii
import datetime
import functools
import hashlib
import inspect
import json
import logging
import os
import re
import sys
import threading
from six.moves import urllib
from email import utils as email_utils
from google.appengine import runtime
from google.appengine.api import runtime as apiruntime
from google.appengine.api import app_identity
from google.appengine.api import memcache as gae_memcache
from google.appengine.api import modules
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from google.appengine.runtime import apiproxy_errors
from protorpc import messages
from protorpc.remote import protojson
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
DATETIME_FORMAT = u'%Y-%m-%d %H:%M:%S'
DATE_FORMAT = u'%Y-%m-%d'
VALID_DATETIME_FORMATS = ('%Y-%m-%d', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S')
# UTC datetime corresponding to zero Unix timestamp.
EPOCH = datetime.datetime.utcfromtimestamp(0)
# Module to run task queue tasks on by default. Used by get_task_queue_host
# function. Can be changed by 'set_task_queue_module' function.
_task_queue_module = 'backend'
## GAE environment
def should_disable_ui_routes():
return os.environ.get('LUCI_DISABLE_UI_ROUTES', '0') == '1'
def is_local_dev_server():
"""Returns True if running on local development server or in unit tests.
This function is safe to run outside the scope of a HTTP request.
"""
return os.environ.get('SERVER_SOFTWARE', '').startswith('Development')
def is_dev():
"""Returns True if the server is running a development/staging instance.
We define a 'development instance' as an instance that has the suffix '-dev'
in its instance name.
This function is safe to run outside the scope of a HTTP request.
"""
return os.environ['APPLICATION_ID'].endswith('-dev')
def is_unit_test():
"""Returns True if running in a unit test.
Don't abuse it, use only if really desperate. For example, in a component that
is included by many-many projects across many repos, when mocking some
component behavior in all unit tests that indirectly invoke it is infeasible.
"""
if not is_local_dev_server():
return False
# devappserver2 sets up some sort of a sandbox that is not activated for
# unit tests. So differentiate based on that.
return all(
'google.appengine.tools.devappserver2' not in str(p)
for p in sys.meta_path)
def _get_memory_usage():
"""Returns the amount of memory used as an float in MiB."""
try:
return apiruntime.runtime.memory_usage().current()
except (AssertionError,
apiproxy_errors.CancelledError,
apiproxy_errors.DeadlineExceededError,
apiproxy_errors.RPCFailedError,
runtime.DeadlineExceededError) as e:
logging.warning('Failed to get memory usage: %s', e)
return None
## Handler
def get_request_as_int(request, key, default, min_value, max_value):
"""Returns a request value as int."""
value = request.params.get(key, '')
try:
value = int(value)
except ValueError:
return default
return min(max_value, max(min_value, value))
def report_memory(app):
"""Wraps an app so handlers log when memory usage increased by at least 0.5MB
after the handler completed.
"""
min_delta = 0.5
old_dispatcher = app.router.dispatch
def dispatch_and_report(*args, **kwargs):
before = _get_memory_usage()
deadline = False
try:
return old_dispatcher(*args, **kwargs)
except runtime.DeadlineExceededError:
# Don't try to call any function after this; it'll likely fail anyway,
# because _get_memory_usage() does an RPC under the hood.
deadline = True
raise
finally:
if not deadline:
after = _get_memory_usage()
if before and after and after >= before + min_delta:
logging.debug(
'Memory usage: %.1f -> %.1f MB; delta: %.1f MB',
before, after, after-before)
app.router.dispatch = dispatch_and_report
## Time
def utcnow():
"""Returns datetime.utcnow(), used for testing.
Use this function so it can be mocked everywhere.
"""
return datetime.datetime.utcnow()
def time_time():
"""Returns the equivalent of time.time() as mocked if applicable."""
return (utcnow() - EPOCH).total_seconds()
def milliseconds_since_epoch(now):
"""Returns the number of milliseconds since unix epoch as an int."""
now = now or utcnow()
return int(round((now - EPOCH).total_seconds() * 1000.))
def datetime_to_rfc2822(dt):
"""datetime -> string value for Last-Modified header as defined by RFC2822."""
if not isinstance(dt, datetime.datetime):
raise TypeError(
'Expecting datetime object, got %s instead' % type(dt).__name__)
assert dt.tzinfo is None, 'Expecting UTC timestamp: %s' % dt
return email_utils.formatdate(datetime_to_timestamp(dt) / 1000000.0)
def datetime_to_timestamp(value):
"""Converts UTC datetime to integer timestamp in microseconds since epoch."""
if not isinstance(value, datetime.datetime):
raise ValueError(
'Expecting datetime object, got %s instead' % type(value).__name__)
if value.tzinfo is not None:
raise ValueError('Only UTC datetime is supported')
dt = value - EPOCH
return dt.microseconds + 1000 * 1000 * (dt.seconds + 24 * 3600 * dt.days)
def timestamp_to_datetime(value):
"""Converts integer timestamp in microseconds since epoch to UTC datetime."""
if not isinstance(value, (int, long, float)):
raise ValueError(
'Expecting a number, got %s instead' % type(value).__name__)
return EPOCH + datetime.timedelta(microseconds=value)
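# Illustrative round-trip for the two converters above (values computed from
# the definitions, not part of the original module):
#   datetime_to_timestamp(datetime.datetime(2014, 1, 1)) == 1388534400000000
#   timestamp_to_datetime(1388534400000000) == datetime.datetime(2014, 1, 1)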
def parse_datetime(text):
"""Converts text to datetime.datetime instance or None."""
for f in VALID_DATETIME_FORMATS:
try:
return datetime.datetime.strptime(text, f)
except ValueError:
continue
return None
def parse_rfc3339_datetime(value):
"""Parses RFC 3339 datetime string (as used in Timestamp proto JSON encoding).
Keeps only microsecond precision (dropping nanoseconds).
Examples of the input:
2017-08-17T04:21:32.722952943Z
1972-01-01T10:00:20.021-05:00
Returns:
datetime.datetime in UTC (regardless of timezone of the original string).
Raises:
ValueError on errors.
"""
# Adapted from protobuf/internal/well_known_types.py Timestamp.FromJsonString.
# We can't use the original, since it's marked as internal. Also instantiating
proto messages here to parse a string would be odd.
timezone_offset = value.find('Z')
if timezone_offset == -1:
timezone_offset = value.find('+')
if timezone_offset == -1:
timezone_offset = value.rfind('-')
if timezone_offset == -1:
raise ValueError('Failed to parse timestamp: missing valid timezone offset')
time_value = value[0:timezone_offset]
# Parse datetime and nanos.
point_position = time_value.find('.')
if point_position == -1:
second_value = time_value
nano_value = ''
else:
second_value = time_value[:point_position]
nano_value = time_value[point_position + 1:]
date_object = datetime.datetime.strptime(second_value, '%Y-%m-%dT%H:%M:%S')
td = date_object - EPOCH
seconds = td.seconds + td.days * 86400
if len(nano_value) > 9:
raise ValueError(
'Failed to parse timestamp: nanos %r more than 9 fractional digits'
% nano_value)
if nano_value:
nanos = round(float('0.' + nano_value) * 1e9)
else:
nanos = 0
# Parse timezone offsets.
if value[timezone_offset] == 'Z':
if len(value) != timezone_offset + 1:
raise ValueError(
'Failed to parse timestamp: invalid trailing data %r' % value)
else:
timezone = value[timezone_offset:]
pos = timezone.find(':')
if pos == -1:
raise ValueError('Invalid timezone offset value: %r' % timezone)
if timezone[0] == '+':
seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
else:
seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
return timestamp_to_datetime(int(seconds)*1e6 + int(nanos)/1e3)
def constant_time_equals(a, b):
"""Compares two strings in constant time regardless of theirs content."""
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
## Cache
class _Cache(object):
"""Holds state of a cache for cache_with_expiration and cache decorators.
May call func more than once.
Thread- and NDB tasklet-safe.
"""
def __init__(self, func, expiration_sec):
self.func = func
self.expiration_sec = expiration_sec
self.lock = threading.Lock()
self.value = None
self.value_is_set = False
self.expires = None
def get_value(self):
"""Returns a cached value refreshing it if it has expired."""
with self.lock:
if self.value_is_set and (not self.expires or time_time() < self.expires):
return self.value
new_value = self.func()
with self.lock:
self.value = new_value
self.value_is_set = True
if self.expiration_sec:
self.expires = time_time() + self.expiration_sec
return self.value
def clear(self):
"""Clears stored cached value."""
with self.lock:
self.value = None
self.value_is_set = False
self.expires = None
def get_wrapper(self):
"""Returns a callable object that can be used in place of |func|.
It's basically self.get_value, updated by functools.wraps to look more like
original function.
"""
# functools.wraps doesn't like 'instancemethod', use lambda as a proxy.
# pylint: disable=W0108
wrapper = functools.wraps(self.func)(lambda: self.get_value())
wrapper.__parent_cache__ = self
return wrapper
def cache(func):
"""Decorator that implements permanent cache of a zero-parameter function."""
return _Cache(func, None).get_wrapper()
def cache_with_expiration(expiration_sec):
"""Decorator that implements in-memory cache for a zero-parameter function."""
def decorator(func):
return _Cache(func, expiration_sec).get_wrapper()
return decorator
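# Usage sketch for the decorators above (hypothetical zero-parameter function,
# for illustration only):
#   @cache_with_expiration(60)
#   def get_config():
#     return compute_expensive_config()
# The first call computes and stores the value; calls within the next 60
# seconds return the cached value without invoking the function again.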
def clear_cache(func):
"""Given a function decorated with @cache, resets cached value."""
func.__parent_cache__.clear()
# ignore time parameter warning | pylint: disable=redefined-outer-name
def memcache_async(key, key_args=None, time=None):
"""Decorator that implements memcache-based cache for a function.
The generated cache key contains current application version and values of
|key_args| arguments converted to string using `repr`.
Args:
key (str): unique string that will be used as a part of cache key.
key_args (list of str): list of function argument names to include
in the generated cache key.
time (int): optional expiration time.
Example:
@memcache('f', ['a', 'b'])
def f(a, b=2, not_used_in_cache_key=6):
# Heavy computation
return 42
Decorator raises:
NotImplementedError if function uses varargs or kwargs.
"""
assert isinstance(key, basestring), key
key_args = key_args or []
assert isinstance(key_args, list), key_args
assert all(isinstance(a, basestring) for a in key_args), key_args
assert all(key_args), key_args
memcache_set_kwargs = {}
if time is not None:
memcache_set_kwargs['time'] = time
def decorator(func):
unwrapped = func
while True:
deeper = getattr(unwrapped, '__wrapped__', None)
if not deeper:
break
unwrapped = deeper
argspec = inspect.getargspec(unwrapped)
if argspec.varargs:
raise NotImplementedError(
'varargs in memcached functions are not supported')
if argspec.keywords:
raise NotImplementedError(
'kwargs in memcached functions are not supported')
# List of arg names and indexes. Has same order as |key_args|.
arg_indexes = []
for name in key_args:
try:
i = argspec.args.index(name)
except ValueError:
raise KeyError(
'key_format expects "%s" parameter, but it was not found among '
'function parameters' % name)
arg_indexes.append((name, i))
@functools.wraps(func)
@ndb.tasklet
def decorated(*args, **kwargs):
arg_values = []
for name, i in arg_indexes:
if i < len(args):
arg_value = args[i]
elif name in kwargs:
arg_value = kwargs[name]
else:
# argspec.defaults contains _last_ default values, so we need to shift
# |i| left.
default_value_index = i - (len(argspec.args) - len(argspec.defaults))
if default_value_index < 0:
# Parameter not provided. Call function to cause TypeError
func(*args, **kwargs)
assert False, 'Function call did not fail'
arg_value = argspec.defaults[default_value_index]
arg_values.append(arg_value)
# Instead of putting a raw value to memcache, put tuple (value,)
# so we can distinguish a cached None value and absence of the value.
cache_key = 'utils.memcache/%s/%s%s' % (
get_app_version(), key, repr(arg_values))
ctx = ndb.get_context()
result = yield ctx.memcache_get(cache_key)
if isinstance(result, tuple) and len(result) == 1:
raise ndb.Return(result[0])
result = func(*args, **kwargs)
if isinstance(result, ndb.Future):
result = yield result
yield ctx.memcache_set(cache_key, (result,), **memcache_set_kwargs)
raise ndb.Return(result)
return decorated
return decorator
def memcache(*args, **kwargs):
"""Blocking version of memcache_async."""
decorator_async = memcache_async(*args, **kwargs)
def decorator(func):
decorated_async = decorator_async(func)
@functools.wraps(func)
def decorated(*args, **kwargs):
return decorated_async(*args, **kwargs).get_result()
return decorated
return decorator
## GAE identity
@cache
def get_app_version():
"""Returns currently running version (not necessary a default one)."""
# Sadly, this causes an RPC and when called too frequently, throws quota
# errors.
return modules.get_current_version_name() or 'N/A'
@cache
def get_versioned_hosturl():
"""Returns the url hostname of this instance locked to the currently running
version.
This function hides the fact that app_identity.get_default_version_hostname()
returns None on the dev server and modules.get_hostname() returns incorrectly
qualified hostname for HTTPS usage on the prod server. <3
"""
if is_local_dev_server():
# TODO(maruel): It'd be nice if it were easier to use an ephemeral SSL
# certificate here and not assume unsecured connection.
return 'http://' + modules.get_hostname()
return 'https://%s-dot-%s' % (
get_app_version(), app_identity.get_default_version_hostname())
@cache
def get_urlfetch_service_id():
"""Returns a value for X-URLFetch-Service-Id header for GAE <-> GAE calls.
Usually it can be omitted. It is required in certain environments.
"""
if is_local_dev_server():
return 'LOCAL'
hostname = app_identity.get_default_version_hostname().split('.')
return hostname[-2].upper() if len(hostname) >= 3 else 'APPSPOT'
@cache
def get_app_revision_url():
"""Returns URL of a git revision page for currently running app version.
Works only for non-tainted versions uploaded with tools/update.py: app version
should look like '162-efaec47'. Assumes all services that use 'components'
live in a single repository.
Returns None if a version is tainted or has unexpected name.
"""
rev = re.match(r'\d+-([a-f0-9]+)$', get_app_version())
template = 'https://chromium.googlesource.com/infra/luci/luci-py/+/%s'
return template % rev.group(1) if rev else None
@cache
def get_service_account_name():
"""Same as app_identity.get_service_account_name(), but caches the result.
app_identity.get_service_account_name() does an RPC on each call, yet the
result is always the same.
"""
return app_identity.get_service_account_name()
def get_module_version_list(module_list, tainted):
"""Returns a list of pairs (module name, version name) to fetch logs for.
Arguments:
module_list: list of modules to list, defaults to all modules.
tainted: if False, excludes versions with '-tainted' in their name.
"""
result = []
if not module_list:
# If this function is called too often, it'll raise an OverQuotaError, so
# cache the module list for 10 minutes.
module_list = gae_memcache.get('modules_list')
if not module_list:
module_list = modules.get_modules()
gae_memcache.set('modules_list', module_list, time=10*60)
for module in module_list:
# If this function is called too often, it'll raise an OverQuotaError.
# Versions are a bit more tricky: since versions change much more often than
# modules, a long cache would lose data, so cache them for only 1 minute.
key = 'modules_list-' + module
version_list = gae_memcache.get(key)
if not version_list:
version_list = modules.get_versions(module)
gae_memcache.set(key, version_list, time=60)
result.extend(
(module, v) for v in version_list if tainted or '-tainted' not in v)
return result
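# The returned value is a flat list of (module, version) pairs, e.g.
# [('default', '123-abc'), ('backend', '123-abc')] (illustrative values only).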
## Task queue
@cache
def get_task_queue_host():
"""Returns domain name of app engine instance to run a task queue task on.
By default will use 'backend' module. Can be changed by calling
set_task_queue_module during application startup.
This domain name points to a matching version of appropriate app engine
module - <version>.<module>.<app-id>.appspot.com where:
version: version of the module that is calling this function.
module: app engine module to execute task on.
That way a task enqueued from version 'A' of default module would be executed
on same version 'A' of backend module.
"""
# modules.get_hostname sometimes fails with unknown internal error.
# Cache its result in a memcache to avoid calling it too often.
cache_key = 'task_queue_host:%s:%s' % (_task_queue_module, get_app_version())
value = gae_memcache.get(cache_key)
if not value:
value = modules.get_hostname(module=_task_queue_module)
gae_memcache.set(cache_key, value)
return value
def set_task_queue_module(module):
"""Changes a module used by get_task_queue_host() function.
Should be called during application initialization if default 'backend' module
is not appropriate.
"""
global _task_queue_module
_task_queue_module = module
clear_cache(get_task_queue_host)
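# Illustrative sketch: typically called once from the application's startup
# code, e.g. set_task_queue_module('backend-v2'), when tasks should run on a
# module other than the default 'backend' (the module name here is
# hypothetical).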
@ndb.tasklet
def enqueue_task_async(
url,
queue_name,
params=None,
payload=None,
name=None,
countdown=None,
use_dedicated_module=True,
version=None,
transactional=False):
"""Adds a task to a task queue.
If |use_dedicated_module| is True (default) the task will be executed by
a separate backend module instance that runs the same version as the currently
executing instance. If |version| is specified, the task will be executed by a
separate backend module instance of the specified version. Otherwise it will
run on the current version of the default module.
Returns True if the task was successfully added or a task with such a name
existed before (i.e. on a TombstonedTaskError exception): a deduplicated task
is not an error.
Logs an error and returns False if the task queue is acting up.
"""
assert not use_dedicated_module or version is None, (
'use_dedicated_module(%s) and version(%s) are both specified' % (
use_dedicated_module, version))
try:
headers = None
if use_dedicated_module:
headers = {'Host': get_task_queue_host()}
elif version is not None:
headers = {
'Host': '%s-dot-%s-dot-%s' % (
version, _task_queue_module,
app_identity.get_default_version_hostname())
}
# Note that just using 'target=module' here would redirect task request to
# a default version of a module, not the currently executing one.
task = taskqueue.Task(
url=url,
params=params,
payload=payload,
name=name,
countdown=countdown,
headers=headers)
yield task.add_async(queue_name=queue_name, transactional=transactional)
raise ndb.Return(True)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
logging.info(
'Task %r deduplicated (already exists in queue %r)',
name, queue_name)
raise ndb.Return(True)
except (
taskqueue.Error,
runtime.DeadlineExceededError,
runtime.apiproxy_errors.CancelledError,
runtime.apiproxy_errors.DeadlineExceededError,
runtime.apiproxy_errors.OverQuotaError) as e:
logging.warning(
'Problem adding task %r to task queue %r (%s): %s',
url, queue_name, e.__class__.__name__, e)
raise ndb.Return(False)
def enqueue_task(*args, **kwargs):
"""Adds a task to a task queue.
Returns:
True if the task was enqueued, False otherwise.
"""
return enqueue_task_async(*args, **kwargs).get_result()
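# Illustrative usage sketch (the URL and queue name below are hypothetical):
#
#   ok = enqueue_task(
#       url='/internal/taskqueue/cleanup',
#       queue_name='cleanup',
#       payload=encode_to_json({'batch': 50}),
#       use_dedicated_module=True)
#   if not ok:
#     logging.error('Failed to enqueue the cleanup task')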
## JSON
def to_json_encodable(data):
"""Converts data into json-compatible data."""
if isinstance(data, messages.Message):
# protojson.encode_message returns a string that is already encoded json.
# Load it back into a json-compatible representation of the data.
return json.loads(protojson.encode_message(data))
if isinstance(data, unicode) or data is None:
return data
if isinstance(data, str):
return data.decode('utf-8')
if isinstance(data, (int, float, long)):
# Note: overflowing is an issue with int and long.
return data
if isinstance(data, (list, set, tuple)):
return [to_json_encodable(i) for i in data]
if isinstance(data, dict):
assert all(isinstance(k, basestring) for k in data), data
return {
to_json_encodable(k): to_json_encodable(v) for k, v in data.items()
}
if isinstance(data, datetime.datetime):
# Convert datetime objects into a string, stripping off milliseconds. Only
# accept naive objects.
if data.tzinfo is not None:
raise ValueError('Can only serialize naive datetime instance')
return data.strftime(DATETIME_FORMAT)
if isinstance(data, datetime.date):
return data.strftime(DATE_FORMAT)
if isinstance(data, datetime.timedelta):
# Convert timedelta into seconds, stripping off milliseconds.
return int(data.total_seconds())
if hasattr(data, 'to_dict') and callable(data.to_dict):
# This takes care of ndb.Model.
return to_json_encodable(data.to_dict())
if hasattr(data, 'urlsafe') and callable(data.urlsafe):
# This takes care of ndb.Key.
return to_json_encodable(data.urlsafe())
if inspect.isgenerator(data):
return [to_json_encodable(i) for i in data]
if sys.version_info.major == 2 and isinstance(data, xrange):
# Handle it like a list. Sadly, xrange is not a proper generator so it has
# to be checked manually.
return [to_json_encodable(i) for i in data]
assert False, 'Don\'t know how to handle %r' % data
return None
def encode_to_json(data):
"""Converts any data as a json string."""
return json.dumps(
to_json_encodable(data),
sort_keys=True,
separators=(',', ':'),
encoding='utf-8')
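# Illustrative sketch of the encoding above (values are hypothetical):
#
#   encode_to_json({'ok': True, 'ids': [2, 1]})
#
# returns the compact, key-sorted string '{"ids":[2,1],"ok":true}'.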
## General
def to_units(number):
"""Convert a string to numbers."""
UNITS = ('', 'k', 'm', 'g', 't', 'p', 'e', 'z', 'y')
unit = 0
while number >= 1024.:
unit += 1
number = number / 1024.
if unit == len(UNITS) - 1:
break
if unit:
return '%.2f%s' % (number, UNITS[unit])
return '%d' % number
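# Examples of the conversion performed above: to_units(512) == '512',
# to_units(1536) == '1.50k', to_units(3 * 1024 * 1024) == '3.00m'.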
def validate_root_service_url(url):
"""Raises ValueError if the URL doesn't look like https://<host>."""
schemes = ('https', 'http') if is_local_dev_server() else ('https',)
parsed = urllib.parse.urlparse(url)
if parsed.scheme not in schemes:
raise ValueError('unsupported protocol %r' % str(parsed.scheme))
if not parsed.netloc:
raise ValueError('missing hostname')
stripped = urllib.parse.urlunparse((parsed[0], parsed[1], '', '', '', ''))
if stripped != url:
raise ValueError('expecting root host URL, e.g. %r)' % str(stripped))
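# Examples of the checks above (hostnames are illustrative):
# 'https://example.appspot.com' is accepted, while
# 'https://example.appspot.com/path' (trailing path) and
# 'example.appspot.com' (missing scheme) both raise ValueError.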
def get_token_fingerprint(blob):
"""Given a blob with a token returns first 16 bytes of its SHA256 as hex.
It can be used to identify this particular token in logs without revealing it.
"""
assert isinstance(blob, basestring)
if isinstance(blob, unicode):
blob = blob.encode('ascii', 'ignore')
return binascii.hexlify(hashlib.sha256(blob).digest()[:16])
## Hacks
def fix_protobuf_package():
"""Modifies 'google' package to include path to 'google.protobuf' package.
Prefer our own proto package on the server. Note that this function is not
used on the Swarming bot nor on any other client.
"""
if sys.version_info.major != 2:
# Unnecessary on python3.
return
# google.__path__[0] will be google_appengine/google.
import google
if len(google.__path__) > 1:
return
# We do not mind which 'google' package gets used; inject protobuf into it.
path = os.path.join(THIS_DIR, 'third_party', 'protobuf', 'google')
google.__path__.append(path)
# six is needed for oauth2client and webtest (local testing).
six_path = os.path.join(THIS_DIR, 'third_party', 'six')
if six_path not in sys.path:
sys.path.insert(0, six_path)
def import_jinja2():
"""Remove any existing jinja2 package and add ours."""
if sys.version_info.major != 2:
# Unnecessary on python3.
return
for i in sys.path[:]:
if os.path.basename(i) == 'jinja2':
sys.path.remove(i)
sys.path.append(os.path.join(THIS_DIR, 'third_party'))
# NDB Futures
def async_apply(iterable, async_fn, unordered=False, concurrent_jobs=50):
"""Applies async_fn to each item and yields (item, result) tuples.
Args:
iterable: an iterable of items for which to call async_fn
async_fn: (item) => ndb.Future. It is called for each item in iterable.
unordered: False to return results in the same order as iterable.
Otherwise, yield results as soon as futures finish.
concurrent_jobs: maximum number of futures running concurrently.
"""
if unordered:
return _async_apply_unordered(iterable, async_fn, concurrent_jobs)
return _async_apply_ordered(iterable, async_fn, concurrent_jobs)
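# Illustrative usage sketch (fetch_async below is hypothetical; any callable
# returning an ndb.Future works):
#
#   def fetch_async(key):
#     return key.get_async()
#
#   for key, entity in async_apply(keys, fetch_async, concurrent_jobs=20):
#     process(key, entity)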
def _async_apply_ordered(iterable, async_fn, concurrent_jobs):
results = _async_apply_unordered(
enumerate(iterable), lambda i: async_fn(i[1]), concurrent_jobs)
for (_, item), result in sorted(results, key=lambda i: i[0][0]):
yield item, result
def _async_apply_unordered(iterable, async_fn, concurrent_jobs):
# Maps a future to the original item(s). The value is a list because async_fn
# is allowed to return the same future for different items.
futs = {}
iterator = iter(iterable)
def launch():
running_futs = sum(1 for f in futs if not f.done())
while running_futs < concurrent_jobs:
try:
item = next(iterator)
except StopIteration:
break
future = async_fn(item)
if not future.done():
running_futs += 1
futs.setdefault(future, []).append(item)
launch()
while futs:
future = ndb.Future.wait_any(futs)
res = future.get_result()
launch() # launch more before yielding
for item in futs.pop(future):
yield item, res
def sync_of(async_fn):
"""Returns a synchronous version of an asynchronous function."""
is_static_method = isinstance(async_fn, staticmethod)
is_class_method = isinstance(async_fn, classmethod)
if is_static_method or is_class_method:
async_fn = async_fn.__func__
@functools.wraps(async_fn)
def sync(*args, **kwargs):
return async_fn(*args, **kwargs).get_result()
if is_static_method:
sync = staticmethod(sync)
elif is_class_method:
sync = classmethod(sync)
return sync
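# Illustrative usage sketch (MyModel below is hypothetical):
#
#   @ndb.tasklet
#   def count_rows_async():
#     n = yield MyModel.query().count_async()
#     raise ndb.Return(n)
#
#   count_rows = sync_of(count_rows_async)
#   # count_rows() is equivalent to count_rows_async().get_result()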
|
[] |
[] |
[
"LUCI_DISABLE_UI_ROUTES",
"SERVER_SOFTWARE",
"APPLICATION_ID"
] |
[]
|
["LUCI_DISABLE_UI_ROUTES", "SERVER_SOFTWARE", "APPLICATION_ID"]
|
python
| 3 | 0 | |
backend/manager/sso-client-registration-tool/src/main/java/org/ovirt/engine/ssoreg/core/SsoLocalConfig.java
|
package org.ovirt.engine.ssoreg.core;
import java.io.File;
import java.util.Map;
import org.ovirt.engine.core.uutils.config.ShellLikeConfd;
/**
* This class stores the local configuration of the engine ("local" meaning the
* configuration of the local machine, as opposed to the global configuration
* stored in the database), loaded from the file specified by the
* <code>ENGINE_VARS</code> environment variable.
*/
public class SsoLocalConfig extends ShellLikeConfd {
// Default files for defaults and overridden values:
private static final String DEFAULTS_PATH = "/usr/share/ovirt-engine/conf/engine.conf.defaults";
private static final String VARS_PATH = "/etc/ovirt-engine/engine.conf";
// This is a singleton and this is the instance:
private static volatile SsoLocalConfig instance;
public static SsoLocalConfig getInstance() {
return getInstance(null);
}
public static SsoLocalConfig getInstance(Map<String, String> values) {
if (values != null) {
instance = new SsoLocalConfig(values);
}
else {
if (instance == null) {
synchronized(SsoLocalConfig.class) {
if (instance == null) {
instance = new SsoLocalConfig();
}
}
}
}
return instance;
}
protected SsoLocalConfig(Map<String, String> values) {
setConfig(values);
}
private SsoLocalConfig() {
String v;
String defaultsPath = System.getProperty("ovirt-engine.config.defaults", DEFAULTS_PATH);
v = System.getenv("ENGINE_DEFAULTS");
if (v != null) {
defaultsPath = v;
}
String varsPath = System.getProperty("ovirt-engine.config.vars", VARS_PATH);
v = System.getenv("ENGINE_VARS");
if (v != null) {
varsPath = v;
}
loadConfig(defaultsPath, varsPath);
}
public File getLogDir() {
return getFile("ENGINE_LOG");
}
public File getTmpDir() {
return getFile("ENGINE_TMP");
}
}
|
[
"\"ENGINE_DEFAULTS\"",
"\"ENGINE_VARS\""
] |
[] |
[
"ENGINE_VARS",
"ENGINE_DEFAULTS"
] |
[]
|
["ENGINE_VARS", "ENGINE_DEFAULTS"]
|
java
| 2 | 0 | |
pubsub/kafkapubsub/kafka_test.go
|
// Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kafkapubsub // import "gocloud.dev/pubsub/kafkapubsub"
// To run these tests against a real Kafka server, run localkafka.sh.
// See https://github.com/spotify/docker-kafka for more on the docker container
// that the script runs.
import (
"context"
"errors"
"fmt"
"math/rand"
"os"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/Shopify/sarama"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"gocloud.dev/internal/testing/setup"
"gocloud.dev/pubsub"
"gocloud.dev/pubsub/driver"
"gocloud.dev/pubsub/drivertest"
)
var (
localBrokerAddrs = []string{"localhost:9092"}
// Makes OpenSubscription wait ~forever until the subscriber has joined the
// ConsumerGroup. Messages sent to the topic before the subscriber has joined
// won't be received.
subscriptionOptions = &SubscriptionOptions{WaitForJoin: 24 * time.Hour}
)
type harness struct {
uniqueID int
numSubs uint32
numTopics uint32
}
func newHarness(ctx context.Context, t *testing.T) (drivertest.Harness, error) {
if !setup.HasDockerTestEnvironment() {
t.Skip("Skipping Kafka tests since the Kafka server is not available")
}
return &harness{uniqueID: rand.Int()}, nil
}
func createKafkaTopic(topicName string, partitions int32) (func(), error) {
// Create the topic.
config := MinimalConfig()
admin, err := sarama.NewClusterAdmin(localBrokerAddrs, config)
if err != nil {
return func() {}, err
}
close1 := func() { admin.Close() }
topicDetail := &sarama.TopicDetail{
NumPartitions: partitions,
ReplicationFactor: 1,
}
if err := admin.CreateTopic(topicName, topicDetail, false); err != nil {
return close1, err
}
close2 := func() {
admin.DeleteTopic(topicName)
close1()
}
return close2, nil
}
func (h *harness) CreateTopic(ctx context.Context, testName string) (driver.Topic, func(), error) {
topicName := fmt.Sprintf("%s-topic-%d-%d", sanitize(testName), h.uniqueID, atomic.AddUint32(&h.numTopics, 1))
cleanup, err := createKafkaTopic(topicName, 1)
if err != nil {
return nil, cleanup, err
}
// Open it.
dt, err := openTopic(localBrokerAddrs, MinimalConfig(), topicName, nil)
if err != nil {
return nil, cleanup, err
}
return dt, cleanup, nil
}
func (h *harness) MakeNonexistentTopic(ctx context.Context) (driver.Topic, error) {
return openTopic(localBrokerAddrs, MinimalConfig(), "nonexistent-topic", nil)
}
func (h *harness) CreateSubscription(ctx context.Context, dt driver.Topic, testName string) (driver.Subscription, func(), error) {
groupID := fmt.Sprintf("%s-sub-%d-%d", sanitize(testName), h.uniqueID, atomic.AddUint32(&h.numSubs, 1))
ds, err := openSubscription(localBrokerAddrs, MinimalConfig(), groupID, []string{dt.(*topic).topicName}, subscriptionOptions)
return ds, func() {}, err
}
func (h *harness) MakeNonexistentSubscription(ctx context.Context) (driver.Subscription, func(), error) {
ds, err := openSubscription(localBrokerAddrs, MinimalConfig(), "unused-group", []string{"nonexistent-topic"}, subscriptionOptions)
return ds, func() {}, err
}
func (h *harness) Close() {}
func (h *harness) MaxBatchSizes() (int, int) { return sendBatcherOpts.MaxBatchSize, 0 }
func (*harness) SupportsMultipleSubscriptions() bool { return true }
func TestConformance(t *testing.T) {
asTests := []drivertest.AsTest{asTest{}}
drivertest.RunConformanceTests(t, newHarness, asTests)
}
type asTest struct{}
func (asTest) Name() string {
return "kafka"
}
func (asTest) TopicCheck(topic *pubsub.Topic) error {
var sp sarama.SyncProducer
if !topic.As(&sp) {
return fmt.Errorf("cast failed for %T", sp)
}
return nil
}
func (asTest) SubscriptionCheck(sub *pubsub.Subscription) error {
var cg sarama.ConsumerGroup
if !sub.As(&cg) {
return fmt.Errorf("cast failed for %T", cg)
}
var cgs sarama.ConsumerGroupSession
if !sub.As(&cgs) {
return fmt.Errorf("cast failed for %T", cgs)
}
return nil
}
func (asTest) TopicErrorCheck(t *pubsub.Topic, err error) error {
var pe sarama.ProducerErrors
if !t.ErrorAs(err, &pe) {
return fmt.Errorf("failed to convert %v (%T)", err, err)
}
return nil
}
func (asTest) SubscriptionErrorCheck(s *pubsub.Subscription, err error) error {
var ke sarama.KError
if !s.ErrorAs(err, &ke) {
return fmt.Errorf("failed to convert %v (%T)", err, err)
}
return nil
}
func (asTest) MessageCheck(m *pubsub.Message) error {
var cm *sarama.ConsumerMessage
if !m.As(&cm) {
return fmt.Errorf("cast failed for %T", cm)
}
return nil
}
func (asTest) BeforeSend(as func(interface{}) bool) error {
var pm *sarama.ProducerMessage
if !as(&pm) {
return fmt.Errorf("cast failed for %T", &pm)
}
return nil
}
func (asTest) AfterSend(as func(interface{}) bool) error {
return nil
}
// TestKafkaKey tests sending/receiving a message with the Kafka message key set.
func TestKafkaKey(t *testing.T) {
if !setup.HasDockerTestEnvironment() {
t.Skip("Skipping Kafka tests since the Kafka server is not available")
}
const (
keyName = "kafkakey"
keyValue = "kafkakeyvalue"
)
uniqueID := rand.Int()
ctx := context.Background()
topicName := fmt.Sprintf("%s-topic-%d", sanitize(t.Name()), uniqueID)
topicCleanup, err := createKafkaTopic(topicName, 1)
defer topicCleanup()
if err != nil {
t.Fatal(err)
}
topic, err := OpenTopic(localBrokerAddrs, MinimalConfig(), topicName, &TopicOptions{KeyName: keyName})
if err != nil {
t.Fatal(err)
}
defer func() {
if err := topic.Shutdown(ctx); err != nil {
t.Error(err)
}
}()
groupID := fmt.Sprintf("%s-sub-%d", sanitize(t.Name()), uniqueID)
subOpts := *subscriptionOptions
subOpts.KeyName = keyName
sub, err := OpenSubscription(localBrokerAddrs, MinimalConfig(), groupID, []string{topicName}, &subOpts)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := sub.Shutdown(ctx); err != nil {
t.Error(err)
}
}()
m := &pubsub.Message{
Metadata: map[string]string{
"foo": "bar",
keyName: keyValue,
},
Body: []byte("hello world"),
BeforeSend: func(as func(interface{}) bool) error {
// Verify that the Key field was set correctly on the outgoing Kafka
// message.
var pm *sarama.ProducerMessage
if !as(&pm) {
return errors.New("failed to convert to ProducerMessage")
}
gotKeyBytes, err := pm.Key.Encode()
if err != nil {
return fmt.Errorf("failed to Encode Kafka Key: %v", err)
}
if gotKey := string(gotKeyBytes); gotKey != keyValue {
return errors.New("Kafka key wasn't set appropriately")
}
return nil
},
}
err = topic.Send(ctx, m)
if err != nil {
t.Fatal(err)
}
// The test will hang here if the message isn't available, so use a shorter timeout.
ctx2, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
got, err := sub.Receive(ctx2)
if err != nil {
t.Fatal(err)
}
got.Ack()
m.BeforeSend = nil // don't expect this in the received message
m.LoggableID = keyValue
if diff := cmp.Diff(got, m, cmpopts.IgnoreUnexported(pubsub.Message{})); diff != "" {
t.Errorf("got\n%v\nwant\n%v\ndiff\n%v", got, m, diff)
}
// Verify that Key was set in the received Kafka message via As.
var cm *sarama.ConsumerMessage
if !got.As(&cm) {
t.Fatal("failed to get message As ConsumerMessage")
}
if gotKey := string(cm.Key); gotKey != keyValue {
t.Errorf("got key %q want %q", gotKey, keyValue)
}
}
// TestMultiplePartionsWithRebalancing tests use of a topic with multiple
// partitions, including the rebalancing that happens when a new consumer
// appears in the group.
func TestMultiplePartionsWithRebalancing(t *testing.T) {
if !setup.HasDockerTestEnvironment() {
t.Skip("Skipping Kafka tests since the Kafka server is not available")
}
const (
keyName = "kafkakey"
nMessages = 10
)
uniqueID := rand.Int()
ctx := context.Background()
// Create a topic with 10 partitions. Using 10 instead of just 2 because
// that also tests having multiple claims.
topicName := fmt.Sprintf("%s-topic-%d", sanitize(t.Name()), uniqueID)
topicCleanup, err := createKafkaTopic(topicName, 10)
defer topicCleanup()
if err != nil {
t.Fatal(err)
}
topic, err := OpenTopic(localBrokerAddrs, MinimalConfig(), topicName, &TopicOptions{KeyName: keyName})
if err != nil {
t.Fatal(err)
}
defer func() {
if err := topic.Shutdown(ctx); err != nil {
t.Error(err)
}
}()
// Open a subscription.
groupID := fmt.Sprintf("%s-sub-%d", sanitize(t.Name()), uniqueID)
subOpts := *subscriptionOptions
subOpts.KeyName = keyName
sub, err := OpenSubscription(localBrokerAddrs, MinimalConfig(), groupID, []string{topicName}, &subOpts)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := sub.Shutdown(ctx); err != nil {
t.Error(err)
}
}()
// Send some messages.
send := func() {
for i := 0; i < nMessages; i++ {
m := &pubsub.Message{
Metadata: map[string]string{
keyName: fmt.Sprintf("key%d", i),
},
Body: []byte("hello world"),
}
if err := topic.Send(ctx, m); err != nil {
t.Fatal(err)
}
}
}
send()
// Receive the messages via the subscription.
got := make(chan int)
done := make(chan error)
read := func(ctx context.Context, subNum int, sub *pubsub.Subscription) {
for {
m, err := sub.Receive(ctx)
if err != nil {
if err == context.Canceled {
// Expected after all messages are received, no error.
done <- nil
} else {
done <- err
}
return
}
m.Ack()
got <- subNum
}
}
// The test will hang here if the messages aren't available, so use a shorter
// timeout.
ctx2, cancel := context.WithTimeout(ctx, 30*time.Second)
go read(ctx2, 0, sub)
for i := 0; i < nMessages; i++ {
select {
case <-got:
case err := <-done:
// Premature error.
if err != nil {
t.Fatal(err)
}
}
}
cancel()
if err := <-done; err != nil {
t.Fatal(err)
}
// Add another subscription to the same group. Kafka will rebalance the
// consumer group, causing the Cleanup/Setup/ConsumeClaim loop. Each of the
// two subscriptions should get claims for 50% of the partitions.
sub2, err := OpenSubscription(localBrokerAddrs, MinimalConfig(), groupID, []string{topicName}, &subOpts)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := sub2.Shutdown(ctx); err != nil {
t.Error(err)
}
}()
// Send and receive some messages.
// Now both subscriptions should get some messages.
send()
// The test will hang here if the message isn't available, so use a shorter timeout.
ctx3, cancel := context.WithTimeout(ctx, 30*time.Second)
go read(ctx3, 0, sub)
go read(ctx3, 1, sub2)
counts := []int{0, 0}
for i := 0; i < nMessages; i++ {
select {
case sub := <-got:
counts[sub]++
case err := <-done:
// Premature error.
if err != nil {
t.Fatal(err)
}
}
}
cancel()
for i := 0; i < 2; i++ {
if err := <-done; err != nil {
t.Fatal(err)
}
}
if counts[0] == 0 || counts[1] == 0 {
t.Errorf("one of the partitioned subscriptions didn't get any messages: %v", counts)
}
}
func sanitize(testName string) string {
return strings.Replace(testName, "/", "_", -1)
}
func BenchmarkKafka(b *testing.B) {
ctx := context.Background()
uniqueID := rand.Int()
// Create the topic.
topicName := fmt.Sprintf("%s-topic-%d", b.Name(), uniqueID)
cleanup, err := createKafkaTopic(topicName, 1)
defer cleanup()
if err != nil {
b.Fatal(err)
}
topic, err := OpenTopic(localBrokerAddrs, MinimalConfig(), topicName, nil)
if err != nil {
b.Fatal(err)
}
defer topic.Shutdown(ctx)
groupID := fmt.Sprintf("%s-subscription-%d", b.Name(), uniqueID)
sub, err := OpenSubscription(localBrokerAddrs, MinimalConfig(), groupID, []string{topicName}, subscriptionOptions)
if err != nil {
b.Fatal(err)
}
defer sub.Shutdown(ctx)
drivertest.RunBenchmarks(b, topic, sub)
}
func fakeConnectionStringInEnv() func() {
oldEnvVal := os.Getenv("KAFKA_BROKERS")
os.Setenv("KAFKA_BROKERS", "localhost:10000")
return func() {
os.Setenv("KAFKA_BROKERS", oldEnvVal)
}
}
func TestOpenTopicFromURL(t *testing.T) {
cleanup := fakeConnectionStringInEnv()
defer cleanup()
tests := []struct {
URL string
WantErr bool
}{
// OK, but still error because broker doesn't exist.
{"kafka://mytopic", true},
// Invalid parameter.
{"kafka://mytopic?param=value", true},
}
ctx := context.Background()
for _, test := range tests {
topic, err := pubsub.OpenTopic(ctx, test.URL)
if (err != nil) != test.WantErr {
t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr)
}
if topic != nil {
topic.Shutdown(ctx)
}
}
}
func TestOpenSubscriptionFromURL(t *testing.T) {
cleanup := fakeConnectionStringInEnv()
defer cleanup()
tests := []struct {
URL string
WantErr bool
}{
// OK, but still error because broker doesn't exist.
{"kafka://mygroup?topic=mytopic", false},
// OK, specifying initial offset, but still error because broker doesn't exist.
{"kafka://mygroup?topic=mytopic&offset=oldest", false},
{"kafka://mygroup?topic=mytopic&offset=newest", false},
// Invalid offset specified
{"kafka://mygroup?topic=mytopic&offset=value", true},
// Invalid parameter.
{"kafka://mygroup?topic=mytopic¶m=value", true},
}
ctx := context.Background()
const ignore = "kafka: client has run out of available brokers to talk to (Is your cluster reachable?)"
for _, test := range tests {
sub, err := pubsub.OpenSubscription(ctx, test.URL)
if err != nil && err.Error() == ignore {
// Since we don't have a real kafka broker to talk to, we will always get an error when
// opening a subscription. This test is checking specifically for query parameter usage, so
// we treat the "no brokers" error message as a nil error.
err = nil
}
if (err != nil) != test.WantErr {
t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr)
}
if sub != nil {
sub.Shutdown(ctx)
}
}
}
|
[
"\"KAFKA_BROKERS\""
] |
[] |
[
"KAFKA_BROKERS"
] |
[]
|
["KAFKA_BROKERS"]
|
go
| 1 | 0 | |
molecule/default/tests/test_package.py
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_integration_package(host):
newrelic_kafka = host.package("nri-kafka")
assert newrelic_kafka.is_installed
assert newrelic_kafka.version.startswith("2.")
|
[] |
[] |
[
"MOLECULE_INVENTORY_FILE"
] |
[]
|
["MOLECULE_INVENTORY_FILE"]
|
python
| 1 | 0 | |
pkg/api/testapi/testapi.go
|
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package testapi provides a helper for retrieving the KUBE_TEST_API environment variable.
package testapi
import (
"fmt"
"os"
"reflect"
"strings"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/runtime"
_ "k8s.io/kubernetes/pkg/api/install"
_ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
_ "k8s.io/kubernetes/pkg/apis/batch/install"
_ "k8s.io/kubernetes/pkg/apis/componentconfig/install"
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
_ "k8s.io/kubernetes/pkg/apis/metrics/install"
)
var (
Groups = make(map[string]TestGroup)
Default TestGroup
Autoscaling TestGroup
Batch TestGroup
Extensions TestGroup
)
type TestGroup struct {
externalGroupVersion unversioned.GroupVersion
internalGroupVersion unversioned.GroupVersion
internalTypes map[string]reflect.Type
}
func init() {
kubeTestAPI := os.Getenv("KUBE_TEST_API")
if len(kubeTestAPI) != 0 {
testGroupVersions := strings.Split(kubeTestAPI, ",")
for _, gvString := range testGroupVersions {
groupVersion, err := unversioned.ParseGroupVersion(gvString)
if err != nil {
panic(fmt.Sprintf("Error parsing groupversion %v: %v", gvString, err))
}
internalGroupVersion := unversioned.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal}
Groups[groupVersion.Group] = TestGroup{
externalGroupVersion: groupVersion,
internalGroupVersion: internalGroupVersion,
internalTypes: api.Scheme.KnownTypes(internalGroupVersion),
}
}
}
if _, ok := Groups[api.GroupName]; !ok {
Groups[api.GroupName] = TestGroup{
externalGroupVersion: unversioned.GroupVersion{Group: api.GroupName, Version: registered.GroupOrDie(api.GroupName).GroupVersion.Version},
internalGroupVersion: api.SchemeGroupVersion,
internalTypes: api.Scheme.KnownTypes(api.SchemeGroupVersion),
}
}
if _, ok := Groups[extensions.GroupName]; !ok {
Groups[extensions.GroupName] = TestGroup{
externalGroupVersion: unversioned.GroupVersion{Group: extensions.GroupName, Version: registered.GroupOrDie(extensions.GroupName).GroupVersion.Version},
internalGroupVersion: extensions.SchemeGroupVersion,
internalTypes: api.Scheme.KnownTypes(extensions.SchemeGroupVersion),
}
}
if _, ok := Groups[autoscaling.GroupName]; !ok {
internalTypes := make(map[string]reflect.Type)
for k, t := range api.Scheme.KnownTypes(extensions.SchemeGroupVersion) {
if k == "Scale" {
continue
}
internalTypes[k] = t
}
Groups[autoscaling.GroupName] = TestGroup{
externalGroupVersion: unversioned.GroupVersion{Group: autoscaling.GroupName, Version: registered.GroupOrDie(autoscaling.GroupName).GroupVersion.Version},
internalGroupVersion: extensions.SchemeGroupVersion,
internalTypes: internalTypes,
}
}
if _, ok := Groups[autoscaling.GroupName+"IntraGroup"]; !ok {
internalTypes := make(map[string]reflect.Type)
for k, t := range api.Scheme.KnownTypes(extensions.SchemeGroupVersion) {
if k == "Scale" {
internalTypes[k] = t
break
}
}
Groups[autoscaling.GroupName] = TestGroup{
externalGroupVersion: unversioned.GroupVersion{Group: autoscaling.GroupName, Version: registered.GroupOrDie(autoscaling.GroupName).GroupVersion.Version},
internalGroupVersion: autoscaling.SchemeGroupVersion,
internalTypes: internalTypes,
}
}
if _, ok := Groups[batch.GroupName]; !ok {
Groups[batch.GroupName] = TestGroup{
externalGroupVersion: unversioned.GroupVersion{Group: batch.GroupName, Version: registered.GroupOrDie(batch.GroupName).GroupVersion.Version},
internalGroupVersion: extensions.SchemeGroupVersion,
internalTypes: api.Scheme.KnownTypes(extensions.SchemeGroupVersion),
}
}
Default = Groups[api.GroupName]
Autoscaling = Groups[autoscaling.GroupName]
Batch = Groups[batch.GroupName]
Extensions = Groups[extensions.GroupName]
}
func (g TestGroup) ContentConfig() (string, *unversioned.GroupVersion, runtime.Codec) {
return "application/json", g.GroupVersion(), g.Codec()
}
func (g TestGroup) GroupVersion() *unversioned.GroupVersion {
copyOfGroupVersion := g.externalGroupVersion
return &copyOfGroupVersion
}
// InternalGroupVersion returns the group,version used to identify the internal
// types for this API
func (g TestGroup) InternalGroupVersion() unversioned.GroupVersion {
return g.internalGroupVersion
}
// InternalTypes returns a map of internal API types' kind names to their Go types.
func (g TestGroup) InternalTypes() map[string]reflect.Type {
return g.internalTypes
}
// Codec returns the codec for the API version to test against, as set by the
// KUBE_TEST_API env var.
func (g TestGroup) Codec() runtime.Codec {
return api.Codecs.LegacyCodec(g.externalGroupVersion)
}
// Converter returns the api.Scheme for the API version to test against, as set by the
// KUBE_TEST_API env var.
func (g TestGroup) Converter() runtime.ObjectConvertor {
interfaces, err := registered.GroupOrDie(g.externalGroupVersion.Group).InterfacesFor(g.externalGroupVersion)
if err != nil {
panic(err)
}
return interfaces.ObjectConvertor
}
// MetadataAccessor returns the MetadataAccessor for the API version to test against,
// as set by the KUBE_TEST_API env var.
func (g TestGroup) MetadataAccessor() meta.MetadataAccessor {
interfaces, err := registered.GroupOrDie(g.externalGroupVersion.Group).InterfacesFor(g.externalGroupVersion)
if err != nil {
panic(err)
}
return interfaces.MetadataAccessor
}
// SelfLink returns a self link that will appear to be for the version Version().
// 'resource' should be the resource path, e.g. "pods" for the Pod type. 'name' should be
// empty for lists.
func (g TestGroup) SelfLink(resource, name string) string {
if g.externalGroupVersion.Group == api.GroupName {
if name == "" {
return fmt.Sprintf("/api/%s/%s", g.externalGroupVersion.Version, resource)
}
return fmt.Sprintf("/api/%s/%s/%s", g.externalGroupVersion.Version, resource, name)
} else {
// TODO: will need a /apis prefix once we have proper multi-group
// support
if name == "" {
return fmt.Sprintf("/apis/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource)
}
return fmt.Sprintf("/apis/%s/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource, name)
}
}
// Returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name.
// For example, this is of the form:
// /api/v1/watch/namespaces/foo/pods/pod0 for v1.
func (g TestGroup) ResourcePathWithPrefix(prefix, resource, namespace, name string) string {
var path string
if g.externalGroupVersion.Group == api.GroupName {
path = "/api/" + g.externalGroupVersion.Version
} else {
// TODO: switch back once we have proper multiple group support
// path = "/apis/" + g.Group + "/" + Version(group...)
path = "/apis/" + g.externalGroupVersion.Group + "/" + g.externalGroupVersion.Version
}
if prefix != "" {
path = path + "/" + prefix
}
if namespace != "" {
path = path + "/namespaces/" + namespace
}
// Resource names are lower case.
resource = strings.ToLower(resource)
if resource != "" {
path = path + "/" + resource
}
if name != "" {
path = path + "/" + name
}
return path
}
// Returns the appropriate path for the given resource, namespace and name.
// For example, this is of the form:
// /api/v1/namespaces/foo/pods/pod0 for v1.
func (g TestGroup) ResourcePath(resource, namespace, name string) string {
return g.ResourcePathWithPrefix("", resource, namespace, name)
}
func (g TestGroup) RESTMapper() meta.RESTMapper {
return registered.RESTMapper()
}
// ExternalGroupVersions returns all external group versions allowed for the server.
func ExternalGroupVersions() []unversioned.GroupVersion {
versions := []unversioned.GroupVersion{}
for _, g := range Groups {
gv := g.GroupVersion()
versions = append(versions, *gv)
}
return versions
}
// GetCodecForObject returns the codec to use for the given runtime.Object, based on its kind.
func GetCodecForObject(obj runtime.Object) (runtime.Codec, error) {
kind, err := api.Scheme.ObjectKind(obj)
if err != nil {
return nil, fmt.Errorf("unexpected encoding error: %v", err)
}
for _, group := range Groups {
if group.GroupVersion().Group != kind.Group {
continue
}
if api.Scheme.Recognizes(kind) {
return group.Codec(), nil
}
}
// Codec used for unversioned types
if api.Scheme.Recognizes(kind) {
serializer, ok := api.Codecs.SerializerForFileExtension("json")
if !ok {
return nil, fmt.Errorf("no serializer registered for json")
}
return serializer, nil
}
return nil, fmt.Errorf("unexpected kind: %v", kind)
}
func NewTestGroup(external, internal unversioned.GroupVersion, internalTypes map[string]reflect.Type) TestGroup {
return TestGroup{external, internal, internalTypes}
}
|
[
"\"KUBE_TEST_API\""
] |
[] |
[
"KUBE_TEST_API"
] |
[]
|
["KUBE_TEST_API"]
|
go
| 1 | 0 | |
integration/cluster_test.go
|
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
"fmt"
"log"
"math/rand"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/suteqa/etcd/client"
"github.com/suteqa/etcd/etcdserver"
"github.com/suteqa/etcd/pkg/testutil"
)
func init() {
// open microsecond-level time log for integration test debugging
log.SetFlags(log.Ltime | log.Lmicroseconds | log.Lshortfile)
if t := os.Getenv("ETCD_ELECTION_TIMEOUT_TICKS"); t != "" {
if i, err := strconv.ParseInt(t, 10, 64); err == nil {
electionTicks = int(i)
}
}
}
func TestClusterOf1(t *testing.T) { testCluster(t, 1) }
func TestClusterOf3(t *testing.T) { testCluster(t, 3) }
func testCluster(t *testing.T, size int) {
defer testutil.AfterTest(t)
c := NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
func TestTLSClusterOf3(t *testing.T) {
defer testutil.AfterTest(t)
c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
func TestClusterOf1UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 1) }
func TestClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3) }
func testClusterUsingDiscovery(t *testing.T, size int) {
defer testutil.AfterTest(t)
dc := NewCluster(t, 1)
dc.Launch(t)
defer dc.Terminate(t)
// init discovery token space
dcc := MustNewHTTPClient(t, dc.URLs(), nil)
dkapi := client.NewKeysAPI(dcc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil {
t.Fatal(err)
}
cancel()
c := NewClusterByConfig(
t,
&ClusterConfig{Size: size, DiscoveryURL: dc.URL(0) + "/v2/keys"},
)
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
func TestTLSClusterOf3UsingDiscovery(t *testing.T) {
defer testutil.AfterTest(t)
dc := NewCluster(t, 1)
dc.Launch(t)
defer dc.Terminate(t)
// init discovery token space
dcc := MustNewHTTPClient(t, dc.URLs(), nil)
dkapi := client.NewKeysAPI(dcc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", 3)); err != nil {
t.Fatal(err)
}
cancel()
c := NewClusterByConfig(t,
&ClusterConfig{
Size: 3,
PeerTLS: &testTLSInfo,
DiscoveryURL: dc.URL(0) + "/v2/keys"},
)
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
func TestDoubleClusterSizeOf1(t *testing.T) { testDoubleClusterSize(t, 1) }
func TestDoubleClusterSizeOf3(t *testing.T) { testDoubleClusterSize(t, 3) }
func testDoubleClusterSize(t *testing.T, size int) {
defer testutil.AfterTest(t)
c := NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)
for i := 0; i < size; i++ {
c.AddMember(t)
}
clusterMustProgress(t, c.Members)
}
func TestDoubleTLSClusterSizeOf3(t *testing.T) {
defer testutil.AfterTest(t)
c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
c.Launch(t)
defer c.Terminate(t)
for i := 0; i < 3; i++ {
c.AddMember(t)
}
clusterMustProgress(t, c.Members)
}
func TestDecreaseClusterSizeOf3(t *testing.T) { testDecreaseClusterSize(t, 3) }
func TestDecreaseClusterSizeOf5(t *testing.T) { testDecreaseClusterSize(t, 5) }
func testDecreaseClusterSize(t *testing.T, size int) {
defer testutil.AfterTest(t)
c := NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)
// TODO: remove the last but one member
for i := 0; i < size-1; i++ {
id := c.Members[len(c.Members)-1].s.ID()
// may hit second leader election on slow machines
if err := c.removeMember(t, uint64(id)); err != nil {
if strings.Contains(err.Error(), "no leader") {
t.Logf("got leader error (%v)", err)
i--
continue
}
t.Fatal(err)
}
c.waitLeader(t, c.Members)
}
clusterMustProgress(t, c.Members)
}
func TestForceNewCluster(t *testing.T) {
c := NewCluster(t, 3)
c.Launch(t)
cc := MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
resp, err := kapi.Create(ctx, "/foo", "bar")
if err != nil {
t.Fatalf("unexpected create error: %v", err)
}
cancel()
// ensure create has been applied in this machine
ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
if _, err = kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
t.Fatalf("unexpected watch error: %v", err)
}
cancel()
c.Members[0].Stop(t)
c.Members[1].Terminate(t)
c.Members[2].Terminate(t)
c.Members[0].ForceNewCluster = true
err = c.Members[0].Restart(t)
if err != nil {
t.Fatalf("unexpected ForceRestart error: %v", err)
}
defer c.Members[0].Terminate(t)
c.waitLeader(t, c.Members[:1])
// use new http client to init new connection
cc = MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
kapi = client.NewKeysAPI(cc)
// ensure force restart keeps the old data, and the new cluster can make progress
ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
if _, err := kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
t.Fatalf("unexpected watch error: %v", err)
}
cancel()
clusterMustProgress(t, c.Members[:1])
}
func TestAddMemberAfterClusterFullRotation(t *testing.T) {
defer testutil.AfterTest(t)
c := NewCluster(t, 3)
c.Launch(t)
defer c.Terminate(t)
// remove all the previous three members and add in three new members.
for i := 0; i < 3; i++ {
c.RemoveMember(t, uint64(c.Members[0].s.ID()))
c.waitLeader(t, c.Members)
c.AddMember(t)
c.waitLeader(t, c.Members)
}
c.AddMember(t)
c.waitLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
// Ensure we can remove a member then add a new one back immediately.
func TestIssue2681(t *testing.T) {
defer testutil.AfterTest(t)
c := NewCluster(t, 5)
c.Launch(t)
defer c.Terminate(t)
c.RemoveMember(t, uint64(c.Members[4].s.ID()))
c.waitLeader(t, c.Members)
c.AddMember(t)
c.waitLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
// Ensure we can remove a member after a snapshot then add a new one back.
func TestIssue2746(t *testing.T) { testIssue2746(t, 5) }
// With 3 nodes TestIssue2476 sometimes had a shutdown with an inflight snapshot.
func TestIssue2746WithThree(t *testing.T) { testIssue2746(t, 3) }
func testIssue2746(t *testing.T, members int) {
defer testutil.AfterTest(t)
c := NewCluster(t, members)
for _, m := range c.Members {
m.SnapshotCount = 10
}
c.Launch(t)
defer c.Terminate(t)
// force a snapshot
for i := 0; i < 20; i++ {
clusterMustProgress(t, c.Members)
}
c.RemoveMember(t, uint64(c.Members[members-1].s.ID()))
c.waitLeader(t, c.Members)
c.AddMember(t)
c.waitLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
// Ensure etcd will not panic when removing a just started member.
func TestIssue2904(t *testing.T) {
defer testutil.AfterTest(t)
// start 1-member cluster to ensure member 0 is the leader of the cluster.
c := NewCluster(t, 1)
c.Launch(t)
defer c.Terminate(t)
c.AddMember(t)
c.Members[1].Stop(t)
// send remove member-1 request to the cluster.
cc := MustNewHTTPClient(t, c.URLs(), nil)
ma := client.NewMembersAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
// the proposal is not committed because member 1 is stopped, but the
// proposal is appended to leader's raft log.
ma.Remove(ctx, c.Members[1].s.ID().String())
cancel()
// restart member, and expect it to send UpdateAttributes request.
// the log in the leader is like this:
// [..., remove 1, ..., update attr 1, ...]
c.Members[1].Restart(t)
// when the member comes back, it acks the proposal to remove itself,
// and applies it.
<-c.Members[1].s.StopNotify()
// terminate removed member
c.Members[1].Terminate(t)
c.Members = c.Members[:1]
// wait for the member to be removed.
c.waitMembersMatch(t, c.HTTPMembers())
}
// TestIssue3699 tests minority failure during cluster configuration; it was
// deadlocking.
func TestIssue3699(t *testing.T) {
// start a cluster of 3 nodes a, b, c
defer testutil.AfterTest(t)
c := NewCluster(t, 3)
c.Launch(t)
defer c.Terminate(t)
// make node a unavailable
c.Members[0].Stop(t)
// add node d
c.AddMember(t)
// electing node d as leader makes node a unable to participate
leaderID := c.waitLeader(t, c.Members)
for leaderID != 3 {
c.Members[leaderID].Stop(t)
<-c.Members[leaderID].s.StopNotify()
// do not restart the killed member immediately.
// the member will advance its election timeout after restart,
// so it will have a better chance to become the leader again.
time.Sleep(time.Duration(electionTicks * int(tickDuration)))
c.Members[leaderID].Restart(t)
leaderID = c.waitLeader(t, c.Members)
}
// bring back node a
// node a will remain useless as long as d is the leader.
if err := c.Members[0].Restart(t); err != nil {
t.Fatal(err)
}
select {
// waiting for ReadyNotify can take several seconds
case <-time.After(10 * time.Second):
t.Fatalf("waited too long for ready notification")
case <-c.Members[0].s.StopNotify():
t.Fatalf("should not be stopped")
case <-c.Members[0].s.ReadyNotify():
}
// must waitLeader so goroutines don't leak on terminate
c.waitLeader(t, c.Members)
// try to participate in cluster
cc := MustNewHTTPClient(t, []string{c.URL(0)}, c.cfg.ClientTLS)
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := kapi.Set(ctx, "/foo", "bar", nil); err != nil {
t.Fatalf("unexpected error on Set (%v)", err)
}
cancel()
}
// TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members.
func TestRejectUnhealthyAdd(t *testing.T) {
defer testutil.AfterTest(t)
c := NewCluster(t, 3)
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
}
c.Launch(t)
defer c.Terminate(t)
// make cluster unhealthy and wait for downed peer
c.Members[0].Stop(t)
c.WaitLeader(t)
// all attempts to add member should fail
for i := 1; i < len(c.Members); i++ {
err := c.addMemberByURL(t, c.URL(i), "unix://foo:12345")
if err == nil {
t.Fatalf("should have failed adding peer")
}
// TODO: client should return descriptive error codes for internal errors
if !strings.Contains(err.Error(), "has no leader") {
t.Errorf("unexpected error (%v)", err)
}
}
// make cluster healthy
c.Members[0].Restart(t)
c.WaitLeader(t)
time.Sleep(2 * etcdserver.HealthInterval)
// add member should succeed now that it's healthy
var err error
for i := 1; i < len(c.Members); i++ {
if err = c.addMemberByURL(t, c.URL(i), "unix://foo:12345"); err == nil {
break
}
}
if err != nil {
t.Fatalf("should have added peer to healthy cluster (%v)", err)
}
}
// TestRejectUnhealthyRemove ensures an unhealthy cluster rejects removing members
// if quorum will be lost.
func TestRejectUnhealthyRemove(t *testing.T) {
defer testutil.AfterTest(t)
c := NewCluster(t, 5)
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
}
c.Launch(t)
defer c.Terminate(t)
// make cluster unhealthy and wait for downed peer; (3 up, 2 down)
c.Members[0].Stop(t)
c.Members[1].Stop(t)
c.WaitLeader(t)
// reject remove active member since (3,2)-(1,0) => (2,2) lacks quorum
err := c.removeMember(t, uint64(c.Members[2].s.ID()))
if err == nil {
t.Fatalf("should reject quorum breaking remove")
}
// TODO: client should return more descriptive error codes for internal errors
if !strings.Contains(err.Error(), "has no leader") {
t.Errorf("unexpected error (%v)", err)
}
// member stopped after launch; wait for missing heartbeats
time.Sleep(time.Duration(electionTicks * int(tickDuration)))
// permit remove dead member since (3,2) - (0,1) => (3,1) has quorum
if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
t.Fatalf("should accept removing down member")
}
// bring cluster to (4,1)
c.Members[0].Restart(t)
// restarted member must be connected for a HealthInterval before remove is accepted
time.Sleep((3 * etcdserver.HealthInterval) / 2)
// accept remove member since (4,1)-(1,0) => (3,1) has quorum
if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
t.Fatalf("expected to remove member, got error %v", err)
}
}
// TestRestartRemoved ensures that a restarted removed member must exit
// if 'initial-cluster-state' is set to 'new' and the old data directory still
// exists (see https://github.com/etcd-io/etcd/issues/7512 for more).
func TestRestartRemoved(t *testing.T) {
defer testutil.AfterTest(t)
// 1. start single-member cluster
c := NewCluster(t, 1)
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
}
c.Launch(t)
defer c.Terminate(t)
// 2. add a new member
c.AddMember(t)
c.WaitLeader(t)
oldm := c.Members[0]
oldm.keepDataDirTerminate = true
// 3. remove first member, shut down without deleting data
if err := c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
t.Fatalf("expected to remove member, got error %v", err)
}
c.WaitLeader(t)
// 4. restart first member with 'initial-cluster-state=new'
// wrong config, expects exit within ReqTimeout
oldm.ServerConfig.NewCluster = false
if err := oldm.Restart(t); err != nil {
t.Fatalf("unexpected ForceRestart error: %v", err)
}
defer func() {
oldm.Close()
os.RemoveAll(oldm.ServerConfig.DataDir)
}()
select {
case <-oldm.s.StopNotify():
case <-time.After(time.Minute):
t.Fatalf("removed member didn't exit within %v", time.Minute)
}
}
// clusterMustProgress ensures that the cluster can make progress. It creates
// a random key first, and then checks that the new key can be retrieved from
// all client URLs of the cluster.
func clusterMustProgress(t *testing.T, membs []*member) {
cc := MustNewHTTPClient(t, []string{membs[0].URL()}, nil)
kapi := client.NewKeysAPI(cc)
key := fmt.Sprintf("foo%d", rand.Int())
var (
err error
resp *client.Response
)
// retry in case of leader loss induced by slow CI
for i := 0; i < 3; i++ {
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
resp, err = kapi.Create(ctx, "/"+key, "bar")
cancel()
if err == nil {
break
}
t.Logf("failed to create key on %q (%v)", membs[0].URL(), err)
}
if err != nil {
t.Fatalf("create on %s error: %v", membs[0].URL(), err)
}
for i, m := range membs {
u := m.URL()
mcc := MustNewHTTPClient(t, []string{u}, nil)
mkapi := client.NewKeysAPI(mcc)
mctx, mcancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := mkapi.Watcher(key, &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(mctx); err != nil {
t.Fatalf("#%d: watch on %s error: %v", i, u, err)
}
mcancel()
}
}
func TestSpeedyTerminate(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
// Stop/Restart so requests will time out on lost leaders
for i := 0; i < 3; i++ {
clus.Members[i].Stop(t)
clus.Members[i].Restart(t)
}
donec := make(chan struct{})
go func() {
defer close(donec)
clus.Terminate(t)
}()
select {
case <-time.After(10 * time.Second):
t.Fatalf("cluster took too long to terminate")
case <-donec:
}
}
|
[
"\"ETCD_ELECTION_TIMEOUT_TICKS\""
] |
[] |
[
"ETCD_ELECTION_TIMEOUT_TICKS"
] |
[]
|
["ETCD_ELECTION_TIMEOUT_TICKS"]
|
go
| 1 | 0 | |
lte/gateway/python/magma/pipelined/app/enforcement_stats.py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ipaddress
import os
from collections import defaultdict, namedtuple
from datetime import datetime, timedelta
from subprocess import check_output
import ryu.app.ofctl.api as ofctl_api
from lte.protos.pipelined_pb2 import RuleModResult
from lte.protos.policydb_pb2 import FlowDescription
from lte.protos.session_manager_pb2 import (
RuleRecord,
RuleRecordTable,
UPFSessionState,
)
from magma.common.sentry import EXCLUDE_FROM_ERROR_MONITORING
from magma.pipelined.app.base import (
ControllerType,
MagmaController,
global_epoch,
)
from magma.pipelined.app.policy_mixin import (
DROP_FLOW_STATS,
IGNORE_STATS,
PROCESS_STATS,
PolicyMixin,
)
from magma.pipelined.app.restart_mixin import DefaultMsgsMap, RestartMixin
from magma.pipelined.imsi import decode_imsi, encode_imsi
from magma.pipelined.ipv6_prefix_store import get_ipv6_prefix
from magma.pipelined.ng_manager.session_state_manager import SessionStateManager
from magma.pipelined.openflow import flows
from magma.pipelined.openflow.exceptions import (
MagmaDPDisconnectedError,
MagmaOFError,
)
from magma.pipelined.openflow.magma_match import MagmaMatch
from magma.pipelined.openflow.messages import MessageHub, MsgChannel
from magma.pipelined.openflow.registers import (
DIRECTION_REG,
IMSI_REG,
NG_SESSION_ID_REG,
REG_ZERO_VAL,
RULE_NUM_REG,
RULE_VERSION_REG,
SCRATCH_REGS,
Direction,
)
from magma.pipelined.policy_converters import (
convert_ipv4_str_to_ip_proto,
convert_ipv6_str_to_ip_proto,
get_eth_type,
get_ue_ip_match_args,
)
from magma.pipelined.utils import Utils
from ryu.app.ofctl.exception import (
InvalidDatapath,
OFError,
UnexpectedMultiReply,
)
from ryu.controller import dpset, ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.lib import hub
from ryu.ofproto.ofproto_v1_4 import OFPMPF_REPLY_MORE
ETH_FRAME_SIZE_BYTES = 14
class EnforcementStatsController(PolicyMixin, RestartMixin, MagmaController):
"""
This openflow controller installs flows for aggregating policy usage
statistics, which are sent to sessiond for tracking.
It periodically polls OVS for flow stats on its table and reports the
usage records to session manager via RPC. Flows are deleted when their
version (reg4 match) is different from the current version of the rule for
the subscriber maintained by the rule version mapper.
"""
APP_NAME = 'enforcement_stats'
APP_TYPE = ControllerType.LOGICAL
SESSIOND_RPC_TIMEOUT = 10
# 0xffffffffffffffff is reserved in openflow
DEFAULT_FLOW_COOKIE = 0xfffffffffffffffe
INIT_SLEEP_TIME = 3
MAX_DELAY_INTERVALS = 20
ng_config = namedtuple(
'ng_config',
['ng_service_enabled', 'sessiond_setinterface'],
)
_CONTEXTS = {
'dpset': dpset.DPSet,
}
def __init__(self, *args, **kwargs):
super(EnforcementStatsController, self).__init__(*args, **kwargs)
self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
self.next_table = \
self._service_manager.get_next_table_num(self.APP_NAME)
self.dpset = kwargs['dpset']
self.loop = kwargs['loop']
# Spawn a thread to poll for flow stats
poll_interval = kwargs['config']['enforcement']['poll_interval']
# Create a rpc channel to sessiond
self.sessiond = kwargs['rpc_stubs']['sessiond']
self._msg_hub = MessageHub(self.logger)
self.unhandled_stats_msgs = [] # Store multi-part responses from ovs
self.total_usage = {} # Store total usage
self._clean_restart = kwargs['config']['clean_restart']
self._redis_enabled = kwargs['config'].get('redis_enabled', False)
self._unmatched_bytes = 0 # Store bytes matched by default rule if any
self._default_drop_flow_name = \
kwargs['config']['enforcement']['default_drop_flow_name']
self.flow_stats_thread = hub.spawn(self._monitor, poll_interval)
self._print_grpc_payload = os.environ.get('MAGMA_PRINT_GRPC_PAYLOAD')
self._last_poll_time = datetime.now()
self._last_report_timestamp = datetime.now()
self._bridge_name = kwargs['config']['bridge_name']
self._periodic_stats_reporting = kwargs['config']['enforcement'].get('periodic_stats_reporting', True)
if self._print_grpc_payload is None:
self._print_grpc_payload = \
kwargs['config'].get('magma_print_grpc_payload', False)
self._restart_info_store = kwargs['restart_info_store']
self._ovs_restarted = self._was_ovs_restarted()
self.ng_config = self._get_ng_config(kwargs['config'], kwargs['rpc_stubs'])
self._prefix_mapper = kwargs['interface_to_prefix_mapper']
def _get_ng_config(self, config_dict, rpc_stub_dict):
ng_service_enabled = config_dict.get('enable5g_features', None)
sessiond_setinterface = rpc_stub_dict.get('sessiond_setinterface')
return self.ng_config(ng_service_enabled=ng_service_enabled, sessiond_setinterface=sessiond_setinterface)
def delete_all_flows(self, datapath):
flows.delete_all_flows_from_table(datapath, self.tbl_num)
def cleanup_state(self):
"""
When we remove/reinsert flows we need to remove old usage maps as new
flows will have reset stat counters
"""
self.unhandled_stats_msgs = []
self.total_usage = {}
self._unmatched_bytes = 0
def initialize_on_connect(self, datapath):
"""
Install the default flows on datapath connect event.
Args:
datapath: ryu datapath struct
"""
self._datapath = datapath
def _get_default_flow_msgs(self, datapath) -> DefaultMsgsMap:
"""
Gets the default flow msg that drops traffic
Args:
datapath: ryu datapath struct
Returns:
The list of default msgs to add
"""
match = MagmaMatch()
msg = flows.get_add_drop_flow_msg(
datapath, self.tbl_num, match,
priority=flows.MINIMUM_PRIORITY,
cookie=self.DEFAULT_FLOW_COOKIE,
)
return {self.tbl_num: [msg]}
def cleanup_on_disconnect(self, datapath):
"""
Cleanup flows on datapath disconnect event.
Args:
datapath: ryu datapath struct
"""
if self._clean_restart:
self.delete_all_flows(datapath)
def _install_flow_for_rule(
self, imsi, msisdn: bytes, uplink_tunnel: int, ip_addr, apn_ambr, rule, version, shard_id,
local_f_teid_ng: int,
):
"""
Install a flow to get stats for a particular rule. Flows will match on
IMSI, cookie (the rule num), in/out direction
Args:
imsi (string): subscriber to install rule for
msisdn (bytes): subscriber MSISDN
uplink_tunnel (int): tunnel ID of the subscriber.
ip_addr (string): subscriber session ipv4 address
rule (PolicyRule): policy rule proto
"""
def fail(err):
self.logger.error(
"Failed to install rule %s for subscriber %s: %s",
rule.id, imsi, err,
)
return RuleModResult.FAILURE
msgs = self._get_rule_match_flow_msgs(
imsi, msisdn, uplink_tunnel,
ip_addr, apn_ambr, rule, version, shard_id,
local_f_teid_ng,
)
try:
chan = self._msg_hub.send(msgs, self._datapath)
except MagmaDPDisconnectedError:
self.logger.error(
"Datapath disconnected, failed to install rule %s"
"for imsi %s", rule, imsi, extra=EXCLUDE_FROM_ERROR_MONITORING,
)
return RuleModResult.FAILURE
for _ in range(len(msgs)):
try:
result = chan.get()
except MsgChannel.Timeout:
return fail("No response from OVS")
if not result.ok():
return fail(result.exception())
return RuleModResult.SUCCESS
@set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
def _handle_barrier(self, ev):
self._msg_hub.handle_barrier(ev)
@set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
def _handle_error(self, ev):
self._msg_hub.handle_error(ev)
# pylint: disable=protected-access,unused-argument
def _get_rule_match_flow_msgs(self, imsi, _, __, ip_addr, ambr, rule, version, shard_id, local_f_teid_ng):
"""
Returns flow add messages used for rule matching.
"""
rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
self.logger.debug(
'Installing flow for %s with rule num %s (version %s)', imsi,
rule_num, version,
)
inbound_rule_match = _generate_rule_match(
imsi, ip_addr, rule_num,
version, Direction.IN,
local_f_teid_ng,
)
outbound_rule_match = _generate_rule_match(
imsi, ip_addr, rule_num,
version, Direction.OUT,
local_f_teid_ng,
)
flow_actions = [flow.action for flow in rule.flow_list]
msgs = []
if FlowDescription.PERMIT in flow_actions:
inbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = PROCESS_STATS
outbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = PROCESS_STATS
msgs.extend([
flows.get_add_drop_flow_msg(
self._datapath,
self.tbl_num,
inbound_rule_match,
priority=flows.DEFAULT_PRIORITY,
cookie=shard_id,
),
flows.get_add_drop_flow_msg(
self._datapath,
self.tbl_num,
outbound_rule_match,
priority=flows.DEFAULT_PRIORITY,
cookie=shard_id,
),
])
else:
inbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = DROP_FLOW_STATS
outbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = DROP_FLOW_STATS
msgs.extend([
flows.get_add_drop_flow_msg(
self._datapath,
self.tbl_num,
inbound_rule_match,
priority=flows.DEFAULT_PRIORITY,
cookie=shard_id,
),
flows.get_add_drop_flow_msg(
self._datapath,
self.tbl_num,
outbound_rule_match,
priority=flows.DEFAULT_PRIORITY,
cookie=shard_id,
),
])
if rule.app_name:
inbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = IGNORE_STATS
outbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = IGNORE_STATS
msgs.extend([
flows.get_add_drop_flow_msg(
self._datapath,
self.tbl_num,
inbound_rule_match,
priority=flows.DEFAULT_PRIORITY,
cookie=shard_id,
),
flows.get_add_drop_flow_msg(
self._datapath,
self.tbl_num,
outbound_rule_match,
priority=flows.DEFAULT_PRIORITY,
cookie=shard_id,
),
])
return msgs
def _get_default_flow_msgs_for_subscriber(self, imsi, ip_addr, local_f_teid_ng):
match_in = _generate_rule_match(
imsi, ip_addr, 0, 0,
Direction.IN, local_f_teid_ng,
)
match_out = _generate_rule_match(
imsi, ip_addr, 0, 0,
Direction.OUT, local_f_teid_ng,
)
return [
flows.get_add_drop_flow_msg(
self._datapath, self.tbl_num, match_in,
priority=Utils.DROP_PRIORITY,
),
flows.get_add_drop_flow_msg(
self._datapath, self.tbl_num, match_out,
priority=Utils.DROP_PRIORITY,
),
]
def _install_redirect_flow(self, imsi, ip_addr, rule, version):
pass
def _install_default_flow_for_subscriber(self, imsi, ip_addr, local_f_teid_ng):
"""
Add a low priority flow to drop a subscriber's traffic.
Args:
imsi (string): subscriber id
ip_addr (string): subscriber ip_addr
"""
msgs = self._get_default_flow_msgs_for_subscriber(imsi, ip_addr, local_f_teid_ng)
if msgs:
chan = self._msg_hub.send(msgs, self._datapath)
self._wait_for_responses(chan, len(msgs))
def get_policy_usage(self, fut):
record_table = RuleRecordTable(
records=self.total_usage.values(),
epoch=global_epoch,
)
fut.set_result(record_table)
def _monitor(self, poll_interval):
"""
Main thread that sends a stats request at the configured interval in
seconds.
"""
while not self.init_finished:
# Still send an empty report -> for pipelined setup
self._report_usage({})
hub.sleep(self.INIT_SLEEP_TIME)
if not self._periodic_stats_reporting:
return
while True:
hub.sleep(poll_interval)
now = datetime.now()
delta = get_adjusted_delta(self._last_report_timestamp, now)
if delta > poll_interval * self.MAX_DELAY_INTERVALS:
self.logger.info(
'Previous update missing, current time %s, last '
'report timestamp %s, last poll timestamp %s',
now.strftime("%H:%M:%S"),
self._last_report_timestamp.strftime("%H:%M:%S"),
self._last_poll_time.strftime("%H:%M:%S"),
)
self._last_report_timestamp = now
hub.sleep(poll_interval / 2)
continue
if delta < poll_interval:
continue
self._last_poll_time = now
self.logger.debug(
'Started polling: %s',
now.strftime("%H:%M:%S"),
)
self._poll_stats(self._datapath)
def _poll_stats(self, datapath, cookie: int = 0, cookie_mask: int = 0):
"""
Send a FlowStatsRequest message to the datapath
Raises:
MagmaOFError: if we can't poll datapath stats
"""
try:
flows.send_stats_request(
datapath, self.tbl_num,
cookie, cookie_mask,
)
except MagmaOFError as e:
self.logger.warning("Couldn't poll datapath stats: %s", e)
except Exception as e: # pylint: disable=broad-except
self.logger.warning("Couldn't poll datapath stats: %s", e)
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def _flow_stats_reply_handler(self, ev):
"""
Schedule the flow stats handling in the main event loop, so as to
unblock the ryu event loop
"""
if not self.init_finished:
self.logger.debug('Setup not finished, skipping stats reply')
return
if self._datapath_id != ev.msg.datapath.id:
self.logger.debug('Ignoring stats from different bridge')
return
self.unhandled_stats_msgs.append(ev.msg.body)
if ev.msg.flags == OFPMPF_REPLY_MORE:
# Wait for the remaining multi-part responses that are received for the
# single stats request.
return
self.loop.call_soon_threadsafe(
self._handle_flow_stats, self.unhandled_stats_msgs,
)
self.unhandled_stats_msgs = []
def _handle_flow_stats(self, stats_msgs):
"""
Aggregate flow stats by rule, and report to session manager
"""
stat_count = sum(len(flow_stats) for flow_stats in stats_msgs)
if stat_count == 0:
return
self.logger.debug("Processing %s stats responses", len(stats_msgs))
# Aggregate flows into rule records
aggregated_msgs = []
for flow_stats in stats_msgs:
aggregated_msgs += flow_stats
self.logger.debug("Processing stats of %d flows", len(aggregated_msgs))
try:
current_usage = self._get_usage_from_flow_stat(aggregated_msgs)
except ConnectionError:
self.logger.error('Failed processing stats, redis unavailable')
self.unhandled_stats_msgs.append(stats_msgs)
return
# Send report even if usage is empty. Sessiond uses empty reports to
# recognize when flows have ended
self._report_usage(current_usage)
# This is done primarily for CWF integration tests, TODO rm
self.total_usage = current_usage
# Report NG session config state to sessiond when 5G features are enabled
if self.ng_config.ng_service_enabled:
self._prepare_session_config_report(stats_msgs)
def deactivate_default_flow(self, imsi, ip_addr, local_f_teid_ng=0):
if self._datapath is None:
self.logger.error('Datapath not initialized')
return
match_in = _generate_rule_match(
imsi, ip_addr, 0, 0,
Direction.IN, local_f_teid_ng,
)
match_out = _generate_rule_match(
imsi, ip_addr, 0, 0,
Direction.OUT, local_f_teid_ng,
)
flows.delete_flow(self._datapath, self.tbl_num, match_in)
flows.delete_flow(self._datapath, self.tbl_num, match_out)
def _report_usage(self, usage):
"""
Report usage to sessiond using rpc
"""
record_table = RuleRecordTable(
records=usage.values(),
epoch=global_epoch,
update_rule_versions=self._ovs_restarted,
)
if self._print_grpc_payload:
record_msg = 'Sending RPC payload: {0}{{\n{1}}}'.format(
record_table.DESCRIPTOR.name, str(record_table),
)
self.logger.info(record_msg)
future = self.sessiond.ReportRuleStats.future(
record_table, self.SESSIOND_RPC_TIMEOUT,
)
future.add_done_callback(
lambda future: self.loop.call_soon_threadsafe(
self._report_usage_done, future, usage.values(),
),
)
def _report_usage_done(self, future, records):
"""
Callback after sessiond RPC completion
"""
self._last_report_timestamp = datetime.now()
self.logger.debug(
'Finished reporting: %s',
self._last_report_timestamp.strftime("%H:%M:%S"),
)
err = future.exception()
if err:
self.logger.error('Couldn\'t send flow records to sessiond: %s', err)
return
try:
self._delete_old_flows(records)
except ConnectionError:
self.logger.error('Failed to remove old flows, redis unavailable')
return
def _get_usage_from_flow_stat(self, flow_stats):
"""
Update the rule record map with the flow stat and return the
updated map.
"""
current_usage = defaultdict(RuleRecord)
for flow_stat in flow_stats:
if flow_stat.table_id != self.tbl_num:
# this update is not intended for policy
continue
rule_id = self._get_rule_id(flow_stat)
# Rule not found, must be default flow
if rule_id == "":
default_flow_matched = \
flow_stat.cookie == self.DEFAULT_FLOW_COOKIE
if default_flow_matched:
if flow_stat.byte_count != 0 and \
self._unmatched_bytes != flow_stat.byte_count:
self.logger.debug(
'%s bytes total not reported.',
flow_stat.byte_count,
)
self._unmatched_bytes = flow_stat.byte_count
continue
else:
# This must be the default drop flow
rule_id = self._default_drop_flow_name
# If this is a pass through app name flow ignore stats
if _get_policy_type(flow_stat.match) == IGNORE_STATS:
continue
sid = _get_sid(flow_stat)
if not sid:
continue
ipv4_addr = _get_ipv4(flow_stat)
ipv6_addr = self._get_ipv6(flow_stat)
local_f_teid_ng = _get_ng_local_f_id(flow_stat)
# use a compound key to separate flows for the same rule but for
# different subscribers
key = sid + "|" + rule_id
if ipv4_addr:
key += "|" + ipv4_addr
elif ipv6_addr:
key += "|" + ipv6_addr
rule_version = _get_version(flow_stat)
if not rule_version:
rule_version = 0
key += "|" + str(rule_version)
current_usage[key].rule_id = rule_id
current_usage[key].sid = sid
current_usage[key].rule_version = rule_version
if ipv4_addr:
current_usage[key].ue_ipv4 = ipv4_addr
elif ipv6_addr:
current_usage[key].ue_ipv6 = ipv6_addr
if local_f_teid_ng:
current_usage[key].teid = local_f_teid_ng
bytes_rx = 0
bytes_tx = 0
if flow_stat.match[DIRECTION_REG] == Direction.IN:
# HACK decrement byte count for downlink packets by the length
# of an ethernet frame. Only IP and below should be counted towards
# a user's data. Uplink does this already because the GTP port is
# an L3 port.
bytes_rx = _get_downlink_byte_count(flow_stat)
else:
bytes_tx = flow_stat.byte_count
if _get_policy_type(flow_stat.match) == PROCESS_STATS:
current_usage[key].bytes_rx += bytes_rx
current_usage[key].bytes_tx += bytes_tx
else:
current_usage[key].dropped_rx += bytes_rx
current_usage[key].dropped_tx += bytes_tx
return current_usage
def _delete_old_flows(self, records):
"""
Check if the version of any record is older than the current version.
If so, delete the flow.
"""
for record in records:
ip_addr = None
if record.ue_ipv4:
ip_addr = convert_ipv4_str_to_ip_proto(record.ue_ipv4)
elif record.ue_ipv6:
ip_addr = convert_ipv6_str_to_ip_proto(record.ue_ipv6)
current_ver = self._session_rule_version_mapper.get_version(
record.sid, ip_addr, record.rule_id,
)
local_f_teid_ng = 0
if record.teid:
local_f_teid_ng = record.teid
if current_ver == record.rule_version:
continue
try:
self._delete_flow(
record.sid, ip_addr,
record.rule_id, record.rule_version, local_f_teid_ng,
)
except MagmaOFError as e:
self.logger.error(
'Failed to delete rule %s for subscriber %s ('
'version: %s): %s', record.rule_id,
record.sid, record.rule_version, e,
)
def _delete_flow(self, imsi, ip_addr, rule_id, rule_version, local_f_teid_ng=0):
rule_num = self._rule_mapper.get_or_create_rule_num(rule_id)
match_in = _generate_rule_match(
imsi, ip_addr, rule_num, rule_version,
Direction.IN, local_f_teid_ng,
)
match_out = _generate_rule_match(
imsi, ip_addr, rule_num, rule_version,
Direction.OUT, local_f_teid_ng,
)
flows.delete_flow(
self._datapath,
self.tbl_num,
match_in,
)
flows.delete_flow(
self._datapath,
self.tbl_num,
match_out,
)
def _was_ovs_restarted(self):
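# Compare the current ovs-vswitchd pid with the one stored from the previous run; a mismatch means OVS was restarted.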
try:
ovs_pid = int(check_output(["pidof", "ovs-vswitchd"]).decode())
except Exception as e: # pylint: disable=broad-except
self.logger.warning("Couldn't get ovs pid: %s", e)
ovs_pid = 0
stored_ovs_pid = self._restart_info_store["ovs-vswitchd"]
self._restart_info_store["ovs-vswitchd"] = ovs_pid
self.logger.info(
"Stored ovs_pid %d, new ovs pid %d",
stored_ovs_pid, ovs_pid,
)
return ovs_pid != stored_ovs_pid
def _get_rule_id(self, flow):
"""
Return the rule id from the rule cookie
"""
# the default rule will have a cookie of 0
rule_num = flow.match.get(RULE_NUM_REG, 0)
if rule_num == 0 or rule_num == self.DEFAULT_FLOW_COOKIE:
return ""
try:
return self._rule_mapper.get_rule_id(rule_num)
except KeyError as e:
self.logger.error(
'Could not find rule id for num %d: %s',
rule_num, e,
)
return ""
def get_stats(self, cookie: int = 0, cookie_mask: int = 0):
"""
Use the Ryu API to send a stats request for the given cookie and cookie mask,
convert the response into a RuleRecordTable, and schedule deletion of old flows.
"""
if not self._datapath:
self.logger.error("Could not initialize datapath for stats retrieval")
return RuleRecordTable()
parser = self._datapath.ofproto_parser
message = parser.OFPFlowStatsRequest(
datapath=self._datapath,
table_id=self.tbl_num,
cookie=cookie,
cookie_mask=cookie_mask,
)
try:
response = ofctl_api.send_msg(
self, message, reply_cls=parser.OFPFlowStatsReply,
reply_multi=True,
)
if not response:
self.logger.error("No rule records match the specified cookie and cookie mask")
return RuleRecordTable()
aggregated_msgs = []
for r in response:
aggregated_msgs += r.body
usage = self._get_usage_from_flow_stat(aggregated_msgs)
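# Schedule stale flow deletion on the main event loop so the Ryu thread is not blocked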
self.loop.call_soon_threadsafe(self._delete_old_flows, usage.values())
record_table = RuleRecordTable(
records=usage.values(),
epoch=global_epoch,
)
return record_table
except (InvalidDatapath, OFError, UnexpectedMultiReply):
self.logger.error("Could not obtain rule records due to either InvalidDatapath, OFError or UnexpectedMultiReply")
return RuleRecordTable()
def _prepare_session_config_report(self, stats_msgs):
session_config_dict = {}
for flow_stats in stats_msgs:
for stat in flow_stats:
if stat.table_id != self.tbl_num:
continue
local_f_teid_ng = _get_ng_local_f_id(stat)
if not local_f_teid_ng or local_f_teid_ng == REG_ZERO_VAL:
continue
# Already present
if local_f_teid_ng in session_config_dict:
if local_f_teid_ng != session_config_dict[local_f_teid_ng].local_f_teid:
self.logger.error("Mismatched local TEID value, needs investigation")
continue
sid = _get_sid(stat)
if not sid:
continue
rule_version = _get_version(stat)
if rule_version == 0:
continue
session_config_dict[local_f_teid_ng] = \
UPFSessionState(
subscriber_id=sid,
session_version=rule_version,
local_f_teid=local_f_teid_ng,
)
SessionStateManager.report_session_config_state(
session_config_dict,
self.ng_config.sessiond_setinterface,
)
def _get_ipv6(self, flow):
if DIRECTION_REG not in flow.match:
return None
if flow.match[DIRECTION_REG] == Direction.OUT:
ip_register = 'ipv6_src'
else:
ip_register = 'ipv6_dst'
if ip_register not in flow.match:
return None
ipv6 = flow.match[ip_register]
# masked value returned as tuple
if isinstance(ipv6, tuple):
ipv6_addr = ipv6[0]
else:
ipv6_addr = ipv6
prefix = get_ipv6_prefix(ipv6_addr)
interface = self._prefix_mapper.get_interface(prefix)
if interface is None:
return ipv6_addr
# Rebuild UE IPv6 address from prefix map
subnet = ipaddress.ip_address(prefix)
host_id = ipaddress.ip_address(interface)
ue_ip = ipaddress.ip_address(int(subnet) | int(host_id))
self.logger.debug("recalc ue_ip: %s sub: %s host: %s", ue_ip, prefix, host_id)
return str(ue_ip)
def _generate_rule_match(imsi, ip_addr, rule_num, version, direction, local_f_teid_ng=0):
"""
Return a MagmaMatch that matches on the rule num and the version.
"""
ip_match = get_ue_ip_match_args(ip_addr, direction)
return MagmaMatch(
imsi=encode_imsi(imsi), eth_type=get_eth_type(ip_addr),
direction=direction, rule_num=rule_num,
rule_version=version, local_f_teid_ng=local_f_teid_ng,
**ip_match,
)
def _get_sid(flow):
if IMSI_REG not in flow.match:
return None
return decode_imsi(flow.match[IMSI_REG])
def _get_ipv4(flow):
if DIRECTION_REG not in flow.match:
return None
if flow.match[DIRECTION_REG] == Direction.OUT:
ip_register = 'ipv4_src'
else:
ip_register = 'ipv4_dst'
if ip_register not in flow.match:
return None
ipv4 = flow.match[ip_register]
# masked value returned as tuple
if isinstance(ipv4, tuple):
return ipv4[0]
else:
return ipv4
def _get_version(flow):
if RULE_VERSION_REG not in flow.match:
return None
return flow.match[RULE_VERSION_REG]
def _get_downlink_byte_count(flow_stat):
total_bytes = flow_stat.byte_count
packet_count = flow_stat.packet_count
return total_bytes - ETH_FRAME_SIZE_BYTES * packet_count
def _get_policy_type(match):
if SCRATCH_REGS[1] not in match:
return None
return match[SCRATCH_REGS[1]]
def get_adjusted_delta(begin, end):
# Add on a bit of time to compensate for grpc
return (end - begin + timedelta(milliseconds=150)).total_seconds()
def _get_ng_local_f_id(flow):
if NG_SESSION_ID_REG not in flow.match:
return None
return flow.match[NG_SESSION_ID_REG]
|
[] |
[] |
[
"MAGMA_PRINT_GRPC_PAYLOAD"
] |
[]
|
["MAGMA_PRINT_GRPC_PAYLOAD"]
|
python
| 1 | 0 | |
cmd/newrelic/command.go
|
package main
import (
"errors"
"fmt"
"os"
"strconv"
"time"
"github.com/jedib0t/go-pretty/v6/text"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/newrelic/newrelic-cli/internal/client"
"github.com/newrelic/newrelic-cli/internal/credentials"
"github.com/newrelic/newrelic-cli/internal/install/types"
"github.com/newrelic/newrelic-cli/internal/output"
"github.com/newrelic/newrelic-cli/internal/utils"
"github.com/newrelic/newrelic-client-go/newrelic"
"github.com/newrelic/newrelic-client-go/pkg/accounts"
"github.com/newrelic/newrelic-client-go/pkg/nerdgraph"
)
var outputFormat string
var outputPlain bool
const defaultProfileName string = "default"
// Command represents the base command when called without any subcommands
var Command = &cobra.Command{
PersistentPreRun: initializeCLI,
Use: appName,
Short: "The New Relic CLI",
Long: `The New Relic CLI enables users to perform tasks against the New Relic APIs`,
Version: version,
DisableAutoGenTag: true, // Do not print generation date on documentation
}
func initializeCLI(cmd *cobra.Command, args []string) {
initializeProfile()
}
func initializeProfile() {
var accountID int
var region string
var licenseKey string
var insightsInsertKey string
var err error
credentials.WithCredentials(func(c *credentials.Credentials) {
if c.DefaultProfile != "" {
err = errors.New("default profile already exists, not attempting to initialize")
return
}
apiKey := os.Getenv("NEW_RELIC_API_KEY")
envAccountID := os.Getenv("NEW_RELIC_ACCOUNT_ID")
region = os.Getenv("NEW_RELIC_REGION")
licenseKey = os.Getenv("NEW_RELIC_LICENSE_KEY")
insightsInsertKey = os.Getenv("NEW_RELIC_INSIGHTS_INSERT_KEY")
// If we don't have a personal API key we can't initialize a profile.
if apiKey == "" {
err = errors.New("api key not provided, not attempting to initialize default profile")
return
}
// Default the region to US if it's not in the environment
if region == "" {
region = "US"
}
// Use the accountID from the environment if we have it.
if envAccountID != "" {
accountID, err = strconv.Atoi(envAccountID)
if err != nil {
err = fmt.Errorf("couldn't parse account ID: %s", err)
return
}
}
// We should have an API key by this point, initialize the client.
client.WithClient(func(nrClient *newrelic.NewRelic) {
// If we still don't have an account ID try to look one up from the API.
if accountID == 0 {
accountID, err = fetchAccountID(nrClient)
if err != nil {
return
}
}
if licenseKey == "" {
// We should have an account ID by now, so fetch the license key for it.
licenseKey, err = fetchLicenseKey(nrClient, accountID)
if err != nil {
log.Error(err)
return
}
}
if insightsInsertKey == "" {
// We should have an API key by now, so fetch the insights insert key for it.
insightsInsertKey, err = fetchInsightsInsertKey(nrClient, accountID)
if err != nil {
log.Error(err)
}
}
if !hasProfileWithDefaultName(c.Profiles) {
p := credentials.Profile{
Region: region,
APIKey: apiKey,
AccountID: accountID,
LicenseKey: licenseKey,
InsightsInsertKey: insightsInsertKey,
}
err = c.AddProfile(defaultProfileName, p)
if err != nil {
return
}
log.Infof("profile %s added", text.FgCyan.Sprint(defaultProfileName))
}
if len(c.Profiles) == 1 {
err = c.SetDefaultProfile(defaultProfileName)
if err != nil {
err = fmt.Errorf("error setting %s as the default profile: %s", text.FgCyan.Sprint(defaultProfileName), err)
return
}
log.Infof("setting %s as default profile", text.FgCyan.Sprint(defaultProfileName))
}
})
})
if err != nil {
log.Debugf("couldn't initialize default profile: %s", err)
}
}
func hasProfileWithDefaultName(profiles map[string]credentials.Profile) bool {
for profileName := range profiles {
if profileName == defaultProfileName {
return true
}
}
return false
}
func fetchLicenseKey(client *newrelic.NewRelic, accountID int) (string, error) {
query := `query($accountId: Int!) { actor { account(id: $accountId) { licenseKey } } }`
variables := map[string]interface{}{
"accountId": accountID,
}
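// The license key may not be returned immediately; retry the NerdGraph query a few times with a short delay.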
for i := 0; i < 3; i++ {
resp, err := client.NerdGraph.Query(query, variables)
if err != nil {
return "", err
}
queryResp := resp.(nerdgraph.QueryResponse)
actor := queryResp.Actor.(map[string]interface{})
account := actor["account"].(map[string]interface{})
if licenseKey, ok := account["licenseKey"]; ok {
return licenseKey.(string), nil
}
time.Sleep(1 * time.Second)
}
return "", types.ErrorFetchingLicenseKey
}
func fetchInsightsInsertKey(client *newrelic.NewRelic, accountID int) (string, error) {
// Check for an existing key first
keys, err := client.APIAccess.ListInsightsInsertKeys(accountID)
if err != nil {
return "", types.ErrorFetchingInsightsInsertKey
}
// We already have a key, return it
if len(keys) > 0 {
return keys[0].Key, nil
}
// Create a new key if one doesn't exist
key, err := client.APIAccess.CreateInsightsInsertKey(accountID)
if err != nil {
return "", types.ErrorFetchingInsightsInsertKey
}
return key.Key, nil
}
// fetchAccountID tries to retrieve an account ID for the given user. If it
// finds more than one account it will return an error.
func fetchAccountID(client *newrelic.NewRelic) (int, error) {
params := accounts.ListAccountsParams{
Scope: &accounts.RegionScopeTypes.IN_REGION,
}
accounts, err := client.Accounts.ListAccounts(params)
if err != nil {
return 0, err
}
if len(accounts) == 1 {
return accounts[0].ID, nil
}
return 0, errors.New("multiple accounts found, please set NEW_RELIC_ACCOUNT_ID")
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the RootCmd.
func Execute() error {
Command.Use = appName
Command.Version = version
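// Suppress usage output on errors when running in a CI environment.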
Command.SilenceUsage = os.Getenv("CI") != ""
// Silence Cobra's internal handling of error messaging
// since we have a custom error handler in main.go
Command.SilenceErrors = true
return Command.Execute()
}
func init() {
cobra.OnInitialize(initConfig)
Command.PersistentFlags().StringVar(&outputFormat, "format", output.DefaultFormat.String(), "output text format ["+output.FormatOptions()+"]")
Command.PersistentFlags().BoolVar(&outputPlain, "plain", false, "output compact text")
}
func initConfig() {
utils.LogIfError(output.SetFormat(output.ParseFormat(outputFormat)))
utils.LogIfError(output.SetPrettyPrint(!outputPlain))
}
|
[
"\"NEW_RELIC_API_KEY\"",
"\"NEW_RELIC_ACCOUNT_ID\"",
"\"NEW_RELIC_REGION\"",
"\"NEW_RELIC_LICENSE_KEY\"",
"\"NEW_RELIC_INSIGHTS_INSERT_KEY\"",
"\"CI\""
] |
[] |
[
"NEW_RELIC_INSIGHTS_INSERT_KEY",
"NEW_RELIC_API_KEY",
"CI",
"NEW_RELIC_LICENSE_KEY",
"NEW_RELIC_REGION",
"NEW_RELIC_ACCOUNT_ID"
] |
[]
|
["NEW_RELIC_INSIGHTS_INSERT_KEY", "NEW_RELIC_API_KEY", "CI", "NEW_RELIC_LICENSE_KEY", "NEW_RELIC_REGION", "NEW_RELIC_ACCOUNT_ID"]
|
go
| 6 | 0 | |
scripts/db-cleanup.py
|
#!/usr/bin/env python
#
# Copyright (c) 2018 SAP SE
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import click
import logging
import os
import six
import time
import sys
from openstack import connection, exceptions, utils
from keystoneauth1 import loading
from keystoneauth1 import session
from cinderclient import client
# prometheus export functionality
from prometheus_client import start_http_server, Gauge
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
# cmdline handling
@click.command()
# every how many minutes the check should be performed
@click.option('--interval', prompt='Interval in minutes')
# how often a vm should continuously be a candidate for some action (delete etc.) before
# we actually do it - the idea behind this is to avoid actions caused by short,
# temporary technical problems of any kind
@click.option('--iterations', prompt='Iterations')
# work on nova db (vms) or cinder db (volumes)?
@click.option('--nova', is_flag=True)
@click.option('--cinder', is_flag=True)
# dry run mode - only say what we would do without actually doing it
@click.option('--dry-run', is_flag=True)
# port to use for prometheus exporter, otherwise we use 9456 as default
@click.option('--port')
class Cleanup:
def __init__(self, interval, iterations, nova, cinder, dry_run, port):
self.interval = interval
self.iterations = iterations
self.novacmdline = nova
self.cindercmdline = cinder
self.dry_run = dry_run
self.port = port
# a dict of all projects we have in openstack
self.projects = dict()
# dicts for the ids we have seen and the ones we want to do something with
self.seen_dict = dict()
self.to_be_dict = dict()
# list of servers, snapshots and volumes we have seen or plan to delete
self.servers_seen = dict()
self.servers_to_be_deleted = dict()
self.snapshots_seen = dict()
self.snapshots_to_be_deleted = dict()
self.volumes_seen = dict()
self.volumes_to_be_deleted = dict()
# define the state to verbal name mapping
self.state_to_name_map = dict()
self.state_to_name_map["delete_server"] = "delete of server"
self.state_to_name_map["delete_volume"] = "delete of volume"
self.state_to_name_map["delete_snapshot"] = "delete of snapshot"
self.gauge_value = dict()
if self.novacmdline:
which_service = "nova"
self.gauge_delete_server = Gauge(which_service + '_nanny_delete_server',
'server deletes of the ' + which_service + ' nanny', ['kind'])
if self.cindercmdline:
which_service = "cinder"
self.gauge_delete_volume = Gauge(which_service + '_nanny_delete_volume',
'volume deletes of the ' + which_service + ' nanny', ['kind'])
self.gauge_delete_snapshot = Gauge(which_service + '_nanny_delete_snapshot',
'snapshot deletes of the ' + which_service + ' nanny', ['kind'])
# Start http server for exported data
if port:
prometheus_exporter_port = self.port
else:
prometheus_exporter_port = 9456
try:
start_http_server(prometheus_exporter_port)
except Exception as e:
logging.error("failed to start prometheus exporter http server: " + str(e))
self.run_me()
def connection_buildup(self):
# a dict of all projects we have in openstack
self.projects = dict()
# openstack connection
try:
self.conn = connection.Connection(auth_url=os.getenv('OS_AUTH_URL'),
project_name=os.getenv('OS_PROJECT_NAME'),
project_domain_name=os.getenv('OS_PROJECT_DOMAIN_NAME'),
username=os.getenv('OS_USERNAME'),
user_domain_name=os.getenv('OS_USER_DOMAIN_NAME'),
password=os.getenv('OS_PASSWORD'),
identity_api_version="3")
except Exception as e:
log.warn("- PLEASE CHECK MANUALLY - problems connecting to openstack: %s - retrying in next loop run",
str(e))
else:
# get all openstack projects
# no exception handling is done here as it would complicate things and we just
# successfully created the connection, so the chance of failure is low
for project in self.conn.identity.projects():
self.projects[project.id] = project.name
if not self.projects:
raise RuntimeError('- PLEASE CHECK MANUALLY - did not get any keystone projects back from the keystone api - this should in theory never happen ...')
if self.cindercmdline:
# cinder client session reusing the auth from the openstacksdk connection session
# this is needed to set the state of volumes and snapshots, which is not yet implemented in the openstacksdk
auth = self.conn.session.auth
sess = session.Session(auth=auth)
self.cinder = client.Client("2.0", session=sess)
def init_seen_dict(self):
for i in self.seen_dict:
self.seen_dict[i] = 0
# reset dict of all vms or volumes we plan to delete from the db
def reset_to_be_dict(self):
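# an id that was not seen in the last run gets its pending-action counter reset to zero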
for i in self.seen_dict:
if self.seen_dict[i] == 0:
self.to_be_dict[i] = 0
def run_me(self):
if self.novacmdline or self.cindercmdline:
while True:
self.connection_buildup()
if len(self.projects) > 0:
self.os_cleanup_items()
self.send_to_prometheus_exporter()
self.wait_a_moment()
else:
log.info("either the --nova or the --cinder flag should be given - giving up!")
sys.exit(0)
# main cleanup function
def os_cleanup_items(self):
# reset all gauge counters
for kind in ["plan", "dry_run", "done"]:
if self.novacmdline:
self.gauge_value[(kind, "delete_server")] = 0
if self.cindercmdline:
self.gauge_value[(kind, "delete_volume")] = 0
self.gauge_value[(kind, "delete_snapshot")] = 0
# get all instances from nova sorted by their id
try:
self.servers = sorted(self.conn.compute.servers(details=True, all_projects=1), key=lambda x: x.id)
if not self.servers:
raise RuntimeError('- PLEASE CHECK MANUALLY - did not get any nova instances back from the nova api - this should in theory never happen ...')
except exceptions.HttpException as e:
log.warn("- PLEASE CHECK MANUALLY - got an http exception: %s - retrying in next loop run", str(e))
return
except exceptions.SDKException as e:
log.warn("- PLEASE CHECK MANUALLY - got an sdk exception: %s - retrying in next loop run", str(e))
return
if self.novacmdline:
self.seen_dict = self.servers_seen
self.to_be_dict = self.servers_to_be_deleted
self.entity = self.servers
self.check_for_project_id("server")
if self.cindercmdline:
self.snapshot_from = dict()
# get all snapshots from cinder sorted by their id - do the snapshots before the volumes,
# as they are created from them and thus should be deleted first
try:
self.snapshots = sorted(self.conn.block_store.snapshots(details=True, all_projects=1), key=lambda x: x.id)
# if not self.snapshots:
# raise RuntimeError('- PLEASE CHECK MANUALLY - did not get any cinder snapshots back from the cinder api - this should in theory never happen ...')
except exceptions.HttpException as e:
log.warn("- PLEASE CHECK MANUALLY - got an http exception: %s - retrying in next loop run", str(e))
return
except exceptions.SDKException as e:
log.warn("- PLEASE CHECK MANUALLY - got an sdk exception: %s - retrying in next loop run", str(e))
return
if self.snapshots:
# build a dict to check which volume a snapshot was created from quickly
for i in self.snapshots:
self.snapshot_from[i.id] = i.volume_id
self.seen_dict = self.snapshots_seen
self.to_be_dict = self.snapshots_to_be_deleted
self.entity = self.snapshots
self.check_for_project_id("snapshot")
self.is_server = dict()
self.attached_to = dict()
self.volume_project_id = dict()
# get all volumes from cinder sorted by their id
try:
self.volumes = sorted(self.conn.block_store.volumes(details=True, all_projects=1), key=lambda x: x.id)
if not self.volumes:
raise RuntimeError('- PLEASE CHECK MANUALLY - did not get any cinder volumes back from the cinder api - this should in theory never happen ...')
except exceptions.HttpException as e:
log.warn("- PLEASE CHECK MANUALLY - got an http exception: %s - retrying in next loop run", str(e))
return
except exceptions.SDKException as e:
log.warn("- PLEASE CHECK MANUALLY - got an sdk exception: %s - retrying in next loop run", str(e))
return
# build a dict to check later if a server exists quickly
for i in self.servers:
self.is_server[i.id] = i.id
# build a dict to check which server a volume is possibly attached to quickly
for i in self.volumes:
self.volume_project_id[i.id] = i.project_id
# only record attachments where we have any
try:
self.attached_to[i.attachments[0]["id"]] = i.attachments[0]["server_id"]
except IndexError:
pass
self.seen_dict = self.volumes_seen
self.to_be_dict = self.volumes_to_be_deleted
self.entity = self.volumes
self.check_for_project_id("volume")
def wait_a_moment(self):
# wait the interval time
log.info("waiting %s minutes before starting the next loop run", str(self.interval))
time.sleep(60 * int(self.interval))
def check_for_project_id(self, type):
self.init_seen_dict()
for element in self.entity:
# element has an existing project id - we keep it
if self.projects.get(element.project_id):
log.debug("%s %s has a valid project id: %s", type, str(element.id), str(element.project_id))
pass
# element has no existing project id - we plan to delete it
else:
log.debug("%s %s has no valid project id!", type, str(element.id))
self.now_or_later(element.id, "delete_" + type)
# reset the dict of instances we plan to delete from the db for all machines we did not see or which disappeared
self.reset_to_be_dict()
# here we decide whether to wait longer before doing the delete from the db or to finally do it
def now_or_later(self, id, what_to_do):
default = 0
self.seen_dict[id] = 1
# as long as this id has not exceeded the configured number of iterations, either log the plan or act on it (just printing in dry-run mode) - beyond that do not print anything, so that dry-run mode looks like real mode
if self.to_be_dict.get(id, default) <= int(self.iterations):
# we have seen it iteration times, time to act
if self.to_be_dict.get(id, default) == int(self.iterations):
# ... or print if we are only in dry-run mode
if self.dry_run:
log.info("- dry-run: %s %s", self.state_to_name_map[what_to_do], id)
self.gauge_value[('dry_run', what_to_do)] += 1
else:
if what_to_do == "delete_server":
log.info("- action: %s %s", self.state_to_name_map[what_to_do], id)
try:
self.conn.compute.delete_server(id)
self.gauge_value[('done', what_to_do)] += 1
except exceptions.HttpException as e:
log.warn("- PLEASE CHECK MANUALLY - got an http exception: %s - this has to be handled manually", str(e))
elif what_to_do == "delete_snapshot":
log.info("- action: %s %s created from volume %s", self.state_to_name_map[what_to_do], id,
self.snapshot_from[id])
try:
self.conn.block_store.delete_snapshot(id)
self.gauge_value[('done', what_to_do)] += 1
except exceptions.HttpException as e:
log.warn("-- got an http exception: %s", str(e))
log.info("--- action: setting the status of the snapshot %s to error in preparation to delete it", id)
self.cinder.volume_snapshots.reset_state(id, "error")
log.info("--- action: deleting the snapshot %s", id)
try:
self.conn.block_store.delete_snapshot(id)
self.gauge_value[('done', what_to_do)] += 1
except exceptions.HttpException as e:
log.warn("- PLEASE CHECK MANUALY - got an http exception: %s - this has to be handled manually", str(e))
elif what_to_do == "delete_volume":
log.info("- action: %s %s", self.state_to_name_map[what_to_do], id)
try:
self.conn.block_store.delete_volume(id)
self.gauge_value[('done', what_to_do)] += 1
except exceptions.HttpException as e:
log.warn("-- got an http exception: %s", str(e))
log.warn("--- maybe this volume is still connected to an already deleted instance? - checking ...")
if self.attached_to.get(id):
log.info("---- volume is still attached to instance: %s", self.attached_to.get(id))
if not self.is_server.get(self.attached_to.get(id)):
log.info("---- server %s does no longer exist - the volume can thus be deleted", self.attached_to.get(id))
log.info("---- action: detaching the volume %s in preparation to delete it", id)
self.cinder.volumes.detach(id)
log.info("---- action: setting the status of the volume %s to error in preparation to delete it", id)
self.cinder.volumes.reset_state(id, "error")
log.info("---- action: deleting the volume %s", id)
try:
self.conn.block_store.delete_volume(id)
self.gauge_value[('done', what_to_do)] += 1
except exceptions.HttpException as e:
log.warn("- PLEASE CHECK MANUALLY - got an http exception: %s - this has to be handled manually", str(e))
else:
log.info("---- volume is not attached to any instance - must be another problem ...")
else:
log.warn("- PLEASE CHECK MANUALLY - unsupported action requested for id: %s", id)
# otherwise print out what we plan to do in the future
else:
log.info("- plan: %s %s (%i/%i)", self.state_to_name_map[what_to_do], id, self.to_be_dict.get(id, default) + 1, int(self.iterations))
self.gauge_value[('plan', what_to_do)] += 1
self.to_be_dict[id] = self.to_be_dict.get(id, default) + 1
def send_to_prometheus_exporter(self):
for kind in ["plan", "dry_run", "done"]:
if self.novacmdline:
self.gauge_delete_server.labels(kind).set(float(self.gauge_value[(kind, "delete_server")]))
if self.cindercmdline:
self.gauge_delete_volume.labels(kind).set(float(self.gauge_value[(kind, "delete_volume")]))
self.gauge_delete_snapshot.labels(kind).set(float(self.gauge_value[(kind, "delete_snapshot")]))
if __name__ == '__main__':
c = Cleanup()
|
[] |
[] |
[
"OS_PROJECT_NAME",
"OS_USER_DOMAIN_NAME",
"OS_AUTH_URL",
"OS_PASSWORD",
"OS_USERNAME",
"OS_PROJECT_DOMAIN_NAME"
] |
[]
|
["OS_PROJECT_NAME", "OS_USER_DOMAIN_NAME", "OS_AUTH_URL", "OS_PASSWORD", "OS_USERNAME", "OS_PROJECT_DOMAIN_NAME"]
|
python
| 6 | 0 | |
OpenImages_experiments/baseline_logistic_OpenImages.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 17 18:38:40 2018
@author: badat
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import pandas as pd
import os.path
import os
import numpy as np
import time
from average_precision import apk
import global_setting_OpenImage
import D_utility
#%% logging level
tf.logging.set_verbosity(tf.logging.INFO)
#%% override
#global_setting_OpenImage.learning_rate_base = 0.001
global_setting_OpenImage.batch_size=32
global_setting_OpenImage.n_cycles*=1#60*global_setting_OpenImage.report_interval
global_setting_OpenImage.report_interval = 100
global_setting_OpenImage.n_cycles = 3657355//global_setting_OpenImage.batch_size
#%% data flag
idx_GPU=7
beta = 0.02
os.environ["CUDA_VISIBLE_DEVICES"]="{}".format(idx_GPU)
df_image = pd.read_csv('./data/2017_11/train/images.csv')
list_alphas = [0,0.001,0.01,0.1,1]
global_step = tf.Variable(0, trainable=False,dtype=tf.float32)
learning_rate = 1.0/(tf.sqrt(global_step)+1.0)*global_setting_OpenImage.learning_rate_base#tf.Variable(global_setting_OpenImage.learning_rate_base,trainable = False,dtype=tf.float32)
n_iters = 1
schedule_wrt_report_interval = 80
name = 'baseline_logistic_OpenImages'
save_name = name+'.csv'
df_result = pd.DataFrame()
is_save = False
global_setting_OpenImage.report_interval = 100
#n_process_map = 4
#%%
print('number of cycles {}'.format(global_setting_OpenImage.n_cycles))
#%% label mapping function
def LoadLabelMap(labelmap_path, dict_path):
"""Load index->mid and mid->display name maps.
Args:
labelmap_path: path to the file with the list of mids, describing
predictions.
dict_path: path to the dict.csv that translates from mids to display names.
Returns:
labelmap: an index to mid list
label_dict: mid to display name dictionary
"""
labelmap = [line.rstrip() for line in tf.gfile.GFile(labelmap_path)]
label_dict = {}
for line in tf.gfile.GFile(dict_path):
words = [word.strip(' "\n') for word in line.split(',', 1)]
label_dict[words[0]] = words[1]
return labelmap, label_dict
predictions_eval = 0
predictions_eval_resize = 0
#%%
labelmap, label_dict = LoadLabelMap(global_setting_OpenImage.labelmap_path, global_setting_OpenImage.dict_path)
list_label = []
for id_name in labelmap:
list_label.append(label_dict[id_name])
#%% Dataset
def parser(record):
feature = {'img_id': tf.FixedLenFeature([], tf.string),
'feature': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.string)}
parsed = tf.parse_single_example(record, feature)
img_id = parsed['img_id']
feature = tf.decode_raw( parsed['feature'],tf.float32)
label = tf.decode_raw( parsed['label'],tf.int32)
return img_id,feature,label
#%% load in memory
sess = tf.InteractiveSession()#tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
g = tf.get_default_graph()
#with g.as_default():
saver = tf.train.import_meta_graph(global_setting_OpenImage.model_path+ '.meta')
saver.restore(sess, global_setting_OpenImage.model_path)
weight = tf.squeeze( g.get_tensor_by_name('resnet_v1_101/logits/weights:0'))
bias = g.get_tensor_by_name('resnet_v1_101/logits/biases:0')[tf.newaxis,:]
init_Theta = tf.concat([weight,bias],name='Theta',axis = 0).eval()
#%%
Theta = tf.get_variable('Theta',shape=[2049,5000])
#%%
dataset = tf.data.TFRecordDataset(global_setting_OpenImage.record_path)
dataset = dataset.map(parser)
dataset = dataset.shuffle(20000)
dataset = dataset.batch(global_setting_OpenImage.batch_size)
dataset = dataset.repeat()
iterator = dataset.make_initializable_iterator()
(img_ids,img_features,labels) = iterator.get_next()
dataset_in = tf.data.TFRecordDataset(global_setting_OpenImage.validation_path)
dataset_in = dataset_in.map(parser).batch(50000)
(img_val_ids,F_val,val_labels) = dataset_in.make_one_shot_iterator().get_next()
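# append a bias column of ones so that the last row of Theta acts as the bias term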
F_val = tf.concat([F_val,tf.ones([tf.shape(F_val)[0],1])],axis = 1)
(img_val_ids,F_val,val_labels)=sess.run([img_val_ids,F_val,val_labels])
#%%
def AP(prediction,label,names):
mask = np.abs(label)==1
if np.sum(label==1)==0:
return 0.0
groundtruth = names[label == 1]
prediction = prediction[mask]
retrieval = names[mask]
sort_idx = np.argsort(prediction)[::-1]
retrieval = retrieval[sort_idx]
return apk(groundtruth,retrieval,len(prediction))
def compute_AP(Prediction,Label,names):
num_class = Prediction.shape[1]
ap=np.zeros(num_class)
for idx_cls in range(num_class):
prediction = Prediction[:,idx_cls]
label = Label[:,idx_cls]
ap[idx_cls]=AP(prediction,label,names)
return ap
#%%
F = tf.squeeze(img_features)
F = tf.concat([F,tf.ones([tf.shape(F)[0],1])],axis = 1)
#%%
alpha_regularizer_var = tf.get_variable('alpha_regularizer',dtype=tf.float32,trainable=False, shape=())
alpha_regularizer_var_fh = tf.placeholder(dtype=tf.float32, shape=())
#%%
op_alpha_regularizer = alpha_regularizer_var.assign(alpha_regularizer_var_fh)
#%%
G = np.load('./label_graph/graph_label_naive.npy').astype(np.float32)
G=D_utility.preprocessing_graph(G)
G_empty_diag = G - np.diag(np.diag(G))
G_init=G_empty_diag[G_empty_diag!=0]
G_var = tf.get_variable("G_var", G_init.shape)
op_G_var=G_var.assign(G_init)
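# collect the (row, col) indices of all non-zero off-diagonal entries of G so the trainable values in G_var can later be scattered back into a dense matrix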
indices = []
#updates = []
shape = tf.constant(G.shape)
counter = 0
diag_G = tf.diag(np.diag(G))
for idx_row in range(G_empty_diag.shape[1]):
idx_cols = np.where(G_empty_diag[idx_row,:]!=0)[0]
for idx_col in idx_cols:
if G[idx_row,idx_col]-G_init[counter] != 0:
raise Exception('error relation construction')
if idx_row != idx_col:
indices.append([idx_row,idx_col])
counter += 1
part_G_var = tf.scatter_nd(indices, G_var, shape)+diag_G
#%% disperse measurement
dispersion=tf.reduce_sum(tf.abs(part_G_var)) - tf.reduce_sum(tf.diag_part(tf.abs(part_G_var)))
#%%
with tf.variable_scope("logistic"):
logits = tf.matmul(F,Theta)
labels_binary = tf.div(labels+1,2)
labels_weight = tf.abs(labels)
loss_logistic = tf.losses.sigmoid_cross_entropy(multi_class_labels=labels_binary, logits=logits,weights=labels_weight)
with tf.variable_scope("regularizer"):
loss_regularizer = tf.square(tf.norm(Theta))
#%% shared operation
grad_logistic = tf.gradients(loss_logistic, Theta)
grad_regularizer = tf.gradients(loss_regularizer,Theta)
norm_grad_logistic = tf.norm(grad_logistic)
norm_grad_regularizer = tf.norm(grad_regularizer)
norm_Theta = tf.norm(Theta)
raitio_regularizer_grad = norm_grad_logistic/norm_grad_regularizer
validate_Prediction = tf.matmul(F_val,Theta)
#%%
tf.global_variables_initializer().run()
sess.run(iterator.initializer)
#%%
def append_info(m_AP,loss_value,lr_v):
res_mAP[index]=m_AP
res_loss[index] = loss_value
res_lr[index]=lr_v
df_result['mAP: regularizer {}'.format(alpha_regularizer)]=res_mAP
df_result['loss: regularizer {}'.format(alpha_regularizer)]=res_loss
df_result['lr: regularizer {}'.format(alpha_regularizer)]=res_lr
#%%
Theta_fh = tf.placeholder(dtype=tf.float32, shape=[2049,5000])
op_assign_Theta = Theta.assign(Theta_fh)
global_step_fh=tf.placeholder(dtype=tf.float32,shape=())
op_assign_global_step = global_step.assign(global_step_fh)
#%%
tf.global_variables_initializer().run()
sess.run(op_G_var)
sess.run(op_assign_Theta,{Theta_fh:init_Theta})
sess.run(iterator.initializer)
#%%
#optimizer = tf.train.AdamOptimizer(learning_rate=global_setting_OpenImage.learning_rate_base)#tf.train.RMSPropOptimizer(learning_rate=learning_rate)#,momentum=0.9
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
0.9, # decay
0.9, # momentum
1.0 #rmsprop_epsilon
)
loss = loss_logistic
#%% hypergradient
grad_loss = tf.gradients(loss, Theta)
#%%
train = optimizer.minimize(loss,var_list=[Theta],global_step = global_step)
print('done placeholder assignment')
def experiment_cond_success():
return alpha_colaborative_o == 0.1 and alpha_feature_o ==0
n_experiment= 0
for idx_alpha_colaborative,alpha_colaborative_o in enumerate(list_alphas):
for idx_alpha_feature,alpha_feature_o in enumerate(list_alphas):
for idx_alpha_regularizer,alpha_regularizer_o in enumerate([0]):
if not experiment_cond_success():#index_column <= 4:#(idx_alpha_colaborative == 0 and idx_alpha_feature != 1) or idx_alpha_regularizer != 0 or
print('skip')
continue
n_experiment += 1
print('Total number of experiment: {}'.format(n_experiment))
Thetas = np.zeros((2049,5000,n_experiment))
Gs = np.zeros((5000,5000,n_experiment))
idx_experiment = 0
expon_moving_avg_old = np.inf
expon_moving_avg_new = 0
for idx_alpha_colaborative,alpha_colaborative_o in enumerate(list_alphas):
for idx_alpha_feature,alpha_feature_o in enumerate(list_alphas):
for idx_alpha_regularizer,alpha_regularizer_o in enumerate([0]):
index_column = idx_alpha_colaborative*len(list_alphas)*len(list_alphas)+idx_alpha_feature*len(list_alphas)+idx_alpha_regularizer
if not experiment_cond_success():#index_column <= 4:#(idx_alpha_colaborative == 0 and idx_alpha_feature != 1) or idx_alpha_regularizer != 0 or
print('skip')
continue
report_length = global_setting_OpenImage.n_cycles*n_iters//global_setting_OpenImage.report_interval +1 #in case that my lousy computation is wrong
print('report length {}'.format(report_length))
res_mAP = np.zeros(report_length)
res_loss = np.zeros(report_length)
res_loss_logistic=np.zeros(report_length)
res_loss_R=np.zeros(report_length)
res_loss_feature=np.zeros(report_length)
res_grad_logistic=np.zeros(report_length)
res_grad_R=np.zeros(report_length)
res_lr=np.zeros(report_length)
#loss_R#
sess.run(iterator.initializer)
alpha_regularizer = alpha_regularizer_o
tf.global_variables_initializer().run()
print('reset Theta')
sess.run(op_G_var)
sess.run(op_assign_Theta,{Theta_fh:init_Theta})
sess.run(op_alpha_regularizer,{alpha_regularizer_var_fh:alpha_regularizer})
df_ap = pd.DataFrame()
df_ap['label']=list_label
#%%
tic = time.clock()
for idx_cycle in range(global_setting_OpenImage.n_cycles):
_,loss_value,lr_v = sess.run([train,loss,learning_rate])
index = (idx_cycle*n_iters)//global_setting_OpenImage.report_interval
if (idx_cycle*n_iters) % global_setting_OpenImage.report_interval == 0 :#or idx_iter == n_iters-1:
print('Elapsed time update: {}'.format(time.clock()-tic))
tic = time.clock()
print('index {} -- compute mAP'.format(index))
print('Loss {} lr {}'.format(loss_value,lr_v))
validate_Prediction_val = validate_Prediction.eval()
ap = compute_AP(validate_Prediction_val,val_labels,img_val_ids)
df_ap['index {}'.format(index)]=ap
m_AP=np.mean(ap)
#
append_info(m_AP,loss_value,lr_v)
print('mAP {}'.format(m_AP))
if is_save:
Thetas[:,:,idx_experiment]=Theta.eval()
Gs[:,:,idx_experiment]=part_G_var.eval()
df_result.to_csv('./result/'+save_name)
ap_save_name = './result/baseline_ap_{}.csv'
df_ap.to_csv(ap_save_name.format(alpha_regularizer))
if index%(int(report_length/4)) == 0:
np.savez('./result/'+name, Thetas=Thetas, Gs=Gs)
idx_experiment+=1
if is_save:
np.savez('./result/'+name, Thetas=Thetas, Gs=Gs)
#%%
sess.close()
tf.reset_default_graph()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
search_script.py
|
from __future__ import division
from __future__ import print_function
import requests
import json
import sys
import os
from elasticsearch import Elasticsearch
from elasticsearch import exceptions
try:
# idrac_ip = os.environ['IDRAC_IP']
# idrac_username = os.environ['IDRAC_USERNAME']
# idrac_password = os.environ['IDRAC_PASSWORD']
# elastic_ip = os.environ['ELASTIC_IP']
# elastic_username = os.environ['ELASTIC_USERNAME']
# elastic_password = os.environ['ELASTIC_PASSWORD']
idrac_ip="100.98.26.49"
idrac_username="root"
idrac_password="calvin"
elastic_ip="100.98.26.172"
elastic_username="elastic"
elastic_password="changeme"
es = Elasticsearch([elastic_ip],
http_auth=(elastic_username, elastic_password),
scheme="http",
port=9200,
)
except Exception as e:
print("- FAIL: could not set up the Elasticsearch connection: " + str(e))
sys.exit(0)
def retrieve_logs():
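# query the per-iDRAC 'lc' index for log entries from the last 5 minutes and aggregate their MessageIDs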
index_name="lc"+idrac_ip
res=es.search(index=index_name, body={
"query":{
"range": {
"timestamp": {
"gte" : "now-5m",
"lt" : "now"
}
}
}
}
)
# print(data)
codes = {}
code_types={}
for i in res['hits']['hits']:
#print(i)
#print("\n")
for key,value in i['_source'].items():
if key=='MessageID':
code=value
code_type=value[0:3]
#print(code_type)
if code in codes:
codes[code]=codes[code]+1
else:
codes.update({code: 1})
if code_type in code_types:
code_types[code_type]=code_types[code_type]+1
else:
code_types.update({code_type: 1})
total_errors=sum(codes.values())
# print(total_errors)
error_percentage={}
print("\nFor Server: ", idrac_ip)
# print("__________________________ \n\n\n")
print("\n\n\n")
print("Error Codes Occurrence Percentage ")
print("____________________________________________ \n")
for key,value in codes.items():
error_percentage[key]= (value/total_errors)*100
print(key, " ", value, " ", error_percentage[key], "%")
print("\n")
print("Error Types Occurrence ")
print("__________________________ \n")
for key,value in code_types.items():
print(key, " ", value)
# print(codes)
# print(code_types)
# print (total_errors)
# print error_percentage
retrieve_logs()
|
[] |
[] |
[
"IDRAC_PASSWORD",
"ELASTIC_PASSWORD",
"ELASTIC_USERNAME",
"ELASTIC_IP",
"IDRAC_IP",
"IDRAC_USERNAME"
] |
[]
|
["IDRAC_PASSWORD", "ELASTIC_PASSWORD", "ELASTIC_USERNAME", "ELASTIC_IP", "IDRAC_IP", "IDRAC_USERNAME"]
|
python
| 6 | 0 | |
mongodb-datastore/restapi/configure_mongodb_datastore.go
|
// This file is safe to edit. Once it exists it will not be overwritten
package restapi
import (
"crypto/tls"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
keptnapi "github.com/keptn/go-utils/pkg/api/utils"
errors "github.com/go-openapi/errors"
runtime "github.com/go-openapi/runtime"
middleware "github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/swag"
"github.com/keptn/keptn/mongodb-datastore/handlers"
"github.com/keptn/keptn/mongodb-datastore/models"
"github.com/keptn/keptn/mongodb-datastore/restapi/operations"
"github.com/keptn/keptn/mongodb-datastore/restapi/operations/event"
)
//go:generate swagger generate server --target ../../mongodb-datastore --name mongodb-datastore --spec ../swagger.yaml
func configureFlags(api *operations.MongodbDatastoreAPI) {
// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }
}
func configureAPI(api *operations.MongodbDatastoreAPI) http.Handler {
// configure the api here
api.ServeError = errors.ServeError
// Set your custom logger if needed. Default one is log.Printf
// Expected interface func(string, ...interface{})
//
// Example:
// api.Logger = log.Printf
api.JSONConsumer = runtime.JSONConsumer()
api.JSONProducer = runtime.JSONProducer()
api.EventSaveEventHandler = event.SaveEventHandlerFunc(func(params event.SaveEventParams) middleware.Responder {
if err := handlers.ProcessEvent(params.Body); err != nil {
return event.NewSaveEventDefault(500).WithPayload(&models.Error{Code: 500, Message: swag.String(err.Error())})
}
return event.NewSaveEventCreated()
})
api.EventGetEventsHandler = event.GetEventsHandlerFunc(func(params event.GetEventsParams) middleware.Responder {
events, err := handlers.GetEvents(params)
if err != nil {
return event.NewGetEventsDefault(500).WithPayload(&models.Error{Code: 500, Message: swag.String(err.Error())})
}
return event.NewGetEventsOK().WithPayload(events)
})
api.ServerShutdown = func() {}
return setupGlobalMiddleware(api.Serve(setupMiddlewares))
}
// The TLS configuration before HTTPS server starts.
func configureTLS(tlsConfig *tls.Config) {
// Make all necessary changes to the TLS configuration here.
}
// As soon as server is initialized but not run yet, this function will be called.
// If you need to modify a config, store server instance to stop it individually later, this is the place.
// This function can be called multiple times, depending on the number of serving schemes.
// scheme value will be set accordingly: "http", "https" or "unix"
func configureServer(s *http.Server, scheme, addr string) {
}
// The middleware configuration is for the handler executors. These do not apply to the swagger.json document.
// The middleware executes after routing but before authentication, binding and validation
func setupMiddlewares(handler http.Handler) http.Handler {
return handler
}
// The middleware configuration happens before anything, this middleware also applies to serving the swagger.json document.
// So this is a good place to plug in a panic handling middleware, logging and metrics
func setupGlobalMiddleware(handler http.Handler) http.Handler {
prefixPath := os.Getenv("PREFIX_PATH")
if len(prefixPath) > 0 {
// Set the prefix-path in the swagger.yaml
input, err := ioutil.ReadFile("swagger-ui/swagger.yaml")
if err == nil {
editedSwagger := strings.Replace(string(input), "basePath: /api/mongodb-datastore",
"basePath: "+prefixPath+"/api/mongodb-datastore", -1)
err = ioutil.WriteFile("swagger-ui/swagger.yaml", []byte(editedSwagger), 0644)
if err != nil {
fmt.Println("Failed to write edited swagger.yaml")
}
} else {
fmt.Println("Failed to set basePath in swagger.yaml")
}
}
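// Start the Keptn health endpoint on port 10999 in a separate goroutine.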
go keptnapi.RunHealthEndpoint("10999")
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Serving ./swagger-ui/
if strings.Index(r.URL.Path, "/swagger-ui/") == 0 {
pathToSwaggerUI := "swagger-ui"
// in case of local execution, the dir is stored in a parent folder
if _, err := os.Stat(pathToSwaggerUI); os.IsNotExist(err) {
pathToSwaggerUI = "../../swagger-ui"
}
http.StripPrefix("/swagger-ui/", http.FileServer(http.Dir(pathToSwaggerUI))).ServeHTTP(w, r)
return
}
handler.ServeHTTP(w, r)
})
}
|
[
"\"PREFIX_PATH\""
] |
[] |
[
"PREFIX_PATH"
] |
[]
|
["PREFIX_PATH"]
|
go
| 1 | 0 | |
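The setupGlobalMiddleware function above mixes three concerns: an optional basePath rewrite driven by PREFIX_PATH, a background health endpoint, and static serving of the swagger-ui directory before delegating to the generated API handler. A minimal standalone sketch of that wrapping pattern, using only the standard library (wrapWithSwaggerUI and the port are illustrative, not taken from the generated server):
package main

import (
	"fmt"
	"log"
	"net/http"
	"os"
	"strings"
)

// wrapWithSwaggerUI is an illustrative helper: static swagger-ui assets are served
// first, everything else falls through to the wrapped API handler, mirroring the
// shape of setupGlobalMiddleware above.
func wrapWithSwaggerUI(next http.Handler, dir string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if strings.HasPrefix(r.URL.Path, "/swagger-ui/") {
			http.StripPrefix("/swagger-ui/", http.FileServer(http.Dir(dir))).ServeHTTP(w, r)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "api response for", r.URL.Path)
	})
	// PREFIX_PATH is only inspected here to show where the basePath rewrite would hook in.
	if prefix := os.Getenv("PREFIX_PATH"); prefix != "" {
		log.Println("would rewrite basePath to", prefix+"/api/mongodb-datastore")
	}
	log.Fatal(http.ListenAndServe(":8080", wrapWithSwaggerUI(api, "swagger-ui")))
}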
vendor/github.com/hyperhq/hypercli/cliconfig/config.go
|
package cliconfig
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/hyperhq/hyper-api/types"
"github.com/hyperhq/hypercli/pkg/homedir"
)
const (
// ConfigFileName is the name of config file
ConfigFileName = "config.json"
oldConfigfile = ".dockercfg"
// This constant is only used for really old config files when the
// URL wasn't saved as part of the config file and it was just
// assumed to be this value.
defaultIndexserver = "https://index.docker.io/v1/"
DefaultHyperFormat = "tcp://*.hyper.sh:443"
DefaultHyperRegion = "us-west-1"
DefaultHyperEndpoint = "hyper.sh:443"
)
var (
configDir = os.Getenv("HYPER_CONFIG")
)
func init() {
if configDir == "" {
configDir = filepath.Join(homedir.Get(), ".hyper")
}
}
// ConfigDir returns the directory the configuration file is stored in
func ConfigDir() string {
return configDir
}
// SetConfigDir sets the directory the configuration file is stored in
func SetConfigDir(dir string) {
configDir = dir
}
type CloudConfig struct {
AccessKey string `json:"accesskey"`
SecretKey string `json:"secretkey"`
Region string `json:"region"`
}
// ConfigFile ~/.docker/config.json file info
type ConfigFile struct {
AuthConfigs map[string]types.AuthConfig `json:"auths"`
CloudConfig map[string]CloudConfig `json:"clouds"`
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
PsFormat string `json:"psFormat,omitempty"`
ImagesFormat string `json:"imagesFormat,omitempty"`
VolumesFormat string `json:"volumesFormat,omitempty"`
DetachKeys string `json:"detachKeys,omitempty"`
filename string // Note: not serialized - for internal use only
}
// NewConfigFile initializes an empty configuration file for the given filename 'fn'
func NewConfigFile(fn string) *ConfigFile {
return &ConfigFile{
AuthConfigs: make(map[string]types.AuthConfig),
CloudConfig: make(map[string]CloudConfig),
HTTPHeaders: make(map[string]string),
filename: fn,
}
}
// LegacyLoadFromReader reads the non-nested configuration data given and sets up the
// auth config information with given directory and populates the receiver object
func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
b, err := ioutil.ReadAll(configData)
if err != nil {
return err
}
if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil {
arr := strings.Split(string(b), "\n")
if len(arr) < 2 {
return fmt.Errorf("The Auth config file is empty")
}
authConfig := types.AuthConfig{}
origAuth := strings.Split(arr[0], " = ")
if len(origAuth) != 2 {
return fmt.Errorf("Invalid Auth config file")
}
authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1])
if err != nil {
return err
}
origEmail := strings.Split(arr[1], " = ")
if len(origEmail) != 2 {
return fmt.Errorf("Invalid Auth config file")
}
authConfig.Email = origEmail[1]
authConfig.ServerAddress = defaultIndexserver
configFile.AuthConfigs[defaultIndexserver] = authConfig
} else {
for k, authConfig := range configFile.AuthConfigs {
authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
if err != nil {
return err
}
authConfig.Auth = ""
authConfig.ServerAddress = k
configFile.AuthConfigs[k] = authConfig
}
}
return nil
}
// LoadFromReader reads the configuration data given and sets up the auth config
// information with given directory and populates the receiver object
func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
if err := json.NewDecoder(configData).Decode(&configFile); err != nil {
return err
}
var err error
for addr, ac := range configFile.AuthConfigs {
ac.Username, ac.Password, err = decodeAuth(ac.Auth)
if err != nil {
return err
}
ac.Auth = ""
ac.ServerAddress = addr
configFile.AuthConfigs[addr] = ac
}
return nil
}
// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from
// a non-nested reader
func LegacyLoadFromReader(configData io.Reader) (*ConfigFile, error) {
configFile := ConfigFile{
AuthConfigs: make(map[string]types.AuthConfig),
}
err := configFile.LegacyLoadFromReader(configData)
return &configFile, err
}
// LoadFromReader is a convenience function that creates a ConfigFile object from
// a reader
func LoadFromReader(configData io.Reader) (*ConfigFile, error) {
configFile := ConfigFile{
AuthConfigs: make(map[string]types.AuthConfig),
CloudConfig: make(map[string]CloudConfig),
}
err := configFile.LoadFromReader(configData)
return &configFile, err
}
// Load reads the configuration files in the given directory, and sets up
// the auth config information and returns values.
// FIXME: use the internal golang config parser
func Load(configDir string) (*ConfigFile, error) {
if configDir == "" {
configDir = ConfigDir()
}
configFile := ConfigFile{
AuthConfigs: make(map[string]types.AuthConfig),
CloudConfig: make(map[string]CloudConfig),
filename: filepath.Join(configDir, ConfigFileName),
}
// Try happy path first - latest config file
if _, err := os.Stat(configFile.filename); err == nil {
file, err := os.Open(configFile.filename)
if err != nil {
return &configFile, fmt.Errorf("%s - %v", configFile.filename, err)
}
defer file.Close()
err = configFile.LoadFromReader(file)
if err != nil {
err = fmt.Errorf("%s - %v", configFile.filename, err)
}
return &configFile, err
} else if !os.IsNotExist(err) {
// if file is there but we can't stat it for any reason other
// than it doesn't exist then stop
return &configFile, fmt.Errorf("%s - %v", configFile.filename, err)
}
// Can't find latest config file so check for the old one
confFile := filepath.Join(homedir.Get(), oldConfigfile)
if _, err := os.Stat(confFile); err != nil {
return &configFile, nil //missing file is not an error
}
file, err := os.Open(confFile)
if err != nil {
return &configFile, fmt.Errorf("%s - %v", confFile, err)
}
defer file.Close()
err = configFile.LegacyLoadFromReader(file)
if err != nil {
return &configFile, fmt.Errorf("%s - %v", confFile, err)
}
if configFile.HTTPHeaders == nil {
configFile.HTTPHeaders = map[string]string{}
}
return &configFile, nil
}
// SaveToWriter encodes and writes out all the authorization information to
// the given writer
func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
// Encode sensitive data into a new/temp struct
tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs))
for k, authConfig := range configFile.AuthConfigs {
authCopy := authConfig
// encode and save the authstring, while blanking out the original fields
authCopy.Auth = encodeAuth(&authCopy)
authCopy.Username = ""
authCopy.Password = ""
authCopy.ServerAddress = ""
tmpAuthConfigs[k] = authCopy
}
saveAuthConfigs := configFile.AuthConfigs
configFile.AuthConfigs = tmpAuthConfigs
defer func() { configFile.AuthConfigs = saveAuthConfigs }()
data, err := json.MarshalIndent(configFile, "", "\t")
if err != nil {
return err
}
_, err = writer.Write(data)
return err
}
// Save encodes and writes out all the authorization information
func (configFile *ConfigFile) Save() error {
if configFile.Filename() == "" {
return fmt.Errorf("Can't save config with empty filename")
}
if err := os.MkdirAll(filepath.Dir(configFile.filename), 0700); err != nil {
return err
}
f, err := os.OpenFile(configFile.filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
defer f.Close()
return configFile.SaveToWriter(f)
}
// Filename returns the name of the configuration file
func (configFile *ConfigFile) Filename() string {
return configFile.filename
}
// encodeAuth creates a base64 encoded string containing the authorization information
func encodeAuth(authConfig *types.AuthConfig) string {
authStr := authConfig.Username + ":" + authConfig.Password
msg := []byte(authStr)
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
base64.StdEncoding.Encode(encoded, msg)
return string(encoded)
}
// decodeAuth decodes a base64 encoded string and returns username and password
func decodeAuth(authStr string) (string, string, error) {
decLen := base64.StdEncoding.DecodedLen(len(authStr))
decoded := make([]byte, decLen)
authByte := []byte(authStr)
n, err := base64.StdEncoding.Decode(decoded, authByte)
if err != nil {
return "", "", err
}
if n > decLen {
return "", "", fmt.Errorf("Something went wrong decoding auth config")
}
arr := strings.SplitN(string(decoded), ":", 2)
if len(arr) != 2 {
return "", "", fmt.Errorf("Invalid auth configuration file")
}
password := strings.Trim(arr[1], "\x00")
return arr[0], password, nil
}
|
[
"\"HYPER_CONFIG\""
] |
[] |
[
"HYPER_CONFIG"
] |
[]
|
["HYPER_CONFIG"]
|
go
| 1 | 0 | |
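decodeAuth and encodeAuth above implement the classic Docker-style credential encoding: the auth field is simply base64("username:password"), split back on the first colon when decoding. A simplified, self-contained sketch of that scheme (not the library's code, just the idea):
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// encode joins username and password with ':' and base64-encodes the result.
func encode(user, pass string) string {
	return base64.StdEncoding.EncodeToString([]byte(user + ":" + pass))
}

// decode reverses encode, splitting only on the first ':' so passwords may contain colons.
func decode(auth string) (string, string, error) {
	raw, err := base64.StdEncoding.DecodeString(auth)
	if err != nil {
		return "", "", err
	}
	parts := strings.SplitN(string(raw), ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("invalid auth string")
	}
	return parts[0], parts[1], nil
}

func main() {
	token := encode("alice", "s3cret")
	user, pass, err := decode(token)
	fmt.Println(token, user, pass, err)
}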
gocd/gocd/gofilecounter.go
|
/*
Sniperkit-Bot
- Status: analyzed
*/
// Copyright 2016 Palantir Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gocd
import (
"os"
"path"
"strings"
)
type ProjectGoFileCounter interface {
NGoFiles(pkg string) (int, bool)
NTotalGoFiles(pkg string) (int, bool)
}
type projectGoFileCounter struct {
ProjectPkgInfoer
counts map[string]goFileCount
}
type goFileCount struct {
pkg int
total int
}
func NewProjectGoFileCounter(p ProjectPkgInfoer) (ProjectGoFileCounter, error) {
counter := projectGoFileCounter{
ProjectPkgInfoer: p,
counts: make(map[string]goFileCount),
}
// cache from pkg -> all packages imported by the package (recursive)
importsCache := make(map[string]map[string]*PkgInfo)
for _, v := range p.PkgInfos() {
// determine file count by determining all of the unique packages imported by a package and then summing
// up the package file count of each. This approach is required to avoid double-counting packages that
// are imported multiple times.
if _, err := counter.allImports(v, importsCache, counter.counts); err != nil {
return nil, err
}
}
return &counter, nil
}
func (p *projectGoFileCounter) NGoFiles(pkg string) (int, bool) {
if c, ok := p.counts[pkg]; ok {
return c.pkg, ok
}
return 0, false
}
func (p *projectGoFileCounter) NTotalGoFiles(pkg string) (int, bool) {
if c, ok := p.counts[pkg]; ok {
return c.total, ok
}
return 0, false
}
func (p *projectGoFileCounter) allImports(pkg *PkgInfo, cache map[string]map[string]*PkgInfo, countsMap map[string]goFileCount) (map[string]*PkgInfo, error) {
if v, ok := cache[pkg.Path]; ok {
return v, nil
}
pkgImports := make(map[string]*PkgInfo)
for k := range pkg.Imports {
var importPkg *PkgInfo
if v, ok := p.PkgInfo(k); ok {
importPkg = &v
} else {
if newImportPkg, empty, err := ImportPkgInfo(k, path.Join(os.Getenv("GOPATH"), "src", strings.TrimSuffix(pkg.Path, "_test")), Default); err != nil {
return nil, err
} else if !empty {
importPkg = &newImportPkg
}
}
if importPkg == nil {
// skip imports that could not be resolved to a package; avoids a nil dereference below
continue
}
pkgImports[importPkg.Path] = importPkg
result, err := p.allImports(importPkg, cache, countsMap)
if err != nil {
return nil, err
}
for k, v := range result {
pkgImports[k] = v
}
}
cache[pkg.Path] = pkgImports
// compute and populate counts
counts := goFileCount{
pkg: pkg.NGoFiles,
total: pkg.NGoFiles,
}
for _, v := range pkgImports {
counts.total += v.NGoFiles
}
countsMap[pkg.Path] = counts
return pkgImports, nil
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
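The comment inside NewProjectGoFileCounter explains the key idea: compute the set of unique transitive imports per package and sum file counts over that set, so a dependency shared by several imports is only counted once. A toy sketch of that memoized reachability walk (the pkg struct and graph are hypothetical; an acyclic import graph is assumed, as Go imports are):
package main

import "fmt"

// pkg is a toy stand-in for PkgInfo: a file count plus direct imports.
type pkg struct {
	files   int
	imports []string
}

// reachable returns the set of packages transitively imported by name,
// memoizing results per package so shared subtrees are walked only once.
func reachable(name string, graph map[string]pkg, cache map[string]map[string]bool) map[string]bool {
	if v, ok := cache[name]; ok {
		return v
	}
	set := map[string]bool{}
	for _, imp := range graph[name].imports {
		set[imp] = true
		for k := range reachable(imp, graph, cache) {
			set[k] = true
		}
	}
	cache[name] = set
	return set
}

func main() {
	graph := map[string]pkg{
		"app":  {files: 2, imports: []string{"lib", "util"}},
		"lib":  {files: 3, imports: []string{"util"}},
		"util": {files: 1},
	}
	cache := map[string]map[string]bool{}
	total := graph["app"].files
	for dep := range reachable("app", graph, cache) {
		total += graph[dep].files
	}
	fmt.Println("total go files reachable from app:", total) // 2+3+1 = 6, util counted once
}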
main.go
|
package main
import (
"flag"
"fmt"
"os"
"strings"
"github.com/mozqnet/go-exploitdb/commands"
)
// Name :
const Name string = "go-exploitdb"
var version = "1.0.0"
func main() {
var v = flag.Bool("v", false, "Show version")
if envArgs := os.Getenv("GOVAL_DICTIONARY_ARGS"); 0 < len(envArgs) {
flag.CommandLine.Parse(strings.Fields(envArgs))
} else {
flag.Parse()
}
if *v {
fmt.Printf("go-exploitdb %s \n", version)
os.Exit(0)
}
if err := commands.RootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
|
[
"\"GOVAL_DICTIONARY_ARGS\""
] |
[] |
[
"GOVAL_DICTIONARY_ARGS"
] |
[]
|
["GOVAL_DICTIONARY_ARGS"]
|
go
| 1 | 0 | |
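The main function above lets an environment variable stand in for command-line arguments: if GOVAL_DICTIONARY_ARGS is set, its whitespace-separated fields are parsed as flags instead of os.Args. A minimal sketch of the same pattern (MYAPP_ARGS is an illustrative name, not the real variable):
package main

import (
	"flag"
	"fmt"
	"os"
	"strings"
)

func main() {
	verbose := flag.Bool("v", false, "verbose output")
	// Prefer arguments supplied via the environment, fall back to os.Args.
	if envArgs := os.Getenv("MYAPP_ARGS"); envArgs != "" {
		_ = flag.CommandLine.Parse(strings.Fields(envArgs))
	} else {
		flag.Parse()
	}
	fmt.Println("verbose:", *verbose)
}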
examples/cns/v20190624/record_list.go
|
package main
import (
"fmt"
"os"
"github.com/iftechio/tencentcloud-sdk-go/tencentcloud/cns/v20190624"
"github.com/iftechio/tencentcloud-sdk-go/tencentcloud/common"
"github.com/iftechio/tencentcloud-sdk-go/tencentcloud/common/errors"
"github.com/iftechio/tencentcloud-sdk-go/tencentcloud/common/profile"
"github.com/iftechio/tencentcloud-sdk-go/tencentcloud/common/regions"
)
func main() {
// Required step:
// Instantiate a credential object; it takes the Tencent Cloud account key pair secretId and secretKey.
// Here the values are read from environment variables, so set both of them in the environment first.
// You can also hard-code the key pair in your code, but be careful not to copy, upload, or share the
// code with others, otherwise the leaked key pair may put your assets at risk.
credential := common.NewCredential(
os.Getenv("TENCENTCLOUD_SECRET_ID"),
os.Getenv("TENCENTCLOUD_SECRET_KEY"),
)
// Optional step:
// Instantiate a client profile object; it can specify settings such as timeouts.
cpf := profile.NewClientProfile()
// The SDK uses the POST method by default.
// If you have to use GET, set it here. GET cannot handle some larger requests.
cpf.HttpProfile.ReqMethod = "GET"
// The SDK has a default timeout; please do not change it unless necessary.
// If needed, check the SDK code to get the latest default value.
cpf.HttpProfile.ReqTimeout = 10
// The endpoint "cns.api.qcloud.com" must be specified manually.
cpf.HttpProfile.Endpoint = "cns.api.qcloud.com"
// Use "HmacSHA1" or "HmacSHA256".
cpf.SignMethod = "HmacSHA1"
// Instantiate a client object for the product to be requested.
// The second parameter is the region; you can pass the string "ap-guangzhou" directly or use a predefined constant.
client, _ := cns.NewClient(credential, regions.Shanghai, cpf)
// Instantiate a request object; depending on the API called and your needs, further request parameters can be set.
// A property may be a basic type or reference another data structure.
// Using an IDE for development is recommended, so you can easily jump to the documentation of each API and data structure.
request := cns.NewRecordListRequest()
// Note: request.path must be set.
request.SetPath("/v2/index.php")
// Setting basic-type parameters.
// This API allows setting how many records are returned.
// The SDK uses pointer-style parameters, so even for basic types you must assign values through pointers.
// The SDK provides wrapper functions that turn basic types into pointer references.
domain := "jijigugu.club"
offset, length := 0, 100
request.Domain = &domain
request.Offset = common.IntPtr(offset)
request.Length = common.IntPtr(length)
// Call the desired API through the client object; the request object must be passed in.
response, err := client.RecordList(request)
// Handle SDK exceptions.
if _, ok := err.(*errors.TencentCloudSDKError); ok {
fmt.Printf("An API error has returned: %s", err)
return
}
// Non-SDK errors fail directly. Real code can add other handling here.
if err != nil {
panic(err)
}
// Print the returned JSON string.
fmt.Printf("%s", response.ToJsonString())
}
|
[
"\"TENCENTCLOUD_SECRET_ID\"",
"\"TENCENTCLOUD_SECRET_KEY\""
] |
[] |
[
"TENCENTCLOUD_SECRET_ID",
"TENCENTCLOUD_SECRET_KEY"
] |
[]
|
["TENCENTCLOUD_SECRET_ID", "TENCENTCLOUD_SECRET_KEY"]
|
go
| 2 | 0 | |
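The example reads TENCENTCLOUD_SECRET_ID and TENCENTCLOUD_SECRET_KEY straight from the environment; NewCredential accepts empty strings, so a fail-fast check can save a confusing API error later. A small sketch of such a guard (mustEnv is a hypothetical helper, not part of the SDK):
package main

import (
	"fmt"
	"log"
	"os"
)

// mustEnv returns the value of key or aborts the program if it is unset.
func mustEnv(key string) string {
	v := os.Getenv(key)
	if v == "" {
		log.Fatalf("environment variable %s is not set", key)
	}
	return v
}

func main() {
	secretID := mustEnv("TENCENTCLOUD_SECRET_ID")
	secretKey := mustEnv("TENCENTCLOUD_SECRET_KEY")
	fmt.Println("credentials loaded:", len(secretID) > 0, len(secretKey) > 0)
}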
main.go
|
package main
import (
"fmt"
"github.com/robfig/cron/v3"
"log"
"os"
"os/signal"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"time"
)
var (
envDry, _ = strconv.ParseBool(os.Getenv("CFG_DRY"))
envOnce, _ = strconv.ParseBool(os.Getenv("CFG_ONCE"))
envCron = strings.TrimSpace(os.Getenv("CFG_CRON"))
now = time.Now()
todayMarks = []string{
fmt.Sprintf("%04d-%02d-%02d", now.Year(), now.Month(), now.Day()),
fmt.Sprintf("%04d.%02d.%02d", now.Year(), now.Month(), now.Day()),
fmt.Sprintf("%04d_%02d_%02d", now.Year(), now.Month(), now.Day()),
}
regexpLogFile = regexp.MustCompile(`(?i)\.log$`)
regexpHistoricalLogFile = []*regexp.Regexp{
regexp.MustCompile(`(?i)ROT.+\.log.*$`),
regexp.MustCompile(`(?i)\.log.*\.gz.*$`),
regexp.MustCompile(`(?i)\.log[_.-]\d+$`),
regexp.MustCompile(`(?i)\.log[_.-]\d{4}[_.-]\d{2}[_.-]\d{2}.*$`),
regexp.MustCompile(`(?i)[_.-]\d+\.log$`),
regexp.MustCompile(`(?i)\.log[_.-]\d+$`),
regexp.MustCompile(`(?i)\d{4}[_.-]\d{2}[_.-]\d{2}.*\.log$`),
}
)
type FileType int
const (
FileTypeNone FileType = iota
FileTypeActiveLog
FileTypeHistoryLog
)
func determineFileType(name string) FileType {
for _, p := range regexpHistoricalLogFile {
if p.MatchString(name) {
for _, todayMark := range todayMarks {
if strings.Contains(name, todayMark) {
return FileTypeActiveLog
}
}
return FileTypeHistoryLog
}
}
if regexpLogFile.MatchString(name) {
return FileTypeActiveLog
}
return FileTypeNone
}
func main() {
if envOnce {
execute()
}
if envCron != "" {
c := cron.New()
if _, err := c.AddFunc(envCron, execute); err != nil {
log.Println("failed to initialize cron:", err.Error())
os.Exit(1)
}
c.Start()
defer c.Stop()
}
chSig := make(chan os.Signal, 1)
signal.Notify(chSig, syscall.SIGTERM, syscall.SIGINT)
sig := <-chSig
log.Println("signal caught:", sig.String())
}
func execute() {
err := filepath.Walk("/mnt", func(path string, info os.FileInfo, err error) error {
// ignore error
if err != nil {
log.Printf("%s: %s", path, err.Error())
return nil
}
// skip non-regular file
if info.Mode()&os.ModeType != 0 {
return nil
}
// determine file type
fileType := determineFileType(info.Name())
switch fileType {
case FileTypeActiveLog:
if envDry {
log.Printf("%s: will truncate", path)
return nil
}
if err := os.Truncate(path, 0); err != nil {
log.Printf("%s: %s", path, err.Error())
} else {
log.Printf("%s: truncated", path)
}
case FileTypeHistoryLog:
if envDry {
log.Printf("%s: will delete", path)
return nil
}
if err := os.Remove(path); err != nil {
log.Printf("%s: %s", path, err.Error())
} else {
log.Printf("%s: deleted", path)
}
default:
log.Printf("%s: ignored", path)
}
return nil
})
if err != nil {
log.Println("failed to iterate files:", err.Error())
}
}
|
[
"\"CFG_DRY\"",
"\"CFG_ONCE\"",
"\"CFG_CRON\""
] |
[] |
[
"CFG_CRON",
"CFG_DRY",
"CFG_ONCE"
] |
[]
|
["CFG_CRON", "CFG_DRY", "CFG_ONCE"]
|
go
| 3 | 0 | |
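The package-level variables above show a compact env-driven configuration style: booleans come from strconv.ParseBool with the error ignored (so anything unparsable means false) and the cron expression is just a trimmed string. The same pattern in isolation:
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	// Booleans: unset or invalid values silently become false.
	dry, _ := strconv.ParseBool(os.Getenv("CFG_DRY"))
	once, _ := strconv.ParseBool(os.Getenv("CFG_ONCE"))
	// Strings: trim whitespace so "  " behaves like unset.
	cron := strings.TrimSpace(os.Getenv("CFG_CRON"))
	fmt.Printf("dry=%v once=%v cron=%q\n", dry, once, cron)
}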
column/string_string_test.go
|
package column_test
import (
"context"
"fmt"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/vahid-sohrabloo/chconn"
"github.com/vahid-sohrabloo/chconn/column"
)
// var emptyByte = make(string, 1024*10)
func TestStringString(t *testing.T) {
t.Parallel()
connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
conn, err := chconn.Connect(context.Background(), connString)
require.NoError(t, err)
res, err := conn.Exec(context.Background(), `DROP TABLE IF EXISTS test_string_string`)
require.NoError(t, err)
require.Nil(t, res)
res, err = conn.Exec(context.Background(), `CREATE TABLE test_string_string (
string String,
string_nullable Nullable(String),
string_array Array(String),
string_array_nullable Array(Nullable(String))
) Engine=Memory`)
require.NoError(t, err)
require.Nil(t, res)
col := column.NewString(false)
colArrayValues := column.NewString(false)
colArray := column.NewArray(colArrayValues)
colArrayValuesNil := column.NewString(true)
colArrayNil := column.NewArray(colArrayValuesNil)
colNil := column.NewString(true)
var colInsert []string
var colInsertArray [][]string
var colInsertArrayNil [][]*string
var colNilInsert []*string
rows := 10
for i := 1; i <= rows; i++ {
val := fmt.Sprintf("%d", i)
valArray := []string{val, fmt.Sprintf("%d", i+1)}
valArrayNil := []*string{&val, nil}
col.AppendString(val)
colInsert = append(colInsert, val)
// example insert array
colInsertArray = append(colInsertArray, valArray)
colArray.AppendLen(len(valArray))
for _, v := range valArray {
colArrayValues.AppendString(v)
}
// example insert nullable array
colInsertArrayNil = append(colInsertArrayNil, valArrayNil)
colArrayNil.AppendLen(len(valArrayNil))
for _, v := range valArrayNil {
colArrayValuesNil.AppendStringP(v)
}
// example add nullable
if i%2 == 0 {
colNilInsert = append(colNilInsert, &val)
if i <= rows/2 {
// example to add by pointer
colNil.AppendStringP(&val)
} else {
// example to add without pointer
colNil.AppendString(val)
colNil.AppendIsNil(false)
}
} else {
colNilInsert = append(colNilInsert, nil)
if i <= rows/2 {
// example to add by pointer
colNil.AppendP(nil)
} else {
// example to add without pointer
colNil.AppendEmpty()
colNil.AppendIsNil(true)
}
}
}
err = conn.Insert(context.Background(), `INSERT INTO
test_string_string (string,string_nullable,string_array,string_array_nullable)
VALUES`,
col,
colNil,
colArray,
colArrayNil)
require.NoError(t, err)
// example read all
selectStmt, err := conn.Select(context.Background(), `SELECT
string,string_nullable,string_array,string_array_nullable
FROM test_string_string`)
require.NoError(t, err)
require.True(t, conn.IsBusy())
colRead := column.NewString(false)
colNilRead := column.NewString(true)
colArrayReadData := column.NewString(false)
colArrayRead := column.NewArray(colArrayReadData)
colArrayReadDataNil := column.NewString(true)
colArrayReadNil := column.NewArray(colArrayReadDataNil)
var colData []string
var colNilData []*string
var colArrayData [][]string
var colArrayDataNil [][]*string
var colArrayLens []int
for selectStmt.Next() {
err = selectStmt.ReadColumns(colRead, colNilRead, colArrayRead, colArrayReadNil)
require.NoError(t, err)
colRead.ReadAllString(&colData)
colNilRead.ReadAllStringP(&colNilData)
// read array
colArrayLens = colArrayLens[:0]
colArrayRead.ReadAll(&colArrayLens)
for _, l := range colArrayLens {
arr := make([]string, l)
colArrayReadData.FillString(arr)
colArrayData = append(colArrayData, arr)
}
// read nullable array
colArrayLens = colArrayLens[:0]
colArrayReadNil.ReadAll(&colArrayLens)
for _, l := range colArrayLens {
arr := make([]*string, l)
colArrayReadDataNil.FillStringP(arr)
colArrayDataNil = append(colArrayDataNil, arr)
}
}
assert.Equal(t, colInsert, colData)
assert.Equal(t, colNilInsert, colNilData)
assert.Equal(t, colInsertArray, colArrayData)
assert.Equal(t, colInsertArrayNil, colArrayDataNil)
require.NoError(t, selectStmt.Err())
selectStmt.Close()
// example one by one
selectStmt, err = conn.Select(context.Background(), `SELECT
string,string_nullable,string_array,string_array_nullable
FROM test_string_string`)
require.NoError(t, err)
require.True(t, conn.IsBusy())
colRead = column.NewString(false)
colNilRead = column.NewString(true)
colArrayReadData = column.NewString(false)
colArrayRead = column.NewArray(colArrayReadData)
colArrayReadDataNil = column.NewString(true)
colArrayReadNil = column.NewArray(colArrayReadDataNil)
colData = colData[:0]
colNilData = colNilData[:0]
colArrayData = colArrayData[:0]
colArrayDataNil = colArrayDataNil[:0]
for selectStmt.Next() {
err = selectStmt.ReadColumns(colRead, colNilRead, colArrayRead, colArrayReadNil)
require.NoError(t, err)
for colRead.Next() {
colData = append(colData, colRead.ValueString())
}
// read nullable
for colNilRead.Next() {
colNilData = append(colNilData, colNilRead.ValueStringP())
}
// read array
for colArrayRead.Next() {
arr := make([]string, colArrayRead.Value())
colArrayReadData.FillString(arr)
colArrayData = append(colArrayData, arr)
}
// read nullable array
for colArrayReadNil.Next() {
arr := make([]*string, colArrayReadNil.Value())
colArrayReadDataNil.FillStringP(arr)
colArrayDataNil = append(colArrayDataNil, arr)
}
}
assert.Equal(t, colInsert, colData)
assert.Equal(t, colNilInsert, colNilData)
assert.Equal(t, colInsertArray, colArrayData)
assert.Equal(t, colInsertArrayNil, colArrayDataNil)
require.NoError(t, selectStmt.Err())
selectStmt.Close()
conn.RawConn().Close()
}
|
[
"\"CHX_TEST_TCP_CONN_STRING\""
] |
[] |
[
"CHX_TEST_TCP_CONN_STRING"
] |
[]
|
["CHX_TEST_TCP_CONN_STRING"]
|
go
| 1 | 0 | |
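TestStringString dials whatever CHX_TEST_TCP_CONN_STRING points at and fails hard if the variable is empty. A common alternative, sketched below under the assumption that skipping is acceptable, is to turn a missing connection string into a skipped test rather than a failure:
package example

import (
	"os"
	"testing"
)

// TestRequiresClickHouse is an illustrative guard, not part of the suite above:
// skip, rather than fail, when no ClickHouse connection string is available.
func TestRequiresClickHouse(t *testing.T) {
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	if connString == "" {
		t.Skip("CHX_TEST_TCP_CONN_STRING not set; skipping integration test")
	}
	_ = connString // a real test would connect to ClickHouse here
}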
db_test.go
|
package athena
import (
"bytes"
"context"
"database/sql"
"encoding/json"
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
uuid "github.com/satori/go.uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
AthenaDatabase = "go_athena_tests"
S3Bucket = "go-athena-tests"
)
func init() {
if v := os.Getenv("ATHENA_DATABASE"); v != "" {
AthenaDatabase = v
}
if v := os.Getenv("S3_BUCKET"); v != "" {
S3Bucket = v
}
}
func TestQuery(t *testing.T) {
harness := setup(t)
// defer harness.teardown()
expected := []dummyRow{
{
SmallintType: 1,
IntType: 2,
BigintType: 3,
BooleanType: true,
DoubleType: 1.32112345,
StringType: "some string",
TimestampType: athenaTimestamp(time.Date(2006, 1, 2, 3, 4, 11, 0, time.UTC)),
},
{
SmallintType: 9,
IntType: 8,
BigintType: 0,
BooleanType: false,
DoubleType: 1.235,
StringType: "another string",
TimestampType: athenaTimestamp(time.Date(2017, 12, 3, 1, 11, 12, 0, time.UTC)),
},
{
SmallintType: 9,
IntType: 8,
BigintType: 0,
BooleanType: false,
DoubleType: 1.235,
StringType: "another string",
TimestampType: athenaTimestamp(time.Date(2017, 12, 3, 20, 11, 12, 0, time.UTC)),
},
}
expectedTypeNames := []string{"varchar", "smallint", "integer", "bigint", "boolean", "double", "varchar", "timestamp"}
harness.uploadData(expected)
rows := harness.mustQuery("select * from %s", harness.table)
index := -1
for rows.Next() {
index++
var row dummyRow
require.NoError(t, rows.Scan(
&row.NullValue,
&row.SmallintType,
&row.IntType,
&row.BigintType,
&row.BooleanType,
&row.DoubleType,
&row.StringType,
&row.TimestampType,
))
assert.Equal(t, expected[index], row, fmt.Sprintf("index: %d", index))
types, err := rows.ColumnTypes()
assert.NoError(t, err, fmt.Sprintf("index: %d", index))
for i, colType := range types {
typeName := colType.DatabaseTypeName()
assert.Equal(t, expectedTypeNames[i], typeName, fmt.Sprintf("index: %d", index))
}
}
require.NoError(t, rows.Err(), "rows.Err()")
require.Equal(t, 2, index+1, "row count")
}
func TestOpen(t *testing.T) {
db, err := Open(Config{
Session: session.Must(session.NewSession()),
Database: AthenaDatabase,
OutputLocation: fmt.Sprintf("s3://%s/noop", S3Bucket),
})
require.NoError(t, err, "Open")
_, err = db.Query("SELECT 1")
require.NoError(t, err, "Query")
}
type dummyRow struct {
NullValue *struct{} `json:"nullValue"`
SmallintType int `json:"smallintType"`
IntType int `json:"intType"`
BigintType int `json:"bigintType"`
BooleanType bool `json:"booleanType"`
DoubleType float64 `json:"doubleType"`
StringType string `json:"stringType"`
TimestampType athenaTimestamp `json:"timestampType"`
}
type athenaHarness struct {
t *testing.T
db *sql.DB
s3 *s3.S3
table string
}
func setup(t *testing.T) *athenaHarness {
harness := athenaHarness{t: t, s3: s3.New(session.New())}
var err error
harness.db, err = sql.Open("athena", fmt.Sprintf("db=%s&output_location=s3://%s/output", AthenaDatabase, S3Bucket))
require.NoError(t, err)
harness.setupTable()
return &harness
}
func (a *athenaHarness) setupTable() {
// tables cannot start with numbers or contain dashes
a.table = "t_" + strings.Replace(uuid.NewV4().String(), "-", "_", -1)
a.mustExec(`CREATE EXTERNAL TABLE %[1]s (
nullValue string,
smallintType smallint,
intType int,
bigintType bigint,
booleanType boolean,
doubleType double,
stringType string,
timestampType timestamp
)
ROW FORMAT SERDE 'org.openx.data.jsonserde.JsonSerDe'
WITH SERDEPROPERTIES (
'serialization.format' = '1'
) LOCATION 's3://%[2]s/%[1]s/';`, a.table, S3Bucket)
fmt.Printf("created table: %s", a.table)
}
func (a *athenaHarness) teardown() {
a.mustExec("drop table %s", a.table)
}
func (a *athenaHarness) mustExec(sql string, args ...interface{}) {
query := fmt.Sprintf(sql, args...)
_, err := a.db.ExecContext(context.TODO(), query)
require.NoError(a.t, err, query)
}
func (a *athenaHarness) mustQuery(sql string, args ...interface{}) *sql.Rows {
query := fmt.Sprintf(sql, args...)
rows, err := a.db.QueryContext(context.TODO(), query)
require.NoError(a.t, err, query)
return rows
}
func (a *athenaHarness) uploadData(rows []dummyRow) {
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
for _, row := range rows {
err := enc.Encode(row)
require.NoError(a.t, err)
}
_, err := a.s3.PutObject(&s3.PutObjectInput{
Bucket: aws.String(S3Bucket),
Key: aws.String(fmt.Sprintf("%s/fixture.json", a.table)),
Body: bytes.NewReader(buf.Bytes()),
})
require.NoError(a.t, err)
}
type athenaTimestamp time.Time
func (t athenaTimestamp) MarshalJSON() ([]byte, error) {
return json.Marshal(t.String())
}
func (t athenaTimestamp) String() string {
return time.Time(t).Format(TimestampLayout)
}
func (t athenaTimestamp) Equal(t2 athenaTimestamp) bool {
return time.Time(t).Equal(time.Time(t2))
}
|
[
"\"ATHENA_DATABASE\"",
"\"S3_BUCKET\""
] |
[] |
[
"ATHENA_DATABASE",
"S3_BUCKET"
] |
[]
|
["ATHENA_DATABASE", "S3_BUCKET"]
|
go
| 2 | 0 | |
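The init function above applies a simple rule: keep the compiled-in defaults for AthenaDatabase and S3Bucket unless ATHENA_DATABASE or S3_BUCKET is set. The same "default unless overridden" pattern as a tiny helper (envOr is illustrative, not from the package):
package main

import (
	"fmt"
	"os"
)

// envOr returns the environment value for key, or def when the variable is unset or empty.
func envOr(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

func main() {
	db := envOr("ATHENA_DATABASE", "go_athena_tests")
	bucket := envOr("S3_BUCKET", "go-athena-tests")
	fmt.Println(db, bucket)
}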
internal/config/config.go
|
package config
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"log"
"math"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/telegraf/plugins/aggregators"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/toml"
"github.com/influxdata/toml/ast"
)
var (
// Default input plugins
inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel",
"processes", "disk", "diskio"}
// Default output plugins
outputDefaults = []string{"influxdb"}
// envVarRe is a regex to find environment variables in the config file
envVarRe = regexp.MustCompile(`\$\w+`)
envVarEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
)
// Config specifies the URL/user/password for the database that telegraf
// will be logging to, as well as all the plugins that the user has
// specified
type Config struct {
Tags map[string]string
InputFilters []string
OutputFilters []string
Agent *AgentConfig
Inputs []*models.RunningInput
Outputs []*models.RunningOutput
Aggregators []*models.RunningAggregator
// Processors have a slice wrapper type because they need to be sorted
Processors models.RunningProcessors
}
func NewConfig() *Config {
c := &Config{
// Agent defaults:
Agent: &AgentConfig{
Interval: internal.Duration{Duration: 10 * time.Second},
RoundInterval: true,
FlushInterval: internal.Duration{Duration: 10 * time.Second},
},
Tags: make(map[string]string),
Inputs: make([]*models.RunningInput, 0),
Outputs: make([]*models.RunningOutput, 0),
Processors: make([]*models.RunningProcessor, 0),
InputFilters: make([]string, 0),
OutputFilters: make([]string, 0),
}
return c
}
type AgentConfig struct {
// Interval at which to gather information
Interval internal.Duration
// RoundInterval rounds collection interval to 'interval'.
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
RoundInterval bool
// By default or when set to "0s", precision will be set to the same
// timestamp order as the collection interval, with the maximum being 1s.
// ie, when interval = "10s", precision will be "1s"
// when interval = "250ms", precision will be "1ms"
// Precision will NOT be used for service inputs. It is up to each individual
// service input to set the timestamp at the appropriate precision.
Precision internal.Duration
// CollectionJitter is used to jitter the collection by a random amount.
// Each plugin will sleep for a random time within jitter before collecting.
// This can be used to avoid many plugins querying things like sysfs at the
// same time, which can have a measurable effect on the system.
CollectionJitter internal.Duration
// FlushInterval is the Interval at which to flush data
FlushInterval internal.Duration
// FlushJitter Jitters the flush interval by a random amount.
// This is primarily to avoid large write spikes for users running a large
// number of telegraf instances.
// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
FlushJitter internal.Duration
// MetricBatchSize is the maximum number of metrics that is written to an
// output plugin in one call.
MetricBatchSize int
// MetricBufferLimit is the max number of metrics that each output plugin
// will cache. The buffer is cleared when a successful write occurs. When
// full, the oldest metrics will be overwritten. This number should be a
// multiple of MetricBatchSize. Due to current implementation, this could
// not be less than 2 times MetricBatchSize.
MetricBufferLimit int
// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
// it fills up, regardless of FlushInterval. Setting this option to true
// does _not_ deactivate FlushInterval.
FlushBufferWhenFull bool
// TODO(cam): Remove UTC and parameter, they are no longer
// valid for the agent config. Leaving them here for now for backwards-
// compatibility
UTC bool `toml:"utc"`
// Debug is the option for running in debug mode
Debug bool
// Logfile specifies the file to send logs to
Logfile string
// Quiet is the option for running in quiet mode
Quiet bool
Hostname string
OmitHostname bool
}
// InputNames returns a list of strings of the configured inputs.
func (c *Config) InputNames() []string {
var name []string
for _, input := range c.Inputs {
name = append(name, input.Name())
}
return name
}
// OutputNames returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string {
var name []string
for _, output := range c.Outputs {
name = append(name, output.Name)
}
return name
}
// ListTags returns a string of tags specified in the config,
// line-protocol style
func (c *Config) ListTags() string {
var tags []string
for k, v := range c.Tags {
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(tags)
return strings.Join(tags, " ")
}
var header = `# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills.
## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Logging configuration:
## Run telegraf with debug log messages.
debug = false
## Run telegraf in quiet mode (error log messages only).
quiet = false
## Specify the log file name. The empty string means to log to stderr.
logfile = ""
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
`
var processorHeader = `
###############################################################################
# PROCESSOR PLUGINS #
###############################################################################
`
var aggregatorHeader = `
###############################################################################
# AGGREGATOR PLUGINS #
###############################################################################
`
var inputHeader = `
###############################################################################
# INPUT PLUGINS #
###############################################################################
`
var serviceInputHeader = `
###############################################################################
# SERVICE INPUT PLUGINS #
###############################################################################
`
// PrintSampleConfig prints the sample config
func PrintSampleConfig(
inputFilters []string,
outputFilters []string,
aggregatorFilters []string,
processorFilters []string,
) {
fmt.Printf(header)
// print output plugins
if len(outputFilters) != 0 {
printFilteredOutputs(outputFilters, false)
} else {
printFilteredOutputs(outputDefaults, false)
// Print non-default outputs, commented
var pnames []string
for pname := range outputs.Outputs {
if !sliceContains(pname, outputDefaults) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
printFilteredOutputs(pnames, true)
}
// print processor plugins
fmt.Printf(processorHeader)
if len(processorFilters) != 0 {
printFilteredProcessors(processorFilters, false)
} else {
pnames := []string{}
for pname := range processors.Processors {
pnames = append(pnames, pname)
}
sort.Strings(pnames)
printFilteredProcessors(pnames, true)
}
// print aggregator plugins
fmt.Printf(aggregatorHeader)
if len(aggregatorFilters) != 0 {
printFilteredAggregators(aggregatorFilters, false)
} else {
pnames := []string{}
for pname := range aggregators.Aggregators {
pnames = append(pnames, pname)
}
sort.Strings(pnames)
printFilteredAggregators(pnames, true)
}
// print input plugins
fmt.Printf(inputHeader)
if len(inputFilters) != 0 {
printFilteredInputs(inputFilters, false)
} else {
printFilteredInputs(inputDefaults, false)
// Print non-default inputs, commented
var pnames []string
for pname := range inputs.Inputs {
if !sliceContains(pname, inputDefaults) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
printFilteredInputs(pnames, true)
}
}
func printFilteredProcessors(processorFilters []string, commented bool) {
// Filter processors
var pnames []string
for pname := range processors.Processors {
if sliceContains(pname, processorFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// Print Outputs
for _, pname := range pnames {
creator := processors.Processors[pname]
output := creator()
printConfig(pname, output, "processors", commented)
}
}
func printFilteredAggregators(aggregatorFilters []string, commented bool) {
// Filter outputs
var anames []string
for aname := range aggregators.Aggregators {
if sliceContains(aname, aggregatorFilters) {
anames = append(anames, aname)
}
}
sort.Strings(anames)
// Print Outputs
for _, aname := range anames {
creator := aggregators.Aggregators[aname]
output := creator()
printConfig(aname, output, "aggregators", commented)
}
}
func printFilteredInputs(inputFilters []string, commented bool) {
// Filter inputs
var pnames []string
for pname := range inputs.Inputs {
if sliceContains(pname, inputFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// cache service inputs to print them at the end
servInputs := make(map[string]telegraf.ServiceInput)
// for alphabetical looping:
servInputNames := []string{}
// Print Inputs
for _, pname := range pnames {
creator := inputs.Inputs[pname]
input := creator()
switch p := input.(type) {
case telegraf.ServiceInput:
servInputs[pname] = p
servInputNames = append(servInputNames, pname)
continue
}
printConfig(pname, input, "inputs", commented)
}
// Print Service Inputs
if len(servInputs) == 0 {
return
}
sort.Strings(servInputNames)
fmt.Printf(serviceInputHeader)
for _, name := range servInputNames {
printConfig(name, servInputs[name], "inputs", commented)
}
}
func printFilteredOutputs(outputFilters []string, commented bool) {
// Filter outputs
var onames []string
for oname := range outputs.Outputs {
if sliceContains(oname, outputFilters) {
onames = append(onames, oname)
}
}
sort.Strings(onames)
// Print Outputs
for _, oname := range onames {
creator := outputs.Outputs[oname]
output := creator()
printConfig(oname, output, "outputs", commented)
}
}
type printer interface {
Description() string
SampleConfig() string
}
func printConfig(name string, p printer, op string, commented bool) {
comment := ""
if commented {
comment = "# "
}
fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment,
op, name)
config := p.SampleConfig()
if config == "" {
fmt.Printf("\n%s # no configuration\n\n", comment)
} else {
lines := strings.Split(config, "\n")
for i, line := range lines {
if i == 0 || i == len(lines)-1 {
fmt.Print("\n")
continue
}
fmt.Print(strings.TrimRight(comment+line, " ") + "\n")
}
}
}
func sliceContains(name string, list []string) bool {
for _, b := range list {
if b == name {
return true
}
}
return false
}
// PrintInputConfig prints the config usage of a single input.
func PrintInputConfig(name string) error {
if creator, ok := inputs.Inputs[name]; ok {
printConfig(name, creator(), "inputs", false)
} else {
return errors.New(fmt.Sprintf("Input %s not found", name))
}
return nil
}
// PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig(name string) error {
if creator, ok := outputs.Outputs[name]; ok {
printConfig(name, creator(), "outputs", false)
} else {
return errors.New(fmt.Sprintf("Output %s not found", name))
}
return nil
}
func (c *Config) LoadDirectory(path string) error {
walkfn := func(thispath string, info os.FileInfo, _ error) error {
if info == nil {
log.Printf("W! Telegraf is not permitted to read %s", thispath)
return nil
}
if info.IsDir() {
if strings.HasPrefix(info.Name(), "..") {
// skip Kubernetes mounts, preventing loading the same config twice
return filepath.SkipDir
}
return nil
}
name := info.Name()
if len(name) < 6 || name[len(name)-5:] != ".conf" {
return nil
}
err := c.LoadConfig(thispath)
if err != nil {
return err
}
return nil
}
return filepath.Walk(path, walkfn)
}
// Try to find a default config file at these locations (in order):
// 1. $TELEGRAF_CONFIG_PATH
// 2. $HOME/.telegraf/telegraf.conf
// 3. /etc/telegraf/telegraf.conf
//
func getDefaultConfigPath() (string, error) {
envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
etcfile := "/etc/telegraf/telegraf.conf"
if runtime.GOOS == "windows" {
etcfile = `C:\Program Files\Telegraf\telegraf.conf`
}
for _, path := range []string{envfile, homefile, etcfile} {
if _, err := os.Stat(path); err == nil {
log.Printf("I! Using config file: %s", path)
return path, nil
}
}
// if we got here, we didn't find a file in a default location
return "", fmt.Errorf("No config file specified, and could not find one"+
" in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile)
}
// LoadConfig loads the given config file and applies it to c
func (c *Config) LoadConfig(path string) error {
var err error
if path == "" {
if path, err = getDefaultConfigPath(); err != nil {
return err
}
}
tbl, err := parseFile(path)
if err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
// Parse tags tables first:
for _, tableName := range []string{"tags", "global_tags"} {
if val, ok := tbl.Fields[tableName]; ok {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("%s: invalid configuration", path)
}
if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
log.Printf("E! Could not parse [global_tags] config\n")
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
}
// Parse agent table:
if val, ok := tbl.Fields["agent"]; ok {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("%s: invalid configuration", path)
}
if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
log.Printf("E! Could not parse [agent] config\n")
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
// Parse all the rest of the plugins:
for name, val := range tbl.Fields {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("%s: invalid configuration", path)
}
switch name {
case "agent", "global_tags", "tags":
case "outputs":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
// legacy [outputs.influxdb] support
case *ast.Table:
if err = c.addOutput(pluginName, pluginSubTable); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addOutput(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
case "inputs", "plugins":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
// legacy [inputs.cpu] support
case *ast.Table:
if err = c.addInput(pluginName, pluginSubTable); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addInput(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
case "processors":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addProcessor(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
case "aggregators":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addAggregator(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
// Assume it's an input for legacy config file support if no other
// identifiers are present
default:
if err = c.addInput(name, subTable); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
}
if len(c.Processors) > 1 {
sort.Sort(c.Processors)
}
return nil
}
// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// this is for Windows compatibility only.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}
// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
return envVarEscaper.Replace(value)
}
// parseFile loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.
func parseFile(fpath string) (*ast.Table, error) {
contents, err := ioutil.ReadFile(fpath)
if err != nil {
return nil, err
}
// ugh windows why
contents = trimBOM(contents)
env_vars := envVarRe.FindAll(contents, -1)
for _, env_var := range env_vars {
env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$"))
if ok {
env_val = escapeEnv(env_val)
contents = bytes.Replace(contents, env_var, []byte(env_val), 1)
}
}
return toml.Parse(contents)
}
func (c *Config) addAggregator(name string, table *ast.Table) error {
creator, ok := aggregators.Aggregators[name]
if !ok {
return fmt.Errorf("Undefined but requested aggregator: %s", name)
}
aggregator := creator()
conf, err := buildAggregator(name, table)
if err != nil {
return err
}
if err := toml.UnmarshalTable(table, aggregator); err != nil {
return err
}
c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf))
return nil
}
func (c *Config) addProcessor(name string, table *ast.Table) error {
creator, ok := processors.Processors[name]
if !ok {
return fmt.Errorf("Undefined but requested processor: %s", name)
}
processor := creator()
processorConfig, err := buildProcessor(name, table)
if err != nil {
return err
}
if err := toml.UnmarshalTable(table, processor); err != nil {
return err
}
rf := &models.RunningProcessor{
Name: name,
Processor: processor,
Config: processorConfig,
}
c.Processors = append(c.Processors, rf)
return nil
}
func (c *Config) addOutput(name string, table *ast.Table) error {
if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
return nil
}
creator, ok := outputs.Outputs[name]
if !ok {
return fmt.Errorf("Undefined but requested output: %s", name)
}
output := creator()
// If the output has a SetSerializer function, then this means it can write
// arbitrary types of output, so build the serializer and set it.
switch t := output.(type) {
case serializers.SerializerOutput:
serializer, err := buildSerializer(name, table)
if err != nil {
return err
}
t.SetSerializer(serializer)
}
outputConfig, err := buildOutput(name, table)
if err != nil {
return err
}
if err := toml.UnmarshalTable(table, output); err != nil {
return err
}
ro := models.NewRunningOutput(name, output, outputConfig,
c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
c.Outputs = append(c.Outputs, ro)
return nil
}
func (c *Config) addInput(name string, table *ast.Table) error {
if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
return nil
}
// Legacy support renaming io input to diskio
if name == "io" {
name = "diskio"
}
creator, ok := inputs.Inputs[name]
if !ok {
return fmt.Errorf("Undefined but requested input: %s", name)
}
input := creator()
// If the input has a SetParser function, then this means it can accept
// arbitrary types of input, so build the parser and set it.
switch t := input.(type) {
case parsers.ParserInput:
parser, err := buildParser(name, table)
if err != nil {
return err
}
t.SetParser(parser)
}
pluginConfig, err := buildInput(name, table)
if err != nil {
return err
}
if err := toml.UnmarshalTable(table, input); err != nil {
return err
}
rp := models.NewRunningInput(input, pluginConfig)
c.Inputs = append(c.Inputs, rp)
return nil
}
// buildAggregator parses Aggregator specific items from the ast.Table,
// builds the filter and returns a
// models.AggregatorConfig to be inserted into models.RunningAggregator
func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) {
unsupportedFields := []string{"tagexclude", "taginclude"}
for _, field := range unsupportedFields {
if _, ok := tbl.Fields[field]; ok {
return nil, fmt.Errorf("%s is not supported for aggregator plugins (%s).",
field, name)
}
}
conf := &models.AggregatorConfig{
Name: name,
Delay: time.Millisecond * 100,
Period: time.Second * 30,
}
if node, ok := tbl.Fields["period"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
conf.Period = dur
}
}
}
if node, ok := tbl.Fields["delay"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
conf.Delay = dur
}
}
}
if node, ok := tbl.Fields["drop_original"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
conf.DropOriginal, err = strconv.ParseBool(b.Value)
if err != nil {
log.Printf("Error parsing boolean value for %s: %s\n", name, err)
}
}
}
}
if node, ok := tbl.Fields["name_prefix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.MeasurementPrefix = str.Value
}
}
}
if node, ok := tbl.Fields["name_suffix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.MeasurementSuffix = str.Value
}
}
}
if node, ok := tbl.Fields["name_override"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.NameOverride = str.Value
}
}
}
conf.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
log.Printf("Could not parse tags for input %s\n", name)
}
}
}
//not deleting period from the table as we intend to add this field to aggregator plugin as well
delete(tbl.Fields, "period")
delete(tbl.Fields, "delay")
delete(tbl.Fields, "drop_original")
delete(tbl.Fields, "name_prefix")
delete(tbl.Fields, "name_suffix")
delete(tbl.Fields, "name_override")
delete(tbl.Fields, "tags")
var err error
conf.Filter, err = buildFilter(tbl)
if err != nil {
return conf, err
}
return conf, nil
}
// buildProcessor parses Processor specific items from the ast.Table,
// builds the filter and returns a
// models.ProcessorConfig to be inserted into models.RunningProcessor
func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) {
conf := &models.ProcessorConfig{Name: name}
unsupportedFields := []string{"tagexclude", "taginclude", "fielddrop", "fieldpass"}
for _, field := range unsupportedFields {
if _, ok := tbl.Fields[field]; ok {
return nil, fmt.Errorf("%s is not supported for processor plugins (%s).",
field, name)
}
}
if node, ok := tbl.Fields["order"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Integer); ok {
var err error
conf.Order, err = strconv.ParseInt(b.Value, 10, 64)
if err != nil {
log.Printf("Error parsing int value for %s: %s\n", name, err)
}
}
}
}
delete(tbl.Fields, "order")
var err error
conf.Filter, err = buildFilter(tbl)
if err != nil {
return conf, err
}
return conf, nil
}
// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the models.OutputConfig/models.InputConfig
// to be used for glob filtering on tags and measurements
func buildFilter(tbl *ast.Table) (models.Filter, error) {
f := models.Filter{}
if node, ok := tbl.Fields["namepass"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.NamePass = append(f.NamePass, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["namedrop"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.NameDrop = append(f.NameDrop, str.Value)
}
}
}
}
}
fields := []string{"pass", "fieldpass"}
for _, field := range fields {
if node, ok := tbl.Fields[field]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.FieldPass = append(f.FieldPass, str.Value)
}
}
}
}
}
}
fields = []string{"drop", "fielddrop"}
for _, field := range fields {
if node, ok := tbl.Fields[field]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.FieldDrop = append(f.FieldDrop, str.Value)
}
}
}
}
}
}
if node, ok := tbl.Fields["tagpass"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
}
f.TagPass = append(f.TagPass, *tagfilter)
}
}
}
}
if node, ok := tbl.Fields["tagdrop"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
}
f.TagDrop = append(f.TagDrop, *tagfilter)
}
}
}
}
if node, ok := tbl.Fields["tagexclude"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.TagExclude = append(f.TagExclude, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["taginclude"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.TagInclude = append(f.TagInclude, str.Value)
}
}
}
}
}
if err := f.Compile(); err != nil {
return f, err
}
delete(tbl.Fields, "namedrop")
delete(tbl.Fields, "namepass")
delete(tbl.Fields, "fielddrop")
delete(tbl.Fields, "fieldpass")
delete(tbl.Fields, "drop")
delete(tbl.Fields, "pass")
delete(tbl.Fields, "tagdrop")
delete(tbl.Fields, "tagpass")
delete(tbl.Fields, "tagexclude")
delete(tbl.Fields, "taginclude")
return f, nil
}
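// Illustration for buildFilter above (hypothetical plugin table, not part of
// the original source): the filter-related keys of a config section map onto
// the models.Filter fields as follows.
//
//   [[inputs.cpu]]
//     namepass  = ["cpu*"]          # -> f.NamePass
//     fieldpass = ["usage_idle"]    # -> f.FieldPass (the legacy key "pass" feeds the same slice)
//     [inputs.cpu.tagpass]
//       host = ["web-*"]            # -> f.TagPass = [{Name: "host", Filter: ["web-*"]}]
//
// Once the filter compiles, the consumed keys are deleted from tbl.Fields so
// later validation does not flag them as unknown plugin options.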
// buildInput parses input specific items from the ast.Table,
// builds the filter and returns a
// models.InputConfig to be inserted into models.RunningInput
func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
cp := &models.InputConfig{Name: name}
if node, ok := tbl.Fields["interval"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
cp.Interval = dur
}
}
}
if node, ok := tbl.Fields["name_prefix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
cp.MeasurementPrefix = str.Value
}
}
}
if node, ok := tbl.Fields["name_suffix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
cp.MeasurementSuffix = str.Value
}
}
}
if node, ok := tbl.Fields["name_override"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
cp.NameOverride = str.Value
}
}
}
cp.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
log.Printf("E! Could not parse tags for input %s\n", name)
}
}
}
delete(tbl.Fields, "name_prefix")
delete(tbl.Fields, "name_suffix")
delete(tbl.Fields, "name_override")
delete(tbl.Fields, "interval")
delete(tbl.Fields, "tags")
var err error
cp.Filter, err = buildFilter(tbl)
if err != nil {
return cp, err
}
return cp, nil
}
// buildParser grabs the necessary entries from the ast.Table for creating
// a parsers.Parser object, and creates it, which can then be added onto
// an Input object.
func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
c := &parsers.Config{}
if node, ok := tbl.Fields["data_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DataFormat = str.Value
}
}
}
// Legacy support, exec plugin originally parsed JSON by default.
if name == "exec" && c.DataFormat == "" {
c.DataFormat = "json"
} else if c.DataFormat == "" {
c.DataFormat = "influx"
}
if node, ok := tbl.Fields["separator"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.Separator = str.Value
}
}
}
if node, ok := tbl.Fields["templates"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.Templates = append(c.Templates, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["tag_keys"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.TagKeys = append(c.TagKeys, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["data_type"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DataType = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_auth_file"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CollectdAuthFile = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_security_level"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CollectdSecurityLevel = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_typesdb"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["dropwizard_metric_registry_path"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardMetricRegistryPath = str.Value
}
}
}
if node, ok := tbl.Fields["dropwizard_time_path"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTimePath = str.Value
}
}
}
if node, ok := tbl.Fields["dropwizard_time_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTimeFormat = str.Value
}
}
}
if node, ok := tbl.Fields["dropwizard_tags_path"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTagsPath = str.Value
}
}
}
c.DropwizardTagPathsMap = make(map[string]string)
if node, ok := tbl.Fields["dropwizard_tag_paths"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTagPathsMap[name] = str.Value
}
}
}
}
}
c.MetricName = name
delete(tbl.Fields, "data_format")
delete(tbl.Fields, "separator")
delete(tbl.Fields, "templates")
delete(tbl.Fields, "tag_keys")
delete(tbl.Fields, "data_type")
delete(tbl.Fields, "collectd_auth_file")
delete(tbl.Fields, "collectd_security_level")
delete(tbl.Fields, "collectd_typesdb")
delete(tbl.Fields, "dropwizard_metric_registry_path")
delete(tbl.Fields, "dropwizard_time_path")
delete(tbl.Fields, "dropwizard_time_format")
delete(tbl.Fields, "dropwizard_tags_path")
delete(tbl.Fields, "dropwizard_tag_paths")
return parsers.NewParser(c)
}
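// Rough sketch of the data_format defaulting above (the plugin tables are made
// up for illustration; only the key handling mirrors the code):
//
//   [[inputs.exec]]              # no data_format key
//     commands = ["./probe"]     #   -> c.DataFormat == "json" (legacy exec behaviour)
//
//   [[inputs.tail]]              # no data_format key
//     files = ["/var/log/app"]   #   -> c.DataFormat == "influx" (general default)
//
// An explicit data_format value is passed through to parsers.NewParser as-is.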
// buildSerializer grabs the necessary entries from the ast.Table for creating
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) {
c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)}
if node, ok := tbl.Fields["data_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DataFormat = str.Value
}
}
}
if c.DataFormat == "" {
c.DataFormat = "influx"
}
if node, ok := tbl.Fields["prefix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.Prefix = str.Value
}
}
}
if node, ok := tbl.Fields["template"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.Template = str.Value
}
}
}
if node, ok := tbl.Fields["influx_max_line_bytes"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
c.InfluxMaxLineBytes = int(v)
}
}
}
if node, ok := tbl.Fields["influx_sort_fields"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.InfluxSortFields, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["influx_uint_support"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.InfluxUintSupport, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["graphite_tag_support"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.GraphiteTagSupport, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["json_timestamp_units"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
timestampVal, err := time.ParseDuration(str.Value)
if err != nil {
return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err)
}
// now that we have a duration, truncate it to the nearest
// power of ten (just in case)
nearest_exponent := int64(math.Log10(float64(timestampVal.Nanoseconds())))
new_nanoseconds := int64(math.Pow(10.0, float64(nearest_exponent)))
c.TimestampUnits = time.Duration(new_nanoseconds)
}
}
}
delete(tbl.Fields, "influx_max_line_bytes")
delete(tbl.Fields, "influx_sort_fields")
delete(tbl.Fields, "influx_uint_support")
delete(tbl.Fields, "graphite_tag_support")
delete(tbl.Fields, "data_format")
delete(tbl.Fields, "prefix")
delete(tbl.Fields, "template")
delete(tbl.Fields, "json_timestamp_units")
return serializers.NewSerializer(c)
}
// buildOutput parses output specific items from the ast.Table,
// builds the filter and returns a
// models.OutputConfig to be inserted into models.RunningOutput
// Note: error exists in the return for future calls that might require error
func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
filter, err := buildFilter(tbl)
if err != nil {
return nil, err
}
oc := &models.OutputConfig{
Name: name,
Filter: filter,
}
// Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass
if len(oc.Filter.FieldDrop) > 0 {
oc.Filter.NameDrop = oc.Filter.FieldDrop
}
if len(oc.Filter.FieldPass) > 0 {
oc.Filter.NamePass = oc.Filter.FieldPass
}
return oc, nil
}
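// Hypothetical example of the FieldDrop/FieldPass remapping above (the output
// table is invented for illustration):
//
//   [[outputs.file]]
//     fieldpass = ["cpu_*"]
//
// buildFilter records this as Filter.FieldPass; since outputs filter whole
// metrics rather than individual fields, buildOutput copies the patterns into
// Filter.NamePass, so only measurements matching "cpu_*" reach this output.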
|
[
"\"TELEGRAF_CONFIG_PATH\""
] |
[] |
[
"TELEGRAF_CONFIG_PATH"
] |
[]
|
["TELEGRAF_CONFIG_PATH"]
|
go
| 1 | 0 | |
kafka.go
|
package kafka
import (
"bytes"
"fmt"
"log"
"os"
"strconv"
"strings"
"text/template"
"time"
"crypto/tls"
"io/ioutil"
"github.com/gliderlabs/logspout/router"
"gopkg.in/Shopify/sarama.v1"
)
func init() {
router.AdapterFactories.Register(NewKafkaAdapter, "kafka")
}
type KafkaAdapter struct {
route *router.Route
brokers []string
topic string
producer sarama.AsyncProducer
tmpl *template.Template
}
func NewKafkaAdapter(route *router.Route) (router.LogAdapter, error) {
brokers := readBrokers(route.Address)
if len(brokers) == 0 {
return nil, errorf("The Kafka broker host:port is missing. Did you specify it as a route address?")
}
topic := readTopic(route.Address, route.Options)
if topic == "" {
return nil, errorf("The Kafka topic is missing. Did you specify it as a route option?")
}
var err error
cert_file := os.Getenv("TLS_CERT_FILE")
key_file := os.Getenv("TLS_PRIVKEY_FILE")
var tmpl *template.Template
if text := os.Getenv("KAFKA_TEMPLATE"); text != "" {
tmpl, err = template.New("kafka").Parse(text)
if err != nil {
return nil, errorf("Couldn't parse Kafka message template. %v", err)
}
}
if os.Getenv("DEBUG") != "" {
log.Printf("Starting Kafka producer for address: %s, topic: %s.\n", brokers, topic)
}
var retries int
retries, err = strconv.Atoi(os.Getenv("KAFKA_CONNECT_RETRIES"))
if err != nil {
retries = 3
}
var producer sarama.AsyncProducer
if os.Getenv("DEBUG") != "" {
log.Println("Generating Kafka configuration.")
}
config := newConfig()
if (cert_file != "") && (key_file != "") {
if os.Getenv("DEBUG") != "" {
log.Println("Enabling Kafka TLS support.")
}
certfile, err := os.Open(cert_file)
if err != nil {
return nil, errorf("Couldn't open TLS certificate file: %s", err)
}
keyfile, err := os.Open(key_file)
if err != nil {
return nil, errorf("Couldn't open TLS private key file: %s", err)
}
tls_cert, err := ioutil.ReadAll(certfile)
if err != nil {
return nil, errorf("Couldn't read TLS certificate: %s", err)
}
tls_privkey, err := ioutil.ReadAll(keyfile)
if err != nil {
return nil, errorf("Couldn't read TLS private key: %s", err)
}
keypair, err := tls.X509KeyPair([]byte(tls_cert), []byte(tls_privkey))
if err != nil {
return nil, errorf("Couldn't establish TLS authentication keypair. Check TLS_CERT_FILE and TLS_PRIVKEY_FILE environment vars.")
}
tls_configuration := &tls.Config{
Certificates: []tls.Certificate{keypair},
InsecureSkipVerify: false,
}
config.Net.TLS.Config = tls_configuration
config.Net.TLS.Enable = true
}
for i := 0; i < retries; i++ {
producer, err = sarama.NewAsyncProducer(brokers, config)
if err != nil {
if os.Getenv("DEBUG") != "" {
log.Println("Couldn't create Kafka producer. Retrying...", err)
}
if i == retries-1 {
return nil, errorf("Couldn't create Kafka producer. %v", err)
}
} else {
time.Sleep(1 * time.Second)
break // producer created successfully; stop retrying
}
}
return &KafkaAdapter{
route: route,
brokers: brokers,
topic: topic,
producer: producer,
tmpl: tmpl,
}, nil
}
func (a *KafkaAdapter) Stream(logstream chan *router.Message) {
defer a.producer.Close()
for rm := range logstream {
message, err := a.formatMessage(rm)
if err != nil {
log.Println("kafka:", err)
a.route.Close()
break
}
a.producer.Input() <- message
}
}
func newConfig() *sarama.Config {
config := sarama.NewConfig()
config.ClientID = "logspout"
config.Producer.Return.Errors = false
config.Producer.Return.Successes = false
config.Producer.Flush.Frequency = 1 * time.Second
config.Producer.RequiredAcks = sarama.WaitForLocal
if opt := os.Getenv("KAFKA_COMPRESSION_CODEC"); opt != "" {
switch opt {
case "gzip":
config.Producer.Compression = sarama.CompressionGZIP
case "snappy":
config.Producer.Compression = sarama.CompressionSnappy
}
}
return config
}
func (a *KafkaAdapter) formatMessage(message *router.Message) (*sarama.ProducerMessage, error) {
var encoder sarama.Encoder
if a.tmpl != nil {
var w bytes.Buffer
if err := a.tmpl.Execute(&w, message); err != nil {
return nil, err
}
encoder = sarama.ByteEncoder(w.Bytes())
} else {
encoder = sarama.StringEncoder(message.Data)
}
return &sarama.ProducerMessage{
Topic: a.topic,
Value: encoder,
}, nil
}
func readBrokers(address string) []string {
if strings.Contains(address, "/") {
slash := strings.Index(address, "/")
address = address[:slash]
}
return strings.Split(address, ",")
}
func readTopic(address string, options map[string]string) string {
var topic string
if !strings.Contains(address, "/") {
topic = options["topic"]
} else {
slash := strings.Index(address, "/")
topic = address[slash+1:]
}
return topic
}
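// Worked example for readBrokers/readTopic above (addresses are placeholders):
//
//   route address "kafka-1:9092,kafka-2:9092/container-logs"
//     readBrokers -> []string{"kafka-1:9092", "kafka-2:9092"}
//     readTopic   -> "container-logs"
//
//   route address "kafka-1:9092" with route option topic=fallback
//     readBrokers -> []string{"kafka-1:9092"}
//     readTopic   -> "fallback" (taken from route.Options["topic"])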
func errorf(format string, a ...interface{}) (err error) {
err = fmt.Errorf(format, a...)
if os.Getenv("DEBUG") != "" {
fmt.Println(err.Error())
}
return
}
|
[
"\"TLS_CERT_FILE\"",
"\"TLS_PRIVKEY_FILE\"",
"\"KAFKA_TEMPLATE\"",
"\"DEBUG\"",
"\"KAFKA_CONNECT_RETRIES\"",
"\"DEBUG\"",
"\"DEBUG\"",
"\"DEBUG\"",
"\"KAFKA_COMPRESSION_CODEC\"",
"\"DEBUG\""
] |
[] |
[
"TLS_CERT_FILE",
"KAFKA_CONNECT_RETRIES",
"KAFKA_COMPRESSION_CODEC",
"TLS_PRIVKEY_FILE",
"DEBUG",
"KAFKA_TEMPLATE"
] |
[]
|
["TLS_CERT_FILE", "KAFKA_CONNECT_RETRIES", "KAFKA_COMPRESSION_CODEC", "TLS_PRIVKEY_FILE", "DEBUG", "KAFKA_TEMPLATE"]
|
go
| 6 | 0 | |
tests/unit/kubernetes/test_Ssh.py
|
import os
from unittest import TestCase
import pytest
from dotenv import load_dotenv
from pytest import skip
from osbot_utils.utils.Files import file_not_exists
from osbot_k8s.kubernetes.Ssh import Ssh
@pytest.mark.skip('needs live server') # todo add to test setup the creation of pods and nodes we can SSH into
class test_Ssh(TestCase):
def setUp(self) -> None:
load_dotenv()
self.ssh_config = {
"user" : os.environ.get('TEST_SSH_USER' ),
"server" : os.environ.get('TEST_SSH_SERVER'),
"ssh_key" : os.environ.get('TEST_SSH_KEY' )
}
if file_not_exists(self.ssh_config.get('ssh_key')):
skip('no ssh key in current test environment')
self.ssh = Ssh(ssh_config=self.ssh_config)
print()
# base methods
def test_server_in_known_hosts(self):
result = self.ssh.server_in_known_hosts() # todo: add method to programmatically add the server to the known_hosts file
assert type(result) is bool
def test_exec_ssh_command(self):
assert self.ssh.exec_ssh_command('uname') == {'error': '', 'output': 'Linux\n', 'status': True}
assert self.ssh.exec_ssh_command('aaaa' ) == {'error': 'bash: aaaa: command not found\n', 'output': '', 'status': False}
def test_get_get_scp_params(self):
source_file = 'source_file'
target_file = 'target_file'
ssh_params = self.ssh.get_scp_params(source_file, target_file)
assert ssh_params == ['-i', self.ssh_config.get('ssh_key'),
f"{self.ssh_config.get('user')}@{self.ssh_config.get('server')}:{source_file}",
target_file]
def test_get_get_ssh_params(self):
ssh_params = self.ssh.get_ssh_params('aaa')
assert ssh_params == ['-o StrictHostKeyChecking=no',
'-t', '-i', self.ssh_config.get('ssh_key'),
self.ssh_config.get('user') + '@' + self.ssh_config.get('server'),
'aaa']
def test_exec(self):
assert 'bin' in self.ssh.exec('cd /; ls')
# helper methods
def test_uname(self):
assert self.ssh.uname() == 'Linux'
# def create_pods(self, count):
# return self.ssh.exec(f'/home/ubuntu/icap-infrastructure/scripts/create_pod.sh {count}')
#
# def test_created_pod(self):
# self.create_pods(1)
# #assert 'bin' in self.ssh.exec('ls')
# # helper methods: esxcli
|
[] |
[] |
[
"TEST_SSH_KEY' ",
"TEST_SSH_SERVER",
"TEST_SSH_USER' "
] |
[]
|
["TEST_SSH_KEY' ", "TEST_SSH_SERVER", "TEST_SSH_USER' "]
|
python
| 3 | 0 | |
main/plasmaBank/asgi.py
|
"""
ASGI config for plasmaBank project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'plasmaBank.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
protokube/pkg/gossip/mesh/gossip.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mesh
import (
"fmt"
"net"
"os"
"strconv"
"time"
"github.com/golang/glog"
"github.com/weaveworks/mesh"
"k8s.io/kops/protokube/pkg/gossip"
)
type MeshGossiper struct {
seeds gossip.SeedProvider
router *mesh.Router
peer *peer
version uint64
lastSnapshot *gossip.GossipStateSnapshot
}
func NewMeshGossiper(listen string, channelName string, nodeName string, password []byte, seeds gossip.SeedProvider) (*MeshGossiper, error) {
connLimit := 64
gossipDnsConnLimit := os.Getenv("GOSSIP_DNS_CONN_LIMIT")
if gossipDnsConnLimit != "" {
limit, err := strconv.Atoi(gossipDnsConnLimit)
if err != nil {
return nil, fmt.Errorf("cannot parse env GOSSIP_DNS_CONN_LIMIT value: %v, err:%v", gossipDnsConnLimit, err)
}
connLimit = limit
}
glog.Infof("gossip dns connection limit is:%d", connLimit)
meshConfig := mesh.Config{
ProtocolMinVersion: mesh.ProtocolMinVersion,
Password: password,
ConnLimit: connLimit,
PeerDiscovery: true,
//TrustedSubnets: []*net.IPNet{},
}
{
host, portString, err := net.SplitHostPort(listen)
if err != nil {
return nil, fmt.Errorf("cannot parse -listen flag: %v", listen)
}
port, err := strconv.Atoi(portString)
if err != nil {
return nil, fmt.Errorf("cannot parse -listen flag: %v", listen)
}
meshConfig.Host = host
meshConfig.Port = port
}
meshName, err := mesh.PeerNameFromUserInput(nodeName)
if err != nil {
return nil, fmt.Errorf("error parsing peer name: %v", err)
}
nickname := nodeName
logger := &glogLogger{}
router := mesh.NewRouter(meshConfig, meshName, nickname, mesh.NullOverlay{}, logger)
peer := newPeer(meshName)
gossip := router.NewGossip(channelName, peer)
peer.register(gossip)
gossiper := &MeshGossiper{
seeds: seeds,
router: router,
peer: peer,
}
return gossiper, nil
}
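// Sketch of the inputs NewMeshGossiper above consumes (values are hypothetical):
//
//   listen = "0.0.0.0:3999"    -> meshConfig.Host = "0.0.0.0", meshConfig.Port = 3999
//   GOSSIP_DNS_CONN_LIMIT=128  -> connLimit = 128 (default 64 when unset;
//                                 a non-integer value is rejected with an error)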
func (g *MeshGossiper) Start() error {
//glog.Infof("mesh router starting (%s)", *meshListen)
g.router.Start()
defer func() {
glog.Infof("mesh router stopping")
g.router.Stop()
}()
g.runSeeding()
return nil
}
func (g *MeshGossiper) runSeeding() {
for {
glog.V(2).Infof("Querying for seeds")
seeds, err := g.seeds.GetSeeds()
if err != nil {
glog.Warningf("error getting seeds: %v", err)
time.Sleep(1 * time.Minute)
continue
}
glog.Infof("Got seeds: %s", seeds)
// TODO: Include ourselves? Exclude ourselves?
removeOthers := false
errors := g.router.ConnectionMaker.InitiateConnections(seeds, removeOthers)
if len(errors) != 0 {
for _, err := range errors {
glog.Infof("error connecting to seeds: %v", err)
}
time.Sleep(1 * time.Minute)
continue
}
glog.V(2).Infof("Seeding successful")
// Reseed periodically, just in case of partitions
// TODO: Make it so that only one node polls, or at least statistically get close
time.Sleep(60 * time.Minute)
}
}
func (g *MeshGossiper) Snapshot() *gossip.GossipStateSnapshot {
return g.peer.snapshot()
}
func (g *MeshGossiper) UpdateValues(removeKeys []string, putEntries map[string]string) error {
glog.V(2).Infof("UpdateValues: remove=%s, put=%s", removeKeys, putEntries)
return g.peer.updateValues(removeKeys, putEntries)
}
|
[
"\"GOSSIP_DNS_CONN_LIMIT\""
] |
[] |
[
"GOSSIP_DNS_CONN_LIMIT"
] |
[]
|
["GOSSIP_DNS_CONN_LIMIT"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
"strings"
"time"
"github.com/DataDog/datadog-go/statsd"
"github.com/gorilla/mux"
"github.com/hashicorp/faas-nomad/consul"
"github.com/hashicorp/faas-nomad/handlers"
"github.com/hashicorp/faas-nomad/metrics"
"github.com/hashicorp/faas-nomad/nomad"
fntypes "github.com/hashicorp/faas-nomad/types"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/nomad/api"
bootstrap "github.com/openfaas/faas-provider"
"github.com/openfaas/faas-provider/types"
)
var version = "notset"
var (
port = flag.Int("port", 8080, "Port to bind the server to")
statsdServer = flag.String("statsd_addr", "localhost:8125", "Location for the statsd collector")
nodeURI = flag.String("node_addr", "localhost", "URI of the current Nomad node, this address is used for reporting and logging")
nomadAddr = flag.String("nomad_addr", "localhost:4646", "Address for Nomad API endpoint")
consulAddr = flag.String("consul_addr", "http://localhost:8500", "Address for Consul API endpoint")
consulACL = flag.String("consul_acl", "", "ACL token for Consul API, only required if ACL are enabled in Consul")
enableConsulDNS = flag.Bool("enable_consul_dns", false, "Uses the consul_addr as a default DNS server. Assumes DNS interface is listening on port 53")
nomadRegion = flag.String("nomad_region", "global", "Default region to schedule functions in")
enableBasicAuth = flag.Bool("enable_basic_auth", false, "Flag for enabling basic authentication on gateway endpoints")
basicAuthSecretPath = flag.String("basic_auth_secret_path", "/secrets", "The directory path to the basic auth secret file")
vaultDefaultPolicy = flag.String("vault_default_policy", "openfaas", "The default policy used when secrets are deployed with a function")
vaultSecretPathPrefix = flag.String("vault_secret_path_prefix", "secret/openfaas", "The Vault k/v path prefix used when secrets are deployed with a function")
)
var functionTimeout = flag.Duration("function_timeout", 30*time.Second, "Timeout for function execution")
var (
loggerFormat = flag.String("logger_format", "text", "Format for log output text | json")
loggerLevel = flag.String("logger_level", "INFO", "Log output level INFO | ERROR | DEBUG | TRACE")
loggerOutput = flag.String("logger_output", "", "Filepath to write log file, if omitted stdOut is used")
)
// parseDeprecatedEnvironment is used to merge the previous environment variable configuration to the new flag style
// this will be removed in the next release
func parseDeprecatedEnvironment() {
checkDeprecatedStatsD()
checkDeprecatedNomadHTTP()
checkDeprecatedNomadAddr()
checkDeprecatedConsulAddr()
checkDeprecatedNomadRegion()
checkDeprecatedLoggerLevel()
checkDeprecatedLoggerFormat()
checkDeprecatedLoggerOutput()
}
func checkDeprecatedStatsD() {
if env := os.Getenv("STATSD_ADDR"); env != "" {
*statsdServer = env
log.Println("The environment variable STATSD_ADDR is deprecated please use the command line flag statsd_addr")
}
}
func checkDeprecatedNomadHTTP() {
if env := os.Getenv("NOMAD_ADDR_http"); env != "" {
*nodeURI = env
log.Println("The environment variable NOMAD_ADDR_http is deprecated please use the command line flag node_addr")
}
}
func checkDeprecatedNomadAddr() {
if env := os.Getenv("NOMAD_ADDR"); env != "" {
*nomadAddr = env
log.Println("The environment variable NOMAD_ADDR is deprecated please use the command line flag nomad_addr")
}
}
func checkDeprecatedConsulAddr() {
if env := os.Getenv("CONSUL_ADDR"); env != "" {
*consulAddr = env
log.Println("The environment variable CONSUL_ADDR is deprecated please use the command line flag consul_addr")
}
}
func checkDeprecatedNomadRegion() {
if env := os.Getenv("NOMAD_REGION"); env != "" {
*nomadRegion = env
log.Println("The environment variable NOMAD_REGION is deprecated please use the command line flag nomad_region")
}
}
func checkDeprecatedLoggerLevel() {
if env := os.Getenv("logger_level"); env != "" {
*loggerLevel = env
log.Println("The environment variable logger_level is deprecated please use the command line flag logger_level")
}
}
func checkDeprecatedLoggerFormat() {
if env := os.Getenv("logger_format"); env != "" {
*loggerFormat = env
log.Println("The environment variable logger_format is deprecated please use the command line flag logger_format")
}
}
func checkDeprecatedLoggerOutput() {
if env := os.Getenv("logger_output"); env != "" {
*loggerOutput = env
log.Println("The environment variable logger_output is deprecated please use the command line flag logger_output")
}
}
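// Summary of the deprecated-variable handling above (example values only):
//
//   STATSD_ADDR=statsd:8125  ~ -statsd_addr=statsd:8125
//   NOMAD_ADDR=nomad:4646    ~ -nomad_addr=nomad:4646
//   CONSUL_ADDR=consul:8500  ~ -consul_addr=consul:8500
//   NOMAD_REGION=us-east-1   ~ -nomad_region=us-east-1
//
// Because parseDeprecatedEnvironment runs after flag.Parse in main, a set
// environment variable overrides the corresponding command line flag.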
func main() {
flag.Parse()
parseDeprecatedEnvironment() // to be removed in 0.3.0
logger, stats, nomadClient, consulResolver := makeDependencies(
*statsdServer,
*nodeURI,
*nomadAddr,
*consulAddr,
*consulACL,
*nomadRegion,
)
logger.Info("Started version: " + version)
stats.Incr("started", nil, 1)
handlers := createFaaSHandlers(nomadClient, consulResolver, stats, logger)
config := &types.FaaSConfig{}
config.ReadTimeout = *functionTimeout
config.WriteTimeout = *functionTimeout
config.TCPPort = port
config.EnableHealth = true
config.EnableBasicAuth = *enableBasicAuth
config.SecretMountPath = *basicAuthSecretPath
logger.Info("Started Nomad provider", "port", *config.TCPPort)
logger.Info("Basic authentication", "enabled", fmt.Sprintf("%t", config.EnableBasicAuth))
bootstrap.Serve(handlers, config)
}
func createFaaSHandlers(nomadClient *api.Client, consulResolver *consul.Resolver, stats *statsd.Client, logger hclog.Logger) *types.FaaSHandlers {
datacenter, err := nomadClient.Agent().Datacenter()
if err != nil {
logger.Error("Error returning the agent's datacenter", err)
datacenter = "dc1"
}
logger.Info("Datacenter from agent: " + datacenter)
providerConfig := &fntypes.ProviderConfig{
VaultDefaultPolicy: *vaultDefaultPolicy,
VaultSecretPathPrefix: *vaultSecretPathPrefix,
Datacenter: datacenter,
ConsulAddress: *consulAddr,
ConsulDNSEnabled: *enableConsulDNS,
}
return &types.FaaSHandlers{
FunctionReader: handlers.MakeReader(nomadClient.Jobs(), logger, stats),
DeployHandler: handlers.MakeDeploy(nomadClient.Jobs(), *providerConfig, logger, stats),
DeleteHandler: handlers.MakeDelete(consulResolver, nomadClient.Jobs(), logger, stats),
ReplicaReader: makeReplicationReader(nomadClient.Jobs(), logger, stats),
ReplicaUpdater: makeReplicationUpdater(nomadClient.Jobs(), logger, stats),
FunctionProxy: makeFunctionProxyHandler(consulResolver, logger, stats, *functionTimeout),
UpdateHandler: handlers.MakeDeploy(nomadClient.Jobs(), *providerConfig, logger, stats),
InfoHandler: handlers.MakeInfo(logger, stats, version),
Health: handlers.MakeHealthHandler(),
}
}
func makeDependencies(statsDAddr, thisAddr, nomadAddr, consulAddr, consulACL, region string) (hclog.Logger, *statsd.Client, *api.Client, *consul.Resolver) {
logger := setupLogging()
logger.Info("Using StatsD server:" + statsDAddr)
stats, err := statsd.New(statsDAddr)
if err != nil {
logger.Error("Error creating statsd client", err)
}
// prefix every metric with the app name
stats.Namespace = "faas.nomadd."
stats.Tags = append(stats.Tags, "instance:"+strings.Replace(thisAddr, ":", "_", -1))
c := api.DefaultConfig()
logger.Info("create nomad client", "addr", nomadAddr)
nomadClient, err := api.NewClient(c.ClientConfig(region, nomadAddr, false))
if err != nil {
logger.Error("Unable to create nomad client", err)
}
cr := consul.NewResolver(consulAddr, consulACL, logger.Named("consul_resolver"))
return logger, stats, nomadClient, cr
}
func setupLogging() hclog.Logger {
logJSON := false
if *loggerFormat == "json" {
logJSON = true
}
appLogger := hclog.New(&hclog.LoggerOptions{
Name: "nomadd",
Level: hclog.LevelFromString(*loggerLevel),
JSONFormat: logJSON,
Output: createLogFile(),
})
return appLogger
}
func createLogFile() *os.File {
if logFile := os.Getenv("logger_output"); logFile != "" {
f, err := os.OpenFile(logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
if err == nil {
return f
}
log.Printf("Unable to open file for output, defaulting to std out: %s\n", err.Error())
}
return os.Stdout
}
func makeFunctionProxyHandler(r consul.ServiceResolver, logger hclog.Logger, s *statsd.Client, timeout time.Duration) http.HandlerFunc {
return handlers.MakeExtractFunctionMiddleWare(
func(r *http.Request) map[string]string {
return mux.Vars(r)
},
handlers.MakeProxy(
handlers.ProxyConfig{
Client: handlers.MakeProxyClient(timeout, logger),
Resolver: r,
Logger: logger,
StatsD: s,
Timeout: timeout,
},
),
)
}
func makeReplicationReader(client nomad.Job, logger hclog.Logger, stats metrics.StatsD) http.HandlerFunc {
return handlers.MakeExtractFunctionMiddleWare(
func(r *http.Request) map[string]string {
return mux.Vars(r)
},
handlers.MakeReplicationReader(client, logger, stats),
)
}
func makeReplicationUpdater(client nomad.Job, logger hclog.Logger, stats metrics.StatsD) http.HandlerFunc {
return handlers.MakeExtractFunctionMiddleWare(
func(r *http.Request) map[string]string {
return mux.Vars(r)
},
handlers.MakeReplicationWriter(client, logger, stats),
)
}
|
[
"\"STATSD_ADDR\"",
"\"NOMAD_ADDR_http\"",
"\"NOMAD_ADDR\"",
"\"CONSUL_ADDR\"",
"\"NOMAD_REGION\"",
"\"logger_level\"",
"\"logger_format\"",
"\"logger_output\"",
"\"logger_output\""
] |
[] |
[
"NOMAD_REGION",
"NOMAD_ADDR",
"logger_format",
"CONSUL_ADDR",
"logger_level",
"STATSD_ADDR",
"NOMAD_ADDR_http",
"logger_output"
] |
[]
|
["NOMAD_REGION", "NOMAD_ADDR", "logger_format", "CONSUL_ADDR", "logger_level", "STATSD_ADDR", "NOMAD_ADDR_http", "logger_output"]
|
go
| 8 | 0 | |
codes/metric_camera_coplanarity_quaternion_wc_jacobian.py
|
from sympy import *
from quaternion_R_utils import *
px_1, py_1, pz_1 = symbols('px_1 py_1 pz_1')
q0_1, q1_1, q2_1, q3_1 = symbols('q0_1 q1_1 q2_1 q3_1')
px_2, py_2, pz_2 = symbols('px_2 py_2 pz_2')
q0_2, q1_2, q2_2, q3_2 = symbols('q0_2 q1_2 q2_2 q3_2')
ksi_1, eta_1, ksi_2, eta_2, ksi_01, eta_01, ksi_02, eta_02, c_1, c_2 = symbols('ksi_1 eta_1 ksi_2 eta_2 ksi_01 eta_01 ksi_02 eta_02 c_1 c_2');
position_symbols_1 = [px_1, py_1, pz_1]
quaternion_symbols_1 = [q0_1, q1_1, q2_1, q3_1]
position_symbols_2 = [px_2, py_2, pz_2]
quaternion_symbols_2 = [q0_2, q1_2, q2_2, q3_2]
c_symbols = [c_1, c_2]
all_symbols = position_symbols_1 + quaternion_symbols_1 + position_symbols_2 + quaternion_symbols_2
bx=px_2-px_1
by=py_2-py_1
bz=pz_2-pz_1
b=Matrix([[0, -bz, by], [bz, 0, -bx], [-by, bx, 0]])
C_1t=Matrix([[1, 0, -ksi_01], [0, 1, -eta_01], [0, 0, -c_1]]).transpose()
C_2=Matrix([[1, 0, -ksi_02], [0, 1, -eta_02], [0, 0, -c_2]])
camera_matrix_1 = matrix44FromQuaternion(px_1, py_1, pz_1, q0_1, q1_1, q2_1, q3_1)
R_1t=camera_matrix_1[:-1,:-1].transpose()
camera_matrix_2 = matrix44FromQuaternion(px_2, py_2, pz_2, q0_2, q1_2, q2_2, q3_2)
R_2=camera_matrix_2[:-1,:-1]
ksieta_1=Matrix([[ksi_1, eta_1, 1]])
ksieta_2t=Matrix([[ksi_2, eta_2, 1]]).transpose()
obs_eq = Matrix([[0]]) - ksieta_1 * C_1t * R_1t * b * R_2 * C_2 * ksieta_2t
obs_eq_jacobian = obs_eq.jacobian(all_symbols)
print(obs_eq)
print(obs_eq_jacobian)
with open("metric_camera_coplanarity_quaternion_wc_jacobian.h",'w') as f_cpp:
f_cpp.write("inline void observation_equation_metric_camera_coplanarity_quaternion_wc(double &delta, double ksi_01, double eta_01, double c_1, double ksi_1, double eta_1, double px_1, double py_1, double pz_1, double q0_1, double q1_1, double q2_1, double q3_1, double ksi_02, double eta_02, double c_2, double ksi_2, double eta_2, double px_2, double py_2, double pz_2, double q0_2, double q1_2, double q2_2, double q3_2)\n")
f_cpp.write("{")
f_cpp.write("delta = %s;\n"%(ccode(obs_eq[0])))
f_cpp.write("}")
f_cpp.write("\n")
f_cpp.write("inline void observation_equation_metric_camera_coplanarity_quaternion_wc_jacobian(Eigen::Matrix<double, 1, 14, Eigen::RowMajor> &j, double ksi_01, double eta_01, double c_1, double ksi_1, double eta_1, double px_1, double py_1, double pz_1, double q0_1, double q1_1, double q2_1, double q3_1, double ksi_02, double eta_02, double c_2, double ksi_2, double eta_2, double px_2, double py_2, double pz_2, double q0_2, double q1_2, double q2_2, double q3_2)\n")
f_cpp.write("{")
for i in range (14):  # write all 14 Jacobian columns, matching the 1x14 matrix declared in the generated header
f_cpp.write("j.coeffRef(%d,%d) = %s;\n"%(0,i, ccode(obs_eq_jacobian[0,i])))
f_cpp.write("}")
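# Reader note (added summary, not part of the generated output): obs_eq above is
# the coplanarity condition. With baseline b = p_2 - p_1 written as the
# skew-symmetric matrix [b]_x and image rays r_i = R_i * C_i * [ksi_i, eta_i, 1]^T,
# the residual is (up to sign) r_1^T * [b]_x * r_2, which must vanish for
# corresponding image points.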
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
src/util/tests/diag.py
|
# Diagnostic Algorithm
import csv
import sys
import datetime
from numpy import *
from datetime import date as dte
from datetime import datetime as dt
import pandas as pd
sys.path.append("./archive")
from mesowest import WyomingUpperAir, MesoWest
import time
import os
import requests
import pprint as pp
from rich import print, box
from rich.panel import Panel
from rich.progress import track
from rich.table import Table
from rich.console import Console
from rich.progress import (
BarColumn,
DownloadColumn,
TextColumn,
TransferSpeedColumn,
TimeRemainingColumn,
Progress,
TaskID,
)
## Timeout Retry
REQUESTS_MAX_RETRIES = int(os.getenv("REQUESTS_MAX_RETRIES", 10))
adapter = requests.adapters.HTTPAdapter(max_retries=REQUESTS_MAX_RETRIES)
output = Console(record=True)
progress_log = Progress(console=output)
progress = Progress(TextColumn("[bold blue]{task.fields[filename]}", justify="right"),
BarColumn(bar_width=None),
"[progress.percentage]{task.percentage:>3.1f}%",
TimeRemainingColumn())
class Diagnostic:
def closest(lst, K, d):
lst = asarray(lst)
list = []
tmp2 = dt.combine(d, K)
for i in range(len(lst)):
list.append(abs(dt.combine(d, lst[i]) - tmp2))
idx = asarray(list).argmin()
return lst[idx]
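# Illustrative behaviour of closest() above (times invented for the example):
# given lst = [11:45, 12:10], K = 12:00 and a date d, 12:10 is returned, since
# its absolute distance to 12:00 (10 min) is smaller than 11:45's (15 min).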
def diag_abq(self, date,pw_abq00z, pw_abq12z):
dat = dt.strptime(date, "%m/%d/%Y")
try:
pw00 = WyomingUpperAir.request_data(dat + datetime.timedelta(days=1), "ABQ").pw[0]
except ValueError:
pw00 = "NaN"
if pw_abq00z == "NaN" or pw00 == "NaN":
final_00 = "[bold orange3]NaN"
fail_00 = None
else:
if float(pw00) == float(pw_abq00z):
final_00 = "[bold green]Success" # Success
fail_00 = None
else:
final_00 = "[bold red]Fail" # Fail
fail_00 = "[red]ABQ 00: {} != {}".format(pw00, pw_abq00z)
try:
pw12 = WyomingUpperAir.request_data(dt.combine(dat,datetime.time(12, 0)), "ABQ").pw[0]
except ValueError:
pw12 = "NaN"
if pw_abq12z == "NaN" or pw12 == "NaN":
final_12 = "[bold orange3]NaN"
fail_12 = None
else:
if float(pw12) == float(pw_abq12z):
final_12 = "[bold green]Success" # Success
fail_12 = None
else:
final_12 = "[bold red]Fail" # Fail
fail_12 = "[red]ABQ 12: {} != {}".format(pw12, pw_abq12z)
return final_00, final_12, fail_00, fail_12
def diag_epz(self, date, pw_epz00z, pw_epz12z):
dat = dt.strptime(date, "%m/%d/%Y")
try:
pw00 = WyomingUpperAir.request_data(dat + datetime.timedelta(days=1), "EPZ").pw[0]
except ValueError:
pw00 = "NaN"
if pw_epz00z == "NaN" or pw00 == "NaN":
final_00 = "[bold orange3]NaN"
fail_00 = None
else:
if float(pw00) == float(pw_epz00z):
final_00 = "[bold green]Success" # Success
fail_00 = None
else:
final_00 = "[bold red]Fail" # Fail
fail_00 = "[red]EPZ 00: {} != {}".format(pw00, pw_epz00z)
try:
pw12 = WyomingUpperAir.request_data(dt.combine(dat,datetime.time(12, 0)), "EPZ").pw[0]
except ValueError:
pw12 = "NaN"
if pw_epz12z == "NaN" or pw12 == "NaN":
final_12 = "[bold orange3]NaN"
fail_12 = None
else:
if float(pw12) == float(pw_epz12z):
final_12 = "[bold green]Success" # Success
fail_12 = None
else:
final_12 = "[bold red]Fail" # Fail
fail_12 = "[red]EPZ 12: {} != {}".format(pw12, pw_epz12z)
return final_00, final_12, fail_00, fail_12
def diag_mesowest(self, date, vtime, rh):
dat = dt.strptime(date, "%m/%d/%Y")
df_mw = MesoWest.request_data(dat + datetime.timedelta(days=1), "KONM")
in_time = pd.to_datetime(vtime).time()
df_tm = df_mw.loc[(df_mw['Time'] == Diagnostic.closest(df_mw['Time'], in_time, dat))]
data_mesowest = [df_tm['RH'].values.tolist()[0],
df_tm['Time'].values.tolist()[0],
round(df_tm['Temp'].values.tolist()[0], 2)]
if float(rh) == float(data_mesowest[0]):
final_rh = "[bold green]Success" # Success
fail_rh = None
else:
final_rh = "[bold red]Fail" # Fail
fail_rh = "[red]RH: {} != {}".format(rh, data_mesowest[0])
return final_rh, fail_rh
def eval_table(self,date, abq00, abq12, epz00, epz12, rh, fail):
table = Table(show_header=False, box=None, padding=(0,1,0,0))
table.add_column("Date", width=12, justify="left", no_wrap=True)
table.add_column("ABQ 00", width=12, justify="left", no_wrap=True)
table.add_column("ABQ 12", width=12, justify="left", no_wrap=True)
table.add_column("EPZ 00", width=12, justify="left", no_wrap=True)
table.add_column("EPZ 12", width=12, justify="left", no_wrap=True)
table.add_column("RH", width=12, justify="left", no_wrap=True)
table.add_row("{}".format(date),"{}".format(abq00),"{}".format(abq12),"{}".format(epz00),"{}".format(epz12),"{}".format(rh))
if [fail[i] != None for i in range(0, len(fail))] != [False, False, False, False, False]:
indx = [fail[i] != None for i in range(0, len(fail))].index(True)
table_fail = Table(show_header=False, box=None, padding=(0,1,0,0))
table_fail.add_column("Error", width=100, justify="left", no_wrap=True)
table_fail.add_row("[bold red]{}".format(fail[indx]))
progress_log.log(table)
progress_log.log(table_fail)
else:
progress_log.log(table)
if __name__ == '__main__':
fname = "../data/master_data.csv"
data = loadtxt(fname, skiprows=1,delimiter=",", dtype=str, unpack=True, usecols=(0,2,3,4,5,6,7))
date = data[0]
rh = data[1]
pw_abq_12z = data[2]
pw_abq_00z = data[3]
pw_epz_12z = data[4]
pw_epz_00z = data[5]
vtime = data[6]
table = Table(box=None, padding=(0,1,0,0))
table.add_column("Date", width=12, justify="left", no_wrap=True)
table.add_column("ABQ 00", width=12, justify="left", no_wrap=True)
table.add_column("ABQ 12", width=12, justify="left", no_wrap=True)
table.add_column("EPZ 00", width=12, justify="left", no_wrap=True)
table.add_column("EPZ 12", width=12, justify="left", no_wrap=True)
table.add_column("RH", width=12, justify="left", no_wrap=True)
progress_log.log(table)
task_id = progress.add_task("download", filename="Diagnostic Algorithm")
def main(i):
D = Diagnostic()
final_abq_00, final_abq_12, fail_abq_00, fail_abq_12 = D.diag_abq(date[i], pw_abq_00z[i], pw_abq_12z[i])
final_epz_00, final_epz_12, fail_epz_00, fail_epz_12 = D.diag_epz(date[i], pw_epz_00z[i], pw_epz_12z[i])
final_rh, fail_rh = D.diag_mesowest(date[i],vtime[i], rh[i])
fail = [fail_abq_00, fail_abq_12, fail_epz_00, fail_epz_12, fail_rh]
D.eval_table(date[i], final_abq_00, final_abq_12, final_epz_00, final_epz_12, final_rh, fail)
progress.update(task_id, advance=100./(len(data[0])),refresh=True)
for i in range(0+200, len(data[0])):
ex = "requests.exception.HTTPError"
while ex == "requests.exception.HTTPError":
try:
main(i)
ex = None
except requests.exceptions.HTTPError as exception:
time.sleep(60)
main(i)
ex = str(exception)
with open("./output.txt", "a") as f:
export = output.export_text(clear=True)
if export[0] == "[":
f.write(export)
else:
f.write(export[5:])
|
[] |
[] |
[
"REQUESTS_MAX_RETRIES"
] |
[]
|
["REQUESTS_MAX_RETRIES"]
|
python
| 1 | 0 | |
pkg/cmd/get/get_activity_test.go
|
// +build unit
package get_test
import (
"os"
"testing"
"time"
"github.com/olli-ai/jx/v2/pkg/cmd/get"
"github.com/olli-ai/jx/v2/pkg/cmd/opts"
"github.com/olli-ai/jx/v2/pkg/cmd/testhelpers"
"github.com/olli-ai/jx/v2/pkg/gits"
helm_test "github.com/olli-ai/jx/v2/pkg/helm/mocks"
resources_test "github.com/olli-ai/jx/v2/pkg/kube/resources/mocks"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestGetActivity(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Get Activity Suite")
}
var _ = Describe("get activity", func() {
Describe("Run()", func() {
var (
originalRepoOwner string
originalRepoName string
originalJobName string
originalBranchName string
sort bool
err error
stdout *testhelpers.FakeOut
)
BeforeEach(func() {
originalRepoOwner = os.Getenv("REPO_OWNER")
originalRepoName = os.Getenv("REPO_NAME")
originalJobName = os.Getenv("JOB_NAME")
originalBranchName = os.Getenv("BRANCH_NAME")
os.Setenv("REPO_OWNER", "jx-testing")
os.Setenv("REPO_NAME", "jx-testing")
os.Setenv("JOB_NAME", "job")
os.Setenv("BRANCH_NAME", "job")
})
AfterEach(func() {
os.Setenv("REPO_OWNER", originalRepoOwner)
os.Setenv("REPO_NAME", originalRepoName)
os.Setenv("JOB_NAME", originalJobName)
os.Setenv("BRANCH_NAME", originalBranchName)
})
JustBeforeEach(func() {
stdout = &testhelpers.FakeOut{}
commonOpts := &opts.CommonOptions{
Out: stdout,
}
commonOpts.SetDevNamespace("jx")
testhelpers.ConfigureTestOptionsWithResources(commonOpts,
[]runtime.Object{},
[]runtime.Object{},
&gits.GitFake{CurrentBranch: "job"},
&gits.FakeProvider{},
helm_test.NewMockHelmer(),
resources_test.NewMockInstaller(),
)
c, ns, _ := commonOpts.JXClient()
testhelpers.CreateTestPipelineActivityWithTime(c, ns, "jx-testing", "jx-testing", "job", "1", "workflow", v1.Date(2019, time.October, 10, 23, 0, 0, 0, time.UTC))
testhelpers.CreateTestPipelineActivityWithTime(c, ns, "jx-testing", "jx-testing", "job", "2", "workflow", v1.Date(2019, time.January, 10, 23, 0, 0, 0, time.UTC))
options := &get.GetActivityOptions{
CommonOptions: commonOpts,
Sort: sort,
}
err = options.Run()
})
Context("Without flags", func() {
BeforeEach(func() {
sort = false
})
It("Prints a list of activities", func() {
Expect(err).NotTo(HaveOccurred())
Expect(stdout.GetOutput()).To(ContainSubstring(`STARTED AGO DURATION STATUS
jx-testing/jx-testing/job #1`))
})
})
Context("With the sort flag", func() {
BeforeEach(func() {
sort = true
})
It("Prints a sorted list of activities", func() {
Expect(err).NotTo(HaveOccurred())
Expect(stdout.GetOutput()).To(ContainSubstring(`STARTED AGO DURATION STATUS
jx-testing/jx-testing/job #2`))
})
})
})
})
|
[
"\"REPO_OWNER\"",
"\"REPO_NAME\"",
"\"JOB_NAME\"",
"\"BRANCH_NAME\""
] |
[] |
[
"BRANCH_NAME",
"JOB_NAME",
"REPO_NAME",
"REPO_OWNER"
] |
[]
|
["BRANCH_NAME", "JOB_NAME", "REPO_NAME", "REPO_OWNER"]
|
go
| 4 | 0 | |
airflow/operators/bash_operator.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import signal
from subprocess import Popen, STDOUT, PIPE
from tempfile import gettempdir, NamedTemporaryFile
from typing import Dict, Optional
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.file import TemporaryDirectory
from airflow.utils.operator_helpers import context_to_airflow_vars
class BashOperator(BaseOperator):
"""
Execute a Bash script, command or set of commands.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BashOperator`
If BaseOperator.do_xcom_push is True, the last line written to stdout
will also be pushed to an XCom when the bash command completes
:param bash_command: The command, set of commands or reference to a
bash script (must be '.sh') to be executed. (templated)
:type bash_command: str
:param env: If env is not None, it must be a mapping that defines the
environment variables for the new process; these are used instead
of inheriting the current process environment, which is the default
behavior. (templated)
:type env: dict
:param output_encoding: Output encoding of bash command
:type output_encoding: str
On execution of this operator the task will be up for retry
when exception is raised. However, if a sub-command exits with non-zero
value Airflow will not recognize it as failure unless the whole shell exits
with a failure. The easiest way of achieving this is to prefix the command
with ``set -e;``
Example:
.. code-block:: python
bash_command = "set -e; python3 script.py '{{ next_execution_date }}'"
"""
template_fields = ('bash_command', 'env')
template_ext = ('.sh', '.bash',)
ui_color = '#f0ede4'
@apply_defaults
def __init__(
self,
bash_command: str,
env: Optional[Dict[str, str]] = None,
output_encoding: str = 'utf-8',
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.bash_command = bash_command
self.env = env
self.output_encoding = output_encoding
if kwargs.get('xcom_push') is not None:
raise AirflowException("'xcom_push' was deprecated, use 'BaseOperator.do_xcom_push' instead")
def execute(self, context):
"""
Execute the bash command in a temporary directory
which will be cleaned afterwards
"""
self.log.info('Tmp dir root location: \n %s', gettempdir())
# Prepare env for child process.
env = self.env
if env is None:
env = os.environ.copy()
airflow_context_vars = context_to_airflow_vars(context, in_env_var_format=True)
self.log.info('Exporting the following env vars:\n%s',
'\n'.join(["{}={}".format(k, v)
for k, v in
airflow_context_vars.items()]))
env.update(airflow_context_vars)
self.lineage_data = self.bash_command
with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as tmp_file:
tmp_file.write(bytes(self.bash_command, 'utf_8'))
tmp_file.flush()
script_location = os.path.abspath(tmp_file.name)
self.log.info('Temporary script location: %s', script_location)
def pre_exec():
# Restore default signal disposition and invoke setsid
for sig in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'):
if hasattr(signal, sig):
signal.signal(getattr(signal, sig), signal.SIG_DFL)
os.setsid()
self.log.info('Running command: %s', self.bash_command)
sub_process = Popen(
['bash', tmp_file.name],
stdout=PIPE,
stderr=STDOUT,
cwd=tmp_dir,
env=env,
preexec_fn=pre_exec)
self.sub_process = sub_process
self.log.info('Output:')
line = ''
for raw_line in iter(sub_process.stdout.readline, b''):
line = raw_line.decode(self.output_encoding).rstrip()
self.log.info(line)
sub_process.wait()
self.log.info('Command exited with return code %s', sub_process.returncode)
if sub_process.returncode:
raise AirflowException('Bash command failed')
return line
def on_kill(self):
self.log.info('Sending SIGTERM signal to bash process group')
os.killpg(os.getpgid(self.sub_process.pid), signal.SIGTERM)
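# Minimal usage sketch (assumes a surrounding DAG object named `dag`; not part
# of the original module):
#
#   run_this = BashOperator(
#       task_id="print_date",
#       bash_command="set -e; date",
#       env={"MY_VAR": "value"},  # replaces the inherited environment when set
#       dag=dag,
#   )
#
# With do_xcom_push=True, the last line the command writes to stdout is what
# execute() returns and pushes to XCom.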
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
generate_skillet_preview.py
|
# 12-12-19 [email protected]
import os
import re
import sys
from lxml import etree
from skilletlib import Panoply
config_source = os.environ.get('skillet_source', 'offline')
if config_source == 'offline':
# grab our two configs from the environment
base_config_path = os.environ.get('BASE_CONFIG', '/Users/nembery/Downloads/iron_skillet_panos_full.xml')
# base_config_path = os.environ.get('BASE_CONFIG', '')
latest_config_path = os.environ.get('LATEST_CONFIG', '/Users/nembery/Downloads/running-config (16).xml')
with open(base_config_path, 'r') as bcf:
base_config = bcf.read()
with open(latest_config_path, 'r') as lcf:
latest_config = lcf.read()
p = Panoply()
snippets = p.generate_skillet_from_configs(base_config, latest_config)
else:
# each variable will be present in the environ dict on the 'os' module
username = os.environ.get('TARGET_USERNAME', 'admin')
password = os.environ.get('TARGET_PASSWORD', '')
ip = os.environ.get('TARGET_IP', '')
from_candidate = os.environ.get('FROM_CANDIDATE', 'False')
p = Panoply(hostname=ip, api_username=username, api_password=password, debug=False)
snippets = p.generate_skillet(from_candidate=from_candidate)
if from_candidate:
latest_config = p.get_configuration(config_source='candidate')
else:
latest_config = p.get_configuration(config_source='running')
# check we actually have some diffs
if len(snippets) == 0:
print('No Candidate Configuration can be found to use to build a skillet!')
sys.exit(2)
latest_doc = etree.fromstring(latest_config)
print('#' * 80)
print(' ')
print('The following xpaths were found to be modified:')
print(' ')
print('-' * 80)
print(' ')
for s in snippets:
name = s.get('name', '')
snippet_xpath = s.get('xpath')
full_xpath = s.get('full_xpath', '')
print(f'<a href="#{name}">{full_xpath}</a>')
xpath = re.sub('^/config', '.', snippet_xpath)
# parent_element_xpath = '.' + "/".join(xpath.split('/')[:-1])
parent_elements = latest_doc.xpath(xpath)
if not parent_elements:
print('something is broken here')
continue
parent_element = parent_elements[0]
element_string = s.get('element', '')
# find child element index
index = 0
found = False
for child in parent_element:
cs = etree.tostring(child).decode('UTF-8')
cs_stripped = cs.strip()
whitespace_match = re.search(r'(\s+)$', cs)
if whitespace_match:
whitespace = whitespace_match.group()
else:
whitespace = ''
if element_string == cs_stripped:
# found our child index
found = True
parent_element.remove(child)
title = snippet_xpath.replace('"', "'")
wrapped_child_element = \
etree.fromstring(
f'<span id="{name}" class="text-danger" title="{title}">{element_string}{whitespace}</span>')
parent_element.insert(index, wrapped_child_element)
break
index = index + 1
if not found:
print('did not find this, odd')
latest_config_formatted = etree.tostring(latest_doc, pretty_print=True).decode('UTF-8')
latest_config_html = latest_config_formatted.replace('<', '&lt;').replace('>', '&gt;')
fixed_config_html_1 = re.sub(r'&lt;span id="(.*?)" class="(.*?)" title="(.*?)"&gt;',
                             r'<span class="\2" id="\1" title="\3">', latest_config_html)
fixed_config_html_2 = re.sub(r'&lt;/span&gt;', r'</span>', fixed_config_html_1)
print('-' * 80)
print(fixed_config_html_2)
print('-' * 80)
print('#' * 80)
# later gator
sys.exit(0)
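# Illustrative invocation (all values are placeholders, not from the original
# script):
#
#   skillet_source=online TARGET_IP=192.0.2.10 TARGET_USERNAME=admin \
#   TARGET_PASSWORD=... FROM_CANDIDATE=True python generate_skillet_preview.py
#
# With skillet_source left at its 'offline' default, BASE_CONFIG and
# LATEST_CONFIG must instead point at the two saved XML configurations to diff.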
|
[] |
[] |
[
"skillet_source",
"TARGET_USERNAME",
"TARGET_PASSWORD",
"BASE_CONFIG",
"LATEST_CONFIG",
"TARGET_IP",
"FROM_CANDIDATE"
] |
[]
|
["skillet_source", "TARGET_USERNAME", "TARGET_PASSWORD", "BASE_CONFIG", "LATEST_CONFIG", "TARGET_IP", "FROM_CANDIDATE"]
|
python
| 7 | 0 | |
db/db.go
|
package db
import (
"database/sql"
"fmt"
"github.com/joho/godotenv"
"log"
"os"
)
func New() *sql.DB {
config, err := loadConfig()
if err != nil {
log.Fatalf("Error loading config.env file: %v", err)
}
db, err := initDatabase(config)
if err != nil {
log.Fatalf("Error initializing the database: %v", err)
}
if err := db.Ping(); err != nil {
log.Fatalf("Error pinging DB: %v", err)
}
return db
}
// Config represents structure of the config.env
type Config struct {
dbUser string
dbPass string
dbName string
dbHost string
dbPort string
}
func loadConfig() (config *Config, err error) {
err = godotenv.Load("config.env")
if err != nil {
log.Fatal("Error loading config.env file")
}
config = &Config {
dbUser : os.Getenv("db_user"),
dbPass : os.Getenv("db_pass"),
dbName : os.Getenv("db_name"),
dbHost : os.Getenv("db_host"),
dbPort : os.Getenv("db_port"),
}
return config, err
}
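// Example config.env consumed by loadConfig above (placeholder values,
// assumption only):
//
//   db_user=postgres
//   db_pass=secret
//   db_name=app
//   db_host=localhost
//   db_port=5432
//
// godotenv.Load exports these keys into the process environment so the
// os.Getenv calls above can read them.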
func initDatabase(c *Config) (db *sql.DB, err error) {
psqlInfo := fmt.Sprintf("host=%s port=%s user=%s "+
"password=%s dbname=%s sslmode=disable",
c.dbHost, c.dbPort, c.dbUser, c.dbPass, c.dbName)
db, err = sql.Open("postgres", psqlInfo)
return db, err
}
|
[
"\"db_user\"",
"\"db_pass\"",
"\"db_name\"",
"\"db_host\"",
"\"db_port\""
] |
[] |
[
"db_user",
"db_port",
"db_name",
"db_pass",
"db_host"
] |
[]
|
["db_user", "db_port", "db_name", "db_pass", "db_host"]
|
go
| 5 | 0 | |
extensions-core/druid-kerberos/src/main/java/org/apache/druid/security/kerberos/KerberosAuthenticator.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.security.kerberos;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonTypeName;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import org.apache.commons.codec.binary.Base64;
import org.apache.druid.guice.annotations.Self;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.server.DruidNode;
import org.apache.druid.server.security.AuthConfig;
import org.apache.druid.server.security.AuthenticationResult;
import org.apache.druid.server.security.Authenticator;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.apache.hadoop.security.authentication.util.Signer;
import org.apache.hadoop.security.authentication.util.SignerException;
import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
import org.eclipse.jetty.client.api.Request;
import org.eclipse.jetty.http.HttpHeader;
import sun.security.krb5.EncryptedData;
import sun.security.krb5.EncryptionKey;
import sun.security.krb5.internal.APReq;
import sun.security.krb5.internal.EncTicketPart;
import sun.security.krb5.internal.Krb5;
import sun.security.krb5.internal.Ticket;
import sun.security.krb5.internal.crypto.KeyUsage;
import sun.security.util.DerInputStream;
import sun.security.util.DerValue;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosKey;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.kerberos.KeyTab;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import javax.servlet.DispatcherType;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.IOException;
import java.net.HttpCookie;
import java.security.Principal;
import java.text.SimpleDateFormat;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.TimeZone;
import java.util.concurrent.ThreadLocalRandom;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
@JsonTypeName("kerberos")
public class KerberosAuthenticator implements Authenticator
{
private static final Logger log = new Logger(KerberosAuthenticator.class);
private static final Pattern HADOOP_AUTH_COOKIE_REGEX = Pattern.compile(".*p=(\\S+)&t=.*");
public static final List<String> DEFAULT_EXCLUDED_PATHS = Collections.emptyList();
public static final String SIGNED_TOKEN_ATTRIBUTE = "signedToken";
private final DruidNode node;
private final String serverPrincipal;
private final String serverKeytab;
private final String authToLocal;
private final List<String> excludedPaths;
private final String cookieSignatureSecret;
private final String authorizerName;
private final String name;
private LoginContext loginContext;
@JsonCreator
public KerberosAuthenticator(
@JsonProperty("serverPrincipal") String serverPrincipal,
@JsonProperty("serverKeytab") String serverKeytab,
@JsonProperty("authToLocal") String authToLocal,
@JsonProperty("excludedPaths") List<String> excludedPaths,
@JsonProperty("cookieSignatureSecret") String cookieSignatureSecret,
@JsonProperty("authorizerName") String authorizerName,
@JsonProperty("name") String name,
@JacksonInject @Self DruidNode node
)
{
this.node = node;
this.serverKeytab = serverKeytab;
this.authToLocal = authToLocal == null ? "DEFAULT" : authToLocal;
this.excludedPaths = excludedPaths == null ? DEFAULT_EXCLUDED_PATHS : excludedPaths;
this.cookieSignatureSecret = cookieSignatureSecret;
this.authorizerName = authorizerName;
this.name = Preconditions.checkNotNull(name);
try {
this.serverPrincipal = SecurityUtil.getServerPrincipal(serverPrincipal, node.getHost());
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public Filter getFilter()
{
return new AuthenticationFilter()
{
private Signer mySigner;
@Override
public void init(FilterConfig filterConfig) throws ServletException
{
ClassLoader prevLoader = Thread.currentThread().getContextClassLoader();
try {
          // The AuthenticationHandler is created during AuthenticationFilter.init via reflection using the
          // thread context class loader. In Druid the handler class is loaded as an extension while filter
          // init runs on the main thread, so we need to set the context class loader explicitly to the
          // extension class loader.
Thread.currentThread().setContextClassLoader(AuthenticationFilter.class.getClassLoader());
super.init(filterConfig);
String configPrefix = filterConfig.getInitParameter(CONFIG_PREFIX);
configPrefix = (configPrefix != null) ? configPrefix + "." : "";
Properties config = getConfiguration(configPrefix, filterConfig);
String signatureSecret = config.getProperty(configPrefix + SIGNATURE_SECRET);
if (signatureSecret == null) {
signatureSecret = Long.toString(ThreadLocalRandom.current().nextLong());
log.warn("'signature.secret' configuration not set, using a random value as secret");
}
final byte[] secretBytes = StringUtils.toUtf8(signatureSecret);
SignerSecretProvider signerSecretProvider = new SignerSecretProvider()
{
@Override
public void init(Properties config, ServletContext servletContext, long tokenValidity)
{
}
@Override
public byte[] getCurrentSecret()
{
return secretBytes;
}
@Override
public byte[][] getAllSecrets()
{
return new byte[][]{secretBytes};
}
};
mySigner = new Signer(signerSecretProvider);
}
finally {
Thread.currentThread().setContextClassLoader(prevLoader);
}
}
// Copied from hadoop-auth's AuthenticationFilter, to allow us to change error response handling in doFilterSuper
@Override
protected AuthenticationToken getToken(HttpServletRequest request) throws AuthenticationException
{
AuthenticationToken token = null;
String tokenStr = null;
Cookie[] cookies = request.getCookies();
if (cookies != null) {
for (Cookie cookie : cookies) {
if (cookie.getName().equals(AuthenticatedURL.AUTH_COOKIE)) {
tokenStr = cookie.getValue();
try {
tokenStr = mySigner.verifyAndExtract(tokenStr);
}
catch (SignerException ex) {
throw new AuthenticationException(ex);
}
break;
}
}
}
if (tokenStr != null) {
token = AuthenticationToken.parse(tokenStr);
if (!token.getType().equals(getAuthenticationHandler().getType())) {
throw new AuthenticationException("Invalid AuthenticationToken type");
}
if (token.isExpired()) {
throw new AuthenticationException("AuthenticationToken expired");
}
}
return token;
}
@Override
public void doFilter(
ServletRequest request, ServletResponse response, FilterChain filterChain
) throws IOException, ServletException
{
HttpServletRequest httpReq = (HttpServletRequest) request;
// If there's already an auth result, then we have authenticated already, skip this.
if (request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT) != null) {
filterChain.doFilter(request, response);
return;
}
if (loginContext == null) {
initializeKerberosLogin();
}
String path = ((HttpServletRequest) request).getRequestURI();
if (isExcluded(path)) {
filterChain.doFilter(request, response);
} else {
String clientPrincipal = null;
try {
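            // If no cookies are present, fall back to parsing the SPNEGO Authorization header;
            // otherwise pull the principal out of the hadoop.auth cookie value.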
Cookie[] cookies = httpReq.getCookies();
if (cookies == null) {
clientPrincipal = getPrincipalFromRequestNew((HttpServletRequest) request);
} else {
clientPrincipal = null;
for (Cookie cookie : cookies) {
if ("hadoop.auth".equals(cookie.getName())) {
Matcher matcher = HADOOP_AUTH_COOKIE_REGEX.matcher(cookie.getValue());
if (matcher.matches()) {
clientPrincipal = matcher.group(1);
break;
}
}
}
}
}
catch (Exception ex) {
clientPrincipal = null;
}
if (clientPrincipal != null) {
request.setAttribute(
AuthConfig.DRUID_AUTHENTICATION_RESULT,
new AuthenticationResult(clientPrincipal, authorizerName, name, null)
);
}
}
doFilterSuper(request, response, filterChain);
}
// Copied from hadoop-auth's AuthenticationFilter, to allow us to change error response handling
private void doFilterSuper(ServletRequest request, ServletResponse response, FilterChain filterChain)
throws IOException, ServletException
{
boolean unauthorizedResponse = true;
int errCode = HttpServletResponse.SC_UNAUTHORIZED;
AuthenticationException authenticationEx = null;
HttpServletRequest httpRequest = (HttpServletRequest) request;
HttpServletResponse httpResponse = (HttpServletResponse) response;
boolean isHttps = "https".equals(httpRequest.getScheme());
try {
boolean newToken = false;
AuthenticationToken token;
try {
token = getToken(httpRequest);
}
catch (AuthenticationException ex) {
log.warn("AuthenticationToken ignored: " + ex.getMessage());
// will be sent back in a 401 unless filter authenticates
authenticationEx = ex;
token = null;
}
if (getAuthenticationHandler().managementOperation(token, httpRequest, httpResponse)) {
if (token == null) {
if (log.isDebugEnabled()) {
log.debug("Request [{%s}] triggering authentication", getRequestURL(httpRequest));
}
token = getAuthenticationHandler().authenticate(httpRequest, httpResponse);
if (token != null && token.getExpires() != 0 &&
token != AuthenticationToken.ANONYMOUS) {
token.setExpires(System.currentTimeMillis() + getValidity() * 1000);
}
newToken = true;
}
if (token != null) {
unauthorizedResponse = false;
if (log.isDebugEnabled()) {
log.debug("Request [{%s}] user [{%s}] authenticated", getRequestURL(httpRequest), token.getUserName());
}
final AuthenticationToken authToken = token;
httpRequest = new HttpServletRequestWrapper(httpRequest)
{
@Override
public String getAuthType()
{
return authToken.getType();
}
@Override
public String getRemoteUser()
{
return authToken.getUserName();
}
@Override
public Principal getUserPrincipal()
{
return (authToken != AuthenticationToken.ANONYMOUS) ? authToken : null;
}
};
if (newToken && !token.isExpired() && token != AuthenticationToken.ANONYMOUS) {
String signedToken = mySigner.sign(token.toString());
tokenToAuthCookie(
httpResponse,
signedToken,
getCookieDomain(),
getCookiePath(),
token.getExpires(),
!token.isExpired() && token.getExpires() > 0,
isHttps
);
request.setAttribute(SIGNED_TOKEN_ATTRIBUTE, tokenToCookieString(
signedToken,
getCookieDomain(),
getCookiePath(),
token.getExpires(),
!token.isExpired() && token.getExpires() > 0,
isHttps
));
}
// Since this request is validated also set DRUID_AUTHENTICATION_RESULT
request.setAttribute(
AuthConfig.DRUID_AUTHENTICATION_RESULT,
new AuthenticationResult(token.getName(), authorizerName, name, null)
);
doFilter(filterChain, httpRequest, httpResponse);
}
} else {
unauthorizedResponse = false;
}
}
catch (AuthenticationException ex) {
// exception from the filter itself is fatal
errCode = HttpServletResponse.SC_FORBIDDEN;
authenticationEx = ex;
if (log.isDebugEnabled()) {
log.debug(ex, "Authentication exception: " + ex.getMessage());
} else {
log.warn("Authentication exception: " + ex.getMessage());
}
}
if (unauthorizedResponse) {
if (!httpResponse.isCommitted()) {
tokenToAuthCookie(
httpResponse,
"",
getCookieDomain(),
getCookiePath(),
0,
false,
isHttps
);
            // If the response code is 401, the WWW-Authenticate header should be present;
            // reset to 403 if it is not found.
if ((errCode == HttpServletResponse.SC_UNAUTHORIZED)
&& (!httpResponse.containsHeader(
org.apache.hadoop.security.authentication.client.KerberosAuthenticator.WWW_AUTHENTICATE))) {
errCode = HttpServletResponse.SC_FORBIDDEN;
}
if (authenticationEx == null) {
// Don't send an error response here, unlike the base AuthenticationFilter implementation.
// This request did not use Kerberos auth.
// Instead, we will send an error response in PreResponseAuthorizationCheckFilter to allow
// other Authenticator implementations to check the request.
filterChain.doFilter(request, response);
} else {
// Do send an error response here, we attempted Kerberos authentication and failed.
httpResponse.sendError(errCode, authenticationEx.getMessage());
}
}
}
}
};
}
@Override
public Class<? extends Filter> getFilterClass()
{
return null;
}
@Override
public Map<String, String> getInitParameters()
{
Map<String, String> params = new HashMap<String, String>();
params.put("kerberos.principal", serverPrincipal);
params.put("kerberos.keytab", serverKeytab);
params.put(AuthenticationFilter.AUTH_TYPE, DruidKerberosAuthenticationHandler.class.getName());
params.put("kerberos.name.rules", authToLocal);
if (cookieSignatureSecret != null) {
params.put("signature.secret", cookieSignatureSecret);
}
return params;
}
@Override
public String getPath()
{
return "/*";
}
@Override
public EnumSet<DispatcherType> getDispatcherType()
{
return null;
}
@Override
public String getAuthChallengeHeader()
{
return "Negotiate";
}
@Override
public AuthenticationResult authenticateJDBCContext(Map<String, Object> context)
{
throw new UnsupportedOperationException("JDBC Kerberos auth not supported yet");
}
private boolean isExcluded(String path)
{
for (String excluded : excludedPaths) {
if (path.startsWith(excluded)) {
return true;
}
}
return false;
}
@Override
public void decorateProxyRequest(
HttpServletRequest clientRequest, HttpServletResponse proxyResponse, Request proxyRequest
)
{
Object cookieToken = clientRequest.getAttribute(SIGNED_TOKEN_ATTRIBUTE);
if (cookieToken != null && cookieToken instanceof String) {
log.debug("Found cookie token will attache it to proxyRequest as cookie");
String authResult = (String) cookieToken;
String existingCookies = proxyRequest.getCookies()
.stream()
.map(HttpCookie::toString)
.collect(Collectors.joining(";"));
proxyRequest.header(HttpHeader.COOKIE, Joiner.on(";").join(authResult, existingCookies));
}
}
/**
* Kerberos context configuration for the JDK GSS library. Copied from hadoop-auth's KerberosAuthenticationHandler.
*/
public static class DruidKerberosConfiguration extends Configuration
{
private String keytab;
private String principal;
public DruidKerberosConfiguration(String keytab, String principal)
{
this.keytab = keytab;
this.principal = principal;
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name)
{
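      // Build the JAAS login options for the Kerberos acceptor; the IBM and Oracle/OpenJDK
      // Krb5LoginModule implementations use different option names.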
Map<String, String> options = new HashMap<String, String>();
if (System.getProperty("java.vendor").contains("IBM")) {
options.put(
"useKeytab",
keytab.startsWith("file://") ? keytab : "file://" + keytab
);
options.put("principal", principal);
options.put("credsType", "acceptor");
} else {
options.put("keyTab", keytab);
options.put("principal", principal);
options.put("useKeyTab", "true");
options.put("storeKey", "true");
options.put("doNotPrompt", "true");
options.put("useTicketCache", "true");
options.put("renewTGT", "true");
options.put("isInitiator", "false");
}
options.put("refreshKrb5Config", "true");
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
if (System.getProperty("java.vendor").contains("IBM")) {
options.put("useDefaultCcache", "true");
// The first value searched when "useDefaultCcache" is used.
System.setProperty("KRB5CCNAME", ticketCache);
options.put("renewTGT", "true");
options.put("credsType", "both");
} else {
options.put("ticketCache", ticketCache);
}
}
if (log.isDebugEnabled()) {
options.put("debug", "true");
}
return new AppConfigurationEntry[]{
new AppConfigurationEntry(
KerberosUtil.getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options
),
};
}
}
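  /**
   * Extracts the client principal from a SPNEGO "Authorization: Negotiate ..." header by decoding the
   * AP-REQ, locating the server key with the matching encryption type in the login keytab, and
   * decrypting the ticket's encrypted part. Returns null if the header is absent or the principal
   * cannot be derived.
   */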
private String getPrincipalFromRequestNew(HttpServletRequest req)
{
String authorization = req.getHeader(org.apache.hadoop.security.authentication.client.KerberosAuthenticator.AUTHORIZATION);
if (authorization == null
|| !authorization.startsWith(org.apache.hadoop.security.authentication.client.KerberosAuthenticator.NEGOTIATE)) {
return null;
} else {
authorization = authorization.substring(org.apache.hadoop.security.authentication.client.KerberosAuthenticator.NEGOTIATE
.length()).trim();
final Base64 base64 = new Base64(0);
final byte[] clientToken = base64.decode(authorization);
try {
DerInputStream ticketStream = new DerInputStream(clientToken);
DerValue[] values = ticketStream.getSet(clientToken.length, true);
// see this link for AP-REQ format: https://tools.ietf.org/html/rfc1510#section-5.5.1
for (DerValue value : values) {
if (isValueAPReq(value)) {
APReq apReq = new APReq(value);
Ticket ticket = apReq.ticket;
EncryptedData encData = ticket.encPart;
int eType = encData.getEType();
// find the server's key
EncryptionKey finalKey = null;
Subject serverSubj = loginContext.getSubject();
Set<Object> serverCreds = serverSubj.getPrivateCredentials(Object.class);
for (Object cred : serverCreds) {
if (cred instanceof KeyTab) {
KeyTab serverKeyTab = (KeyTab) cred;
KerberosPrincipal kerberosPrincipal = new KerberosPrincipal(serverPrincipal);
KerberosKey[] serverKeys = serverKeyTab.getKeys(kerberosPrincipal);
for (KerberosKey key : serverKeys) {
if (key.getKeyType() == eType) {
finalKey = new EncryptionKey(key.getKeyType(), key.getEncoded());
break;
}
}
}
}
if (finalKey == null) {
log.error("Could not find matching key from server creds.");
return null;
}
// decrypt the ticket with the server's key
byte[] decryptedBytes = encData.decrypt(finalKey, KeyUsage.KU_TICKET);
decryptedBytes = encData.reset(decryptedBytes);
EncTicketPart decrypted = new EncTicketPart(decryptedBytes);
String clientPrincipal = decrypted.cname.toString();
return clientPrincipal;
}
}
}
catch (Exception ex) {
Throwables.propagate(ex);
}
}
return null;
}
private boolean isValueAPReq(DerValue value)
{
return value.isConstructed((byte) Krb5.KRB_AP_REQ);
}
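  // Performs the server-side (acceptor) Kerberos login with the configured principal and keytab so that
  // the keytab keys are available for decrypting client tickets in getPrincipalFromRequestNew.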
private void initializeKerberosLogin() throws ServletException
{
String keytab;
try {
if (serverPrincipal == null || serverPrincipal.trim().length() == 0) {
throw new ServletException("Principal not defined in configuration");
}
keytab = serverKeytab;
if (keytab == null || keytab.trim().length() == 0) {
throw new ServletException("Keytab not defined in configuration");
}
if (!new File(keytab).exists()) {
throw new ServletException("Keytab does not exist: " + keytab);
}
Set<Principal> principals = new HashSet<Principal>();
principals.add(new KerberosPrincipal(serverPrincipal));
Subject subject = new Subject(false, principals, new HashSet<Object>(), new HashSet<Object>());
DruidKerberosConfiguration kerberosConfiguration = new DruidKerberosConfiguration(keytab, serverPrincipal);
log.info("Login using keytab " + keytab + ", for principal " + serverPrincipal);
loginContext = new LoginContext("", subject, null, kerberosConfiguration);
loginContext.login();
log.info("Initialized, principal %s from keytab %s", serverPrincipal, keytab);
}
catch (Exception ex) {
throw new ServletException(ex);
}
}
  /**
   * Creates the Hadoop authentication HTTP cookie.
   *
   * The following code was copied from Hadoop 3.0.0's
   * org.apache.hadoop.security.authentication.server.AuthenticationFilter#createAuthCookie(
   * javax.servlet.http.HttpServletResponse, java.lang.String, java.lang.String, java.lang.String,
   * long, boolean, boolean)
   * to avoid a compilation issue caused by its new signature.
   *
   * @param resp the response object.
   * @param token authentication token for the cookie.
   * @param domain the cookie domain.
   * @param path the cookie path.
   * @param expires UNIX timestamp that indicates the expire date of the
   *                cookie. It has no effect if its value < 0.
   * @param isCookiePersistent whether the cookie is persistent or not.
   * @param isSecure is the cookie secure?
   */
private static void tokenToAuthCookie(
HttpServletResponse resp, String token,
String domain, String path, long expires,
boolean isCookiePersistent,
boolean isSecure
)
{
resp.addHeader("Set-Cookie", tokenToCookieString(token, domain, path, expires, isCookiePersistent, isSecure));
}
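  // Builds the Set-Cookie value, e.g. hadoop.auth="<signed token>"; Path=<path>; Domain=<domain>;
  // Expires=<date>; Secure; HttpOnly (the optional attributes are included only when configured).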
private static String tokenToCookieString(
String token,
String domain, String path, long expires,
boolean isCookiePersistent,
boolean isSecure
)
{
StringBuilder sb = new StringBuilder(AuthenticatedURL.AUTH_COOKIE)
.append("=");
if (token != null && token.length() > 0) {
sb.append("\"").append(token).append("\"");
}
if (path != null) {
sb.append("; Path=").append(path);
}
if (domain != null) {
sb.append("; Domain=").append(domain);
}
if (expires >= 0 && isCookiePersistent) {
Date date = new Date(expires);
SimpleDateFormat df = new SimpleDateFormat("EEE, dd-MMM-yyyy HH:mm:ss zzz", Locale.ENGLISH);
df.setTimeZone(TimeZone.getTimeZone("GMT"));
sb.append("; Expires=").append(df.format(date));
}
if (isSecure) {
sb.append("; Secure");
}
sb.append("; HttpOnly");
return sb.toString();
}
}
| ["\"KRB5CCNAME\""] | [] | ["KRB5CCNAME"] | [] | ["KRB5CCNAME"] | java | 1 | 0 | |
conda_tools/test/test_copy_ambertools.py
|
import os
import sys
from mock import patch
import shutil
sys.path.insert(0, '..')
import copy_ambertools as cam
this_path = os.path.join(os.path.dirname(__file__))
@patch('copy_ambertools._copy_folder')
@patch('os.getenv')
def test_copy_ambertools(mock_getenv, mock_copy):
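    """mkdir_ambertree/copy_tree should copy the linux-64 folder from the fake AMBER_SRC tree
    into the current working directory (verified via the mocked _copy_folder)."""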
fake_amberhome = os.path.join(this_path, 'fake_data', 'fake_amber')
ambertools_src = os.path.join(fake_amberhome, 'AmberTools')
def side_effect(name, *args, **kwargs):
if name == 'AMBER_SRC':
return fake_amberhome
elif name == 'RECIPE_DIR':
return os.path.join(this_path, '..', '..', 'conda-ambertools-single-python')
mock_getenv.side_effect = side_effect
cwd = os.getcwd()
tmp = 'ok_to_delete_me'
try:
os.mkdir(tmp)
except OSError:
pass
os.chdir(tmp)
cam.mkdir_ambertree()
cam.copy_tree()
os.chdir(cwd)
mock_copy.assert_called_with(os.path.join(fake_amberhome, 'linux-64'), '.')
| [] | [] | [] | [] | [] | python | 0 | 0 | |
misago/devproject/wsgi.py
|
"""
WSGI config for devproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devproject.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
cmd/azure-keyvault-secrets-webhook/authorization_test.go
|
package main
import (
"os"
"testing"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
type kubeConfig struct {
master string
config string
}
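// ensureIntegrationEnvironment skips the test unless AKV2K8S_K8S_MASTER_URL and AKV2K8S_K8S_CONFIG are
// set, and returns the master URL and kubeconfig path to use for the integration test.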
func ensureIntegrationEnvironment(t *testing.T) kubeConfig {
if os.Getenv("AKV2K8S_K8S_MASTER_URL") == "" || os.Getenv("AKV2K8S_K8S_CONFIG") == "" {
t.Skip("Skipping integration test - no k8s cluster defined")
}
return kubeConfig{
master: os.Getenv("AKV2K8S_K8S_MASTER_URL"),
config: os.Getenv("AKV2K8S_K8S_CONFIG"),
}
}
func TestAuthorization(t *testing.T) {
config := ensureIntegrationEnvironment(t)
podName := os.Getenv("AKV2K8S_K8S_TEST_POD")
podNamespace := os.Getenv("AKV2K8S_K8S_TEST_NAMESPACE")
podIP := os.Getenv("AKV2K8S_K8S_TEST_POD_IP")
cfg, err := clientcmd.BuildConfigFromFlags(config.master, config.config)
if err != nil {
t.Errorf("Error building kubeconfig: %s", err.Error())
}
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
t.Errorf("Error building kubernetes clientset: %s", err.Error())
}
pod := podData{
name: podName,
namespace: podNamespace,
remoteAddress: podIP,
}
err = authorize(kubeClient, pod)
if err != nil {
t.Errorf("failed, error: %+v", err)
}
}
| ["\"AKV2K8S_K8S_MASTER_URL\"", "\"AKV2K8S_K8S_CONFIG\"", "\"AKV2K8S_K8S_MASTER_URL\"", "\"AKV2K8S_K8S_CONFIG\"", "\"AKV2K8S_K8S_TEST_POD\"", "\"AKV2K8S_K8S_TEST_NAMESPACE\"", "\"AKV2K8S_K8S_TEST_POD_IP\""] | [] | ["AKV2K8S_K8S_TEST_POD_IP", "AKV2K8S_K8S_TEST_POD", "AKV2K8S_K8S_MASTER_URL", "AKV2K8S_K8S_TEST_NAMESPACE", "AKV2K8S_K8S_CONFIG"] | [] | ["AKV2K8S_K8S_TEST_POD_IP", "AKV2K8S_K8S_TEST_POD", "AKV2K8S_K8S_MASTER_URL", "AKV2K8S_K8S_TEST_NAMESPACE", "AKV2K8S_K8S_CONFIG"] | go | 5 | 0 | |
cmd/manager/main.go
|
package main
import (
"context"
"flag"
"fmt"
"github.com/operator-framework/operator-marketplace/pkg/builders"
v1 "k8s.io/api/apps/v1"
v12 "k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"net/http"
"os"
"runtime"
"strings"
"time"
"github.com/operator-framework/operator-marketplace/pkg/metrics"
"github.com/operator-framework/operator-marketplace/pkg/migrator"
apiconfigv1 "github.com/openshift/api/config/v1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/signals"
"github.com/operator-framework/operator-marketplace/pkg/apis"
configv1 "github.com/operator-framework/operator-marketplace/pkg/apis/config/v1"
olm "github.com/operator-framework/operator-marketplace/pkg/apis/olm/v1alpha1"
"github.com/operator-framework/operator-marketplace/pkg/controller"
"github.com/operator-framework/operator-marketplace/pkg/controller/options"
"github.com/operator-framework/operator-marketplace/pkg/defaults"
"github.com/operator-framework/operator-marketplace/pkg/operatorhub"
"github.com/operator-framework/operator-marketplace/pkg/status"
sourceCommit "github.com/operator-framework/operator-marketplace/pkg/version"
"github.com/operator-framework/operator-sdk/pkg/k8sutil"
"github.com/operator-framework/operator-sdk/pkg/leader"
sdkVersion "github.com/operator-framework/operator-sdk/version"
log "github.com/sirupsen/logrus"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
kruntime "k8s.io/apimachinery/pkg/runtime"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
const (
initialWait = time.Duration(1) * time.Minute
updateNotificationSendWait = time.Duration(10) * time.Minute
)
var (
version = flag.Bool("version", false, "displays marketplace source commit info.")
tlsKeyPath = flag.String("tls-key", "", "Path to use for private key (requires tls-cert)")
tlsCertPath = flag.String("tls-cert", "", "Path to use for certificate (requires tls-key)")
)
func printVersion() {
log.Printf("Go Version: %s", runtime.Version())
log.Printf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)
log.Printf("operator-sdk Version: %v", sdkVersion.Version)
}
func main() {
printVersion()
// Parse the command line arguments
flag.StringVar(&defaults.Dir, "defaultsDir",
"", "the directory where the default CatalogSources are stored")
var clusterOperatorName string
flag.StringVar(&clusterOperatorName, "clusterOperatorName", "", "the name of the OpenShift ClusterOperator that should reflect this operator's status, or the empty string to disable ClusterOperator updates")
flag.Parse()
// Check if version flag was set
if *version {
fmt.Print(sourceCommit.String())
// Exit immediately
os.Exit(0)
}
// set TLS to serve metrics over a secure channel if cert is provided
// cert is provided by default by the marketplace-trusted-ca volume mounted as part of the marketplace-operator deployment
var useTLS bool
if *tlsCertPath != "" && *tlsKeyPath == "" || *tlsCertPath == "" && *tlsKeyPath != "" {
log.Warn("both --tls-key and --tls-crt must be provided for TLS to be enabled, falling back to non-https")
} else if *tlsCertPath == "" && *tlsKeyPath == "" {
log.Info("TLS keys not set, using non-https for metrics")
} else {
log.Info("TLS keys set, using https for metrics")
useTLS = true
}
err := metrics.ServePrometheus(useTLS, *tlsCertPath, *tlsKeyPath)
if err != nil {
log.Fatalf("failed to serve prometheus metrics: TLS enabled %d: %s", useTLS, err)
}
namespace, err := k8sutil.GetWatchNamespace()
if err != nil {
log.Fatalf("failed to get watch namespace: %v", err)
}
// Get a config to talk to the apiserver
cfg, err := config.GetConfig()
if err != nil {
log.Fatal(err)
}
// Set OpenShift config API availability
err = configv1.SetConfigAPIAvailability(cfg)
if err != nil {
log.Fatal(err)
}
// Create a new Cmd to provide shared dependencies and start components
// Even though we are asking to watch all namespaces, we only handle events
	// from the operator's namespace. The reason for watching all namespaces is
	// to watch for CatalogSources in targetNamespaces being deleted and to
	// recreate them.
mgr, err := manager.New(cfg, manager.Options{Namespace: ""})
if err != nil {
log.Fatal(err)
}
log.Print("Registering Components.")
// Setup Scheme for all defined resources
if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
exit(err)
}
// Add external resource to scheme
if err := olm.AddToScheme(mgr.GetScheme()); err != nil {
exit(err)
}
if err := v1beta1.AddToScheme(mgr.GetScheme()); err != nil {
exit(err)
}
// If the config API is available add the config resources to the scheme
if configv1.IsAPIAvailable() {
if err := apiconfigv1.AddToScheme(mgr.GetScheme()); err != nil {
exit(err)
}
}
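	// stopCh is closed when the process receives a termination signal; it stops both the manager
	// and the ClusterOperator status reporter.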
stopCh := signals.Context().Done()
var statusReporter status.Reporter = &status.NoOpReporter{}
if clusterOperatorName != "" {
statusReporter, err = status.NewReporter(cfg, mgr, namespace, clusterOperatorName, os.Getenv("RELEASE_VERSION"), stopCh)
if err != nil {
exit(err)
}
}
// Populate the global default OperatorSources definition and config
err = defaults.PopulateGlobals()
if err != nil {
exit(err)
}
// Setup all Controllers
if err := controller.AddToManager(mgr, options.ControllerOptions{}); err != nil {
exit(err)
}
// Serve a health check.
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
})
go http.ListenAndServe(":8080", nil)
// Wait until this instance becomes the leader.
log.Info("Waiting to become leader.")
err = leader.Become(context.TODO(), "marketplace-operator-lock")
if err != nil {
log.Error(err, "Failed to retry for leader lock")
os.Exit(1)
}
log.Info("Elected leader.")
log.Print("Starting the Cmd.")
// migrate away from Marketplace API
clientGo, err := client.New(cfg, client.Options{Scheme: mgr.GetScheme()})
if err != nil && !k8sErrors.IsNotFound(err) {
log.Error(err, "Failed to instantiate client for migrator")
os.Exit(1)
}
migrator := migrator.New(clientGo)
err = migrator.Migrate()
if err != nil {
log.Error(err, "[migration] Error in migrating Marketplace away from OperatorSource API")
}
err = cleanUpOldOpsrcResources(clientGo)
if err != nil {
log.Error(err, "OperatorSource child resource cleanup failed")
}
// Handle the defaults
err = ensureDefaults(cfg, mgr.GetScheme())
if err != nil {
exit(err)
}
err = defaults.RemoveObsoleteOpsrc(clientGo)
if err != nil {
log.Error(err, "[defaults] Could not remove obsolete default OperatorSource/s")
}
// statusReportingDoneCh will be closed after the operator has successfully stopped reporting ClusterOperator status.
statusReportingDoneCh := statusReporter.StartReporting()
// Start the Cmd
err = mgr.Start(stopCh)
// Wait for ClusterOperator status reporting routine to close the statusReportingDoneCh channel.
<-statusReportingDoneCh
exit(err)
}
// exit stops the reporting of ClusterOperator status and exits with the proper exit code.
func exit(err error) {
// If an error exists then exit with status set to 1
if err != nil {
log.Fatalf("The operator encountered an error, exit code 1: %v", err)
}
// No error, graceful termination
log.Info("The operator exited gracefully, exit code 0")
os.Exit(0)
}
// ensureDefaults ensures that all the default OperatorSources are present on
// the cluster
func ensureDefaults(cfg *rest.Config, scheme *kruntime.Scheme) error {
// The default client serves read requests from the cache which only gets
// initialized after mgr.Start(). So we need to instantiate a new client
// for the defaults handler.
clientForDefaults, err := client.New(cfg, client.Options{Scheme: scheme})
if err != nil {
log.Errorf("Error initializing client for handling defaults - %v", err)
return err
}
if configv1.IsAPIAvailable() {
// Check if the cluster OperatorHub config resource is present.
operatorHubCluster := &apiconfigv1.OperatorHub{}
err = clientForDefaults.Get(context.TODO(), client.ObjectKey{Name: operatorhub.DefaultName}, operatorHubCluster)
// The default OperatorHub config resource is present which will take care of ensuring defaults
if err == nil {
return nil
}
}
// Ensure that the default OperatorSources are present based on the definitions
// in the defaults directory
result := defaults.New(defaults.GetGlobals()).EnsureAll(clientForDefaults)
if len(result) != 0 {
return fmt.Errorf("[defaults] Error ensuring default OperatorSource(s) - %v", result)
}
return nil
}
// cleanUpOldOpsrcResources cleans up old deployments and services associated with OperatorSources
func cleanUpOldOpsrcResources(kubeClient client.Client) error {
ctx := context.TODO()
deploy := &v1.DeploymentList{}
svc := &v12.ServiceList{}
o := &client.ListOptions{}
if err := o.SetLabelSelector(strings.Join([]string{builders.OpsrcOwnerNameLabel, builders.OpsrcOwnerNamespaceLabel}, ",")); err != nil {
return err
}
allErrors := []error{}
if err := kubeClient.List(ctx, o, deploy); err == nil {
for _, d := range deploy.Items {
if err := kubeClient.Delete(ctx, &d); err != nil {
allErrors = append(allErrors, err)
}
}
} else {
allErrors = append(allErrors, err)
}
if err := kubeClient.List(ctx, o, svc); err == nil {
for _, s := range svc.Items {
if err := kubeClient.Delete(ctx, &s); err != nil {
allErrors = append(allErrors, err)
}
}
} else {
allErrors = append(allErrors, err)
}
return utilerrors.NewAggregate(allErrors)
}
| ["\"RELEASE_VERSION\""] | [] | ["RELEASE_VERSION"] | [] | ["RELEASE_VERSION"] | go | 1 | 0 | |
mne/forward/forward.py
|
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Martin Luessi <[email protected]>
#
# License: BSD (3-clause)
from time import time
from copy import deepcopy
import re
import numpy as np
from scipy import linalg, sparse
import shutil
import os
from os import path as op
import tempfile
from ..io import RawArray, Info
from ..io.constants import FIFF
from ..io.open import fiff_open
from ..io.tree import dir_tree_find
from ..io.tag import find_tag, read_tag
from ..io.matrix import (_read_named_matrix, _transpose_named_matrix,
write_named_matrix)
from ..io.meas_info import read_bad_channels, write_info
from ..io.pick import (pick_channels_forward, pick_info, pick_channels,
pick_types)
from ..io.write import (write_int, start_block, end_block,
write_coord_trans, write_ch_info, write_name_list,
write_string, start_file, end_file, write_id)
from ..io.base import BaseRaw
from ..evoked import Evoked, EvokedArray
from ..epochs import BaseEpochs
from ..source_space import (_read_source_spaces_from_tree,
find_source_space_hemi, _set_source_space_vertices,
_write_source_spaces_to_fid)
from ..source_estimate import _BaseSourceEstimate
from ..transforms import (transform_surface_to, invert_transform,
write_trans)
from ..utils import (_check_fname, get_subjects_dir, has_mne_c, warn,
run_subprocess, check_fname, logger, verbose, fill_doc,
_validate_type, _check_compensation_grade, _check_option)
from ..label import Label
from ..fixes import einsum
class Forward(dict):
"""Forward class to represent info from forward solution."""
def copy(self):
"""Copy the Forward instance."""
return Forward(deepcopy(self))
def __repr__(self):
"""Summarize forward info instead of printing all."""
entr = '<Forward'
nchan = len(pick_types(self['info'], meg=True, eeg=False, exclude=[]))
entr += ' | ' + 'MEG channels: %d' % nchan
nchan = len(pick_types(self['info'], meg=False, eeg=True, exclude=[]))
entr += ' | ' + 'EEG channels: %d' % nchan
src_types = np.array([src['type'] for src in self['src']])
if (src_types == 'surf').all():
entr += (' | Source space: Surface with %d vertices'
% self['nsource'])
elif (src_types == 'vol').all():
entr += (' | Source space: Volume with %d grid points'
% self['nsource'])
elif (src_types == 'discrete').all():
entr += (' | Source space: Discrete with %d dipoles'
% self['nsource'])
else:
count_string = ''
if (src_types == 'surf').any():
count_string += '%d surface, ' % (src_types == 'surf').sum()
if (src_types == 'vol').any():
count_string += '%d volume, ' % (src_types == 'vol').sum()
if (src_types == 'discrete').any():
count_string += '%d discrete, ' \
% (src_types == 'discrete').sum()
count_string = count_string.rstrip(', ')
entr += (' | Source space: Mixed (%s) with %d vertices'
% (count_string, self['nsource']))
if self['source_ori'] == FIFF.FIFFV_MNE_UNKNOWN_ORI:
entr += (' | Source orientation: Unknown')
elif self['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
entr += (' | Source orientation: Fixed')
elif self['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
entr += (' | Source orientation: Free')
entr += '>'
return entr
def _block_diag(A, n):
"""Construct a block diagonal from a packed structure.
You have to try it on a matrix to see what it's doing.
    If A is not sparse, then returns a sparse block diagonal "bd",
    diagonalized from the elements in "A". "A" is ma x na, comprising
    bdn=(na/"n") blocks of submatrices. Each submatrix is ma x "n", and
    these submatrices are placed down the diagonal of the matrix.
    If A is already sparse, then the operation is reversed, yielding a
    block row matrix, where each set of n columns corresponds to a block
    element from the block diagonal.
Parameters
----------
A : array
The matrix
n : int
The block size
Returns
-------
bd : sparse matrix
The block diagonal matrix
"""
if sparse.issparse(A): # then make block sparse
raise NotImplementedError('sparse reversal not implemented yet')
ma, na = A.shape
bdn = na // int(n) # number of submatrices
if na % n > 0:
raise ValueError('Width of matrix must be a multiple of n')
tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)
tmp = np.tile(tmp, (1, n))
ii = tmp.ravel()
jj = np.arange(na, dtype=np.int)[None, :]
jj = jj * np.ones(ma, dtype=np.int)[:, None]
jj = jj.T.ravel() # column indices foreach sparse bd
bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()
return bd
def _inv_block_diag(A, n):
"""Construct an inverse block diagonal from a packed structure.
You have to try it on a matrix to see what it's doing.
"A" is ma x na, comprising bdn=(na/"n") blocks of submatrices.
Each submatrix is ma x "n", and the inverses of these submatrices
are placed down the diagonal of the matrix.
Parameters
----------
A : array
The matrix.
n : int
The block size.
Returns
-------
bd : sparse matrix
The block diagonal matrix.
"""
ma, na = A.shape
bdn = na // int(n) # number of submatrices
if na % n > 0:
raise ValueError('Width of matrix must be a multiple of n')
# modify A in-place to invert each sub-block
A = A.copy()
    for start in range(0, na, n):
        # invert this ma x n sub-block in place (assumes square blocks, i.e. ma == n)
        A[:, start:start + n] = linalg.inv(A[:, start:start + n])
tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)
tmp = np.tile(tmp, (1, n))
ii = tmp.ravel()
jj = np.arange(na, dtype=np.int)[None, :]
jj = jj * np.ones(ma, dtype=np.int)[:, None]
jj = jj.T.ravel() # column indices foreach sparse bd
bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()
return bd
def _get_tag_int(fid, node, name, id_):
"""Check we have an appropriate tag."""
tag = find_tag(fid, node, id_)
if tag is None:
fid.close()
raise ValueError(name + ' tag not found')
return int(tag.data)
def _read_one(fid, node):
"""Read all interesting stuff for one forward solution."""
# This function assumes the fid is open as a context manager
if node is None:
return None
one = Forward()
one['source_ori'] = _get_tag_int(fid, node, 'Source orientation',
FIFF.FIFF_MNE_SOURCE_ORIENTATION)
one['coord_frame'] = _get_tag_int(fid, node, 'Coordinate frame',
FIFF.FIFF_MNE_COORD_FRAME)
one['nsource'] = _get_tag_int(fid, node, 'Number of sources',
FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
one['nchan'] = _get_tag_int(fid, node, 'Number of channels',
FIFF.FIFF_NCHAN)
try:
one['sol'] = _read_named_matrix(fid, node,
FIFF.FIFF_MNE_FORWARD_SOLUTION,
transpose=True)
one['_orig_sol'] = one['sol']['data'].copy()
except Exception:
logger.error('Forward solution data not found')
raise
try:
fwd_type = FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD
one['sol_grad'] = _read_named_matrix(fid, node, fwd_type,
transpose=True)
one['_orig_sol_grad'] = one['sol_grad']['data'].copy()
except Exception:
one['sol_grad'] = None
if one['sol']['data'].shape[0] != one['nchan'] or \
(one['sol']['data'].shape[1] != one['nsource'] and
one['sol']['data'].shape[1] != 3 * one['nsource']):
raise ValueError('Forward solution matrix has wrong dimensions')
if one['sol_grad'] is not None:
if one['sol_grad']['data'].shape[0] != one['nchan'] or \
(one['sol_grad']['data'].shape[1] != 3 * one['nsource'] and
one['sol_grad']['data'].shape[1] != 3 * 3 * one['nsource']):
raise ValueError('Forward solution gradient matrix has '
'wrong dimensions')
return one
def _read_forward_meas_info(tree, fid):
"""Read light measurement info from forward operator.
Parameters
----------
tree : tree
FIF tree structure.
fid : file id
The file id.
Returns
-------
info : instance of Info
The measurement info.
"""
# This function assumes fid is being used as a context manager
info = Info()
# Information from the MRI file
parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
if len(parent_mri) == 0:
        raise ValueError('No parent MRI information found in operator')
parent_mri = parent_mri[0]
tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_FILE_NAME)
info['mri_file'] = tag.data if tag is not None else None
tag = find_tag(fid, parent_mri, FIFF.FIFF_PARENT_FILE_ID)
info['mri_id'] = tag.data if tag is not None else None
# Information from the MEG file
parent_meg = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
if len(parent_meg) == 0:
raise ValueError('No parent MEG information found in operator')
parent_meg = parent_meg[0]
tag = find_tag(fid, parent_meg, FIFF.FIFF_MNE_FILE_NAME)
info['meas_file'] = tag.data if tag is not None else None
tag = find_tag(fid, parent_meg, FIFF.FIFF_PARENT_FILE_ID)
info['meas_id'] = tag.data if tag is not None else None
# Add channel information
chs = list()
for k in range(parent_meg['nent']):
kind = parent_meg['directory'][k].kind
pos = parent_meg['directory'][k].pos
if kind == FIFF.FIFF_CH_INFO:
tag = read_tag(fid, pos)
chs.append(tag.data)
info['chs'] = chs
info._update_redundant()
# Get the MRI <-> head coordinate transformation
tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
coord_head = FIFF.FIFFV_COORD_HEAD
coord_mri = FIFF.FIFFV_COORD_MRI
coord_device = FIFF.FIFFV_COORD_DEVICE
coord_ctf_head = FIFF.FIFFV_MNE_COORD_CTF_HEAD
if tag is None:
raise ValueError('MRI/head coordinate transformation not found')
cand = tag.data
if cand['from'] == coord_mri and cand['to'] == coord_head:
info['mri_head_t'] = cand
else:
raise ValueError('MRI/head coordinate transformation not found')
# Get the MEG device <-> head coordinate transformation
tag = find_tag(fid, parent_meg, FIFF.FIFF_COORD_TRANS)
if tag is None:
raise ValueError('MEG/head coordinate transformation not found')
cand = tag.data
if cand['from'] == coord_device and cand['to'] == coord_head:
info['dev_head_t'] = cand
elif cand['from'] == coord_ctf_head and cand['to'] == coord_head:
info['ctf_head_t'] = cand
else:
raise ValueError('MEG/head coordinate transformation not found')
info['bads'] = read_bad_channels(fid, parent_meg)
# clean up our bad list, old versions could have non-existent bads
info['bads'] = [bad for bad in info['bads'] if bad in info['ch_names']]
# Check if a custom reference has been applied
tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_CUSTOM_REF)
if tag is None:
tag = find_tag(fid, parent_mri, 236) # Constant 236 used before v0.11
info['custom_ref_applied'] = bool(tag.data) if tag is not None else False
info._check_consistency()
return info
def _subject_from_forward(forward):
"""Get subject id from inverse operator."""
return forward['src']._subject
@verbose
def _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=None):
"""Merge loaded MEG and EEG forward dicts into one dict."""
if megfwd is not None and eegfwd is not None:
if (megfwd['sol']['data'].shape[1] != eegfwd['sol']['data'].shape[1] or
megfwd['source_ori'] != eegfwd['source_ori'] or
megfwd['nsource'] != eegfwd['nsource'] or
megfwd['coord_frame'] != eegfwd['coord_frame']):
raise ValueError('The MEG and EEG forward solutions do not match')
fwd = megfwd
fwd['sol']['data'] = np.r_[fwd['sol']['data'], eegfwd['sol']['data']]
fwd['_orig_sol'] = np.r_[fwd['_orig_sol'], eegfwd['_orig_sol']]
fwd['sol']['nrow'] = fwd['sol']['nrow'] + eegfwd['sol']['nrow']
fwd['sol']['row_names'] = (fwd['sol']['row_names'] +
eegfwd['sol']['row_names'])
if fwd['sol_grad'] is not None:
fwd['sol_grad']['data'] = np.r_[fwd['sol_grad']['data'],
eegfwd['sol_grad']['data']]
fwd['_orig_sol_grad'] = np.r_[fwd['_orig_sol_grad'],
eegfwd['_orig_sol_grad']]
fwd['sol_grad']['nrow'] = (fwd['sol_grad']['nrow'] +
eegfwd['sol_grad']['nrow'])
fwd['sol_grad']['row_names'] = (fwd['sol_grad']['row_names'] +
eegfwd['sol_grad']['row_names'])
fwd['nchan'] = fwd['nchan'] + eegfwd['nchan']
logger.info(' MEG and EEG forward solutions combined')
elif megfwd is not None:
fwd = megfwd
else:
fwd = eegfwd
return fwd
@verbose
def read_forward_solution(fname, include=(), exclude=(), verbose=None):
"""Read a forward solution a.k.a. lead field.
Parameters
----------
fname : string
The file name, which should end with -fwd.fif or -fwd.fif.gz.
include : list, optional
List of names of channels to include. If empty all channels
are included.
exclude : list, optional
List of names of channels to exclude. If empty include all
channels.
%(verbose)s
Returns
-------
fwd : instance of Forward
The forward solution.
See Also
--------
write_forward_solution, make_forward_solution
Notes
-----
Forward solutions, which are derived from an original forward solution with
free orientation, are always stored on disk as forward solution with free
orientation in X/Y/Z RAS coordinates. To apply any transformation to the
forward operator (surface orientation, fixed orientation) please apply
:func:`convert_forward_solution` after reading the forward solution with
:func:`read_forward_solution`.
Forward solutions, which are derived from an original forward solution with
fixed orientation, are stored on disk as forward solution with fixed
surface-based orientations. Please note that the transformation to
surface-based, fixed orientation cannot be reverted after loading the
forward solution with :func:`read_forward_solution`.
"""
check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz',
'_fwd.fif', '_fwd.fif.gz'))
# Open the file, create directory
logger.info('Reading forward solution from %s...' % fname)
f, tree, _ = fiff_open(fname)
with f as fid:
# Find all forward solutions
fwds = dir_tree_find(tree, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
if len(fwds) == 0:
raise ValueError('No forward solutions in %s' % fname)
# Parent MRI data
parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
if len(parent_mri) == 0:
raise ValueError('No parent MRI information in %s' % fname)
parent_mri = parent_mri[0]
src = _read_source_spaces_from_tree(fid, tree, patch_stats=False)
for s in src:
s['id'] = find_source_space_hemi(s)
fwd = None
# Locate and read the forward solutions
megnode = None
eegnode = None
for k in range(len(fwds)):
tag = find_tag(fid, fwds[k], FIFF.FIFF_MNE_INCLUDED_METHODS)
if tag is None:
raise ValueError('Methods not listed for one of the forward '
'solutions')
if tag.data == FIFF.FIFFV_MNE_MEG:
megnode = fwds[k]
elif tag.data == FIFF.FIFFV_MNE_EEG:
eegnode = fwds[k]
megfwd = _read_one(fid, megnode)
if megfwd is not None:
if is_fixed_orient(megfwd):
ori = 'fixed'
else:
ori = 'free'
logger.info(' Read MEG forward solution (%d sources, '
'%d channels, %s orientations)'
% (megfwd['nsource'], megfwd['nchan'], ori))
eegfwd = _read_one(fid, eegnode)
if eegfwd is not None:
if is_fixed_orient(eegfwd):
ori = 'fixed'
else:
ori = 'free'
logger.info(' Read EEG forward solution (%d sources, '
'%d channels, %s orientations)'
% (eegfwd['nsource'], eegfwd['nchan'], ori))
fwd = _merge_meg_eeg_fwds(megfwd, eegfwd)
# Get the MRI <-> head coordinate transformation
tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
if tag is None:
raise ValueError('MRI/head coordinate transformation not found')
mri_head_t = tag.data
if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or
mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):
mri_head_t = invert_transform(mri_head_t)
if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or
mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):
fid.close()
raise ValueError('MRI/head coordinate transformation not '
'found')
fwd['mri_head_t'] = mri_head_t
#
# get parent MEG info
#
fwd['info'] = _read_forward_meas_info(tree, fid)
# MNE environment
parent_env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
if len(parent_env) > 0:
parent_env = parent_env[0]
tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_WORKING_DIR)
if tag is not None:
fwd['info']['working_dir'] = tag.data
tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_COMMAND_LINE)
if tag is not None:
fwd['info']['command_line'] = tag.data
# Transform the source spaces to the correct coordinate frame
# if necessary
# Make sure forward solution is in either the MRI or HEAD coordinate frame
if fwd['coord_frame'] not in (FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD):
raise ValueError('Only forward solutions computed in MRI or head '
'coordinates are acceptable')
# Transform each source space to the HEAD or MRI coordinate frame,
# depending on the coordinate frame of the forward solution
# NOTE: the function transform_surface_to will also work on discrete and
# volume sources
nuse = 0
for s in src:
try:
s = transform_surface_to(s, fwd['coord_frame'], mri_head_t)
except Exception as inst:
raise ValueError('Could not transform source space (%s)' % inst)
nuse += s['nuse']
# Make sure the number of sources match after transformation
if nuse != fwd['nsource']:
raise ValueError('Source spaces do not match the forward solution.')
logger.info(' Source spaces transformed to the forward solution '
'coordinate frame')
fwd['src'] = src
# Handle the source locations and orientations
fwd['source_rr'] = np.concatenate([ss['rr'][ss['vertno'], :]
for ss in src], axis=0)
# Store original source orientations
fwd['_orig_source_ori'] = fwd['source_ori']
# Deal with include and exclude
pick_channels_forward(fwd, include=include, exclude=exclude, copy=False)
if is_fixed_orient(fwd, orig=True):
fwd['source_nn'] = np.concatenate([_src['nn'][_src['vertno'], :]
for _src in fwd['src']], axis=0)
fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
fwd['surf_ori'] = True
else:
fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))
fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI
fwd['surf_ori'] = False
return Forward(fwd)
@verbose
def convert_forward_solution(fwd, surf_ori=False, force_fixed=False,
copy=True, use_cps=True, verbose=None):
"""Convert forward solution between different source orientations.
Parameters
----------
fwd : Forward
The forward solution to modify.
surf_ori : bool, optional (default False)
Use surface-based source coordinate system? Note that force_fixed=True
implies surf_ori=True.
force_fixed : bool, optional (default False)
Force fixed source orientation mode?
copy : bool
Whether to return a new instance or modify in place.
use_cps : bool (default True)
Whether to use cortical patch statistics to define normal
orientations. Only used when surf_ori and/or force_fixed are True.
%(verbose)s
Returns
-------
fwd : Forward
The modified forward solution.
"""
fwd = fwd.copy() if copy else fwd
if force_fixed is True:
surf_ori = True
if any([src['type'] == 'vol' for src in fwd['src']]) and force_fixed:
raise ValueError(
'Forward operator was generated with sources from a '
'volume source space. Conversion to fixed orientation is not '
'possible. Consider using a discrete source space if you have '
'meaningful normal orientations.')
if surf_ori:
if use_cps:
if any(s.get('patch_inds') is not None for s in fwd['src']):
use_ave_nn = True
logger.info(' Average patch normals will be employed in '
'the rotation to the local surface coordinates..'
'..')
else:
use_ave_nn = False
logger.info(' No patch info available. The standard source '
'space normals will be employed in the rotation '
'to the local surface coordinates....')
else:
use_ave_nn = False
# We need to change these entries (only):
# 1. source_nn
# 2. sol['data']
# 3. sol['ncol']
# 4. sol_grad['data']
# 5. sol_grad['ncol']
# 6. source_ori
if is_fixed_orient(fwd, orig=True) or (force_fixed and not use_ave_nn):
# Fixed
fwd['source_nn'] = np.concatenate([s['nn'][s['vertno'], :]
for s in fwd['src']], axis=0)
if not is_fixed_orient(fwd, orig=True):
logger.info(' Changing to fixed-orientation forward '
'solution with surface-based source orientations...')
fix_rot = _block_diag(fwd['source_nn'].T, 1)
# newer versions of numpy require explicit casting here, so *= no
# longer works
fwd['sol']['data'] = (fwd['_orig_sol'] *
fix_rot).astype('float32')
fwd['sol']['ncol'] = fwd['nsource']
if fwd['sol_grad'] is not None:
x = sparse.block_diag([fix_rot] * 3)
fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod
fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
fwd['surf_ori'] = True
elif surf_ori: # Free, surf-oriented
# Rotate the local source coordinate systems
fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))
logger.info(' Converting to surface-based source orientations...')
# Actually determine the source orientations
pp = 0
for s in fwd['src']:
if s['type'] in ['surf', 'discrete']:
for p in range(s['nuse']):
# Project out the surface normal and compute SVD
if use_ave_nn and s.get('patch_inds') is not None:
nn = s['nn'][s['pinfo'][s['patch_inds'][p]], :]
nn = np.sum(nn, axis=0)[:, np.newaxis]
nn /= linalg.norm(nn)
else:
nn = s['nn'][s['vertno'][p], :][:, np.newaxis]
U, S, _ = linalg.svd(np.eye(3, 3) - nn * nn.T)
# Make sure that ez is in the direction of nn
if np.sum(nn.ravel() * U[:, 2].ravel()) < 0:
U *= -1.0
fwd['source_nn'][pp:pp + 3, :] = U.T
pp += 3
else:
pp += 3 * s['nuse']
# Rotate the solution components as well
if force_fixed:
fwd['source_nn'] = fwd['source_nn'][2::3, :]
fix_rot = _block_diag(fwd['source_nn'].T, 1)
# newer versions of numpy require explicit casting here, so *= no
# longer works
fwd['sol']['data'] = (fwd['_orig_sol'] *
fix_rot).astype('float32')
fwd['sol']['ncol'] = fwd['nsource']
if fwd['sol_grad'] is not None:
x = sparse.block_diag([fix_rot] * 3)
fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod
fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
fwd['surf_ori'] = True
else:
surf_rot = _block_diag(fwd['source_nn'].T, 3)
fwd['sol']['data'] = fwd['_orig_sol'] * surf_rot
fwd['sol']['ncol'] = 3 * fwd['nsource']
if fwd['sol_grad'] is not None:
x = sparse.block_diag([surf_rot] * 3)
fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod
fwd['sol_grad']['ncol'] = 9 * fwd['nsource']
fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI
fwd['surf_ori'] = True
else: # Free, cartesian
logger.info(' Cartesian source orientations...')
fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))
fwd['sol']['data'] = fwd['_orig_sol'].copy()
fwd['sol']['ncol'] = 3 * fwd['nsource']
if fwd['sol_grad'] is not None:
fwd['sol_grad']['data'] = fwd['_orig_sol_grad'].copy()
fwd['sol_grad']['ncol'] = 9 * fwd['nsource']
fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI
fwd['surf_ori'] = False
logger.info(' [done]')
return fwd
@verbose
def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
"""Write forward solution to a file.
Parameters
----------
fname : str
File name to save the forward solution to. It should end with -fwd.fif
or -fwd.fif.gz.
fwd : Forward
Forward solution.
overwrite : bool
If True, overwrite destination file (if it exists).
%(verbose)s
See Also
--------
read_forward_solution
Notes
-----
Forward solutions, which are derived from an original forward solution with
free orientation, are always stored on disk as forward solution with free
orientation in X/Y/Z RAS coordinates. Transformations (surface orientation,
fixed orientation) will be reverted. To reapply any transformation to the
forward operator please apply :func:`convert_forward_solution` after
reading the forward solution with :func:`read_forward_solution`.
Forward solutions, which are derived from an original forward solution with
fixed orientation, are stored on disk as forward solution with fixed
surface-based orientations. Please note that the transformation to
surface-based, fixed orientation cannot be reverted after loading the
forward solution with :func:`read_forward_solution`.
"""
check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz',
'_fwd.fif', '_fwd.fif.gz'))
# check for file existence
_check_fname(fname, overwrite)
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_MNE)
#
# MNE env
#
start_block(fid, FIFF.FIFFB_MNE_ENV)
write_id(fid, FIFF.FIFF_BLOCK_ID)
data = fwd['info'].get('working_dir', None)
if data is not None:
write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)
data = fwd['info'].get('command_line', None)
if data is not None:
write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)
end_block(fid, FIFF.FIFFB_MNE_ENV)
#
# Information from the MRI file
#
start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
write_string(fid, FIFF.FIFF_MNE_FILE_NAME, fwd['info']['mri_file'])
if fwd['info']['mri_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_FILE_ID, fwd['info']['mri_id'])
# store the MRI to HEAD transform in MRI file
write_coord_trans(fid, fwd['info']['mri_head_t'])
end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
# write measurement info
write_forward_meas_info(fid, fwd['info'])
# invert our original source space transform
src = list()
for s in fwd['src']:
s = deepcopy(s)
try:
# returns source space to original coordinate frame
# usually MRI
s = transform_surface_to(s, fwd['mri_head_t']['from'],
fwd['mri_head_t'])
except Exception as inst:
raise ValueError('Could not transform source space (%s)' % inst)
src.append(s)
#
# Write the source spaces (again)
#
_write_source_spaces_to_fid(fid, src)
n_vert = sum([ss['nuse'] for ss in src])
if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
n_col = n_vert
else:
n_col = 3 * n_vert
# Undo transformations
sol = fwd['_orig_sol'].copy()
if fwd['sol_grad'] is not None:
sol_grad = fwd['_orig_sol_grad'].copy()
else:
sol_grad = None
if fwd['surf_ori'] is True:
if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
warn('The forward solution, which is stored on disk now, is based '
'on a forward solution with fixed orientation. Please note '
'that the transformation to surface-based, fixed orientation '
'cannot be reverted after loading the forward solution with '
'read_forward_solution.', RuntimeWarning)
else:
warn('This forward solution is based on a forward solution with '
'free orientation. The original forward solution is stored '
'on disk in X/Y/Z RAS coordinates. Any transformation '
'(surface orientation or fixed orientation) will be '
'reverted. To reapply any transformation to the forward '
'operator please apply convert_forward_solution after '
'reading the forward solution with read_forward_solution.',
RuntimeWarning)
#
# MEG forward solution
#
picks_meg = pick_types(fwd['info'], meg=True, eeg=False, ref_meg=False,
exclude=[])
picks_eeg = pick_types(fwd['info'], meg=False, eeg=True, ref_meg=False,
exclude=[])
n_meg = len(picks_meg)
n_eeg = len(picks_eeg)
row_names_meg = [fwd['sol']['row_names'][p] for p in picks_meg]
row_names_eeg = [fwd['sol']['row_names'][p] for p in picks_eeg]
if n_meg > 0:
meg_solution = dict(data=sol[picks_meg], nrow=n_meg, ncol=n_col,
row_names=row_names_meg, col_names=[])
_transpose_named_matrix(meg_solution)
start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_MEG)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION,
fwd['_orig_source_ori'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)
write_int(fid, FIFF.FIFF_NCHAN, n_meg)
write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, meg_solution)
if sol_grad is not None:
meg_solution_grad = dict(data=sol_grad[picks_meg],
nrow=n_meg, ncol=n_col * 3,
row_names=row_names_meg, col_names=[])
_transpose_named_matrix(meg_solution_grad)
write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,
meg_solution_grad)
end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
#
# EEG forward solution
#
if n_eeg > 0:
eeg_solution = dict(data=sol[picks_eeg], nrow=n_eeg, ncol=n_col,
row_names=row_names_eeg, col_names=[])
_transpose_named_matrix(eeg_solution)
start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_EEG)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION,
fwd['_orig_source_ori'])
write_int(fid, FIFF.FIFF_NCHAN, n_eeg)
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)
write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, eeg_solution)
if sol_grad is not None:
eeg_solution_grad = dict(data=sol_grad[picks_eeg],
nrow=n_eeg, ncol=n_col * 3,
row_names=row_names_eeg, col_names=[])
_transpose_named_matrix(eeg_solution_grad)
write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,
eeg_solution_grad)
end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
end_block(fid, FIFF.FIFFB_MNE)
end_file(fid)
def is_fixed_orient(forward, orig=False):
"""Check if the forward operator is fixed orientation.
Parameters
----------
forward : instance of Forward
The forward.
orig : bool
If True, consider the original source orientation.
If False (default), consider the current source orientation.
Returns
-------
fixed_ori : bool
Whether or not it is fixed orientation.
"""
if orig: # if we want to know about the original version
fixed_ori = (forward['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)
else: # most of the time we want to know about the current version
fixed_ori = (forward['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)
return fixed_ori
def write_forward_meas_info(fid, info):
"""Write measurement info stored in forward solution.
Parameters
----------
fid : file id
The file id
info : instance of Info
The measurement info.
"""
info._check_consistency()
#
# Information from the MEG file
#
start_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
write_string(fid, FIFF.FIFF_MNE_FILE_NAME, info['meas_file'])
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
# get transformation from CTF and DEVICE to HEAD coordinate frame
meg_head_t = info.get('dev_head_t', info.get('ctf_head_t'))
if meg_head_t is None:
fid.close()
raise ValueError('Head<-->sensor transform not found')
write_coord_trans(fid, meg_head_t)
if 'chs' in info:
# Channel information
write_int(fid, FIFF.FIFF_NCHAN, len(info['chs']))
for k, c in enumerate(info['chs']):
# Scan numbers may have been messed up
c = deepcopy(c)
c['scanno'] = k + 1
write_ch_info(fid, c)
if 'bads' in info and len(info['bads']) > 0:
# Bad channels
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
end_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
def _select_orient_forward(forward, info, noise_cov=None, copy=True):
"""Prepare forward solution for inverse solvers."""
# fwd['sol']['row_names'] may be different order from fwd['info']['chs']
fwd_sol_ch_names = forward['sol']['row_names']
all_ch_names = set(fwd_sol_ch_names)
all_bads = set(info['bads'])
if noise_cov is not None:
all_ch_names &= set(noise_cov['names'])
all_bads |= set(noise_cov['bads'])
else:
noise_cov = dict(bads=info['bads'])
ch_names = [c['ch_name'] for c in info['chs']
if c['ch_name'] not in all_bads and
c['ch_name'] in all_ch_names]
if not len(info['bads']) == len(noise_cov['bads']) or \
not all(b in noise_cov['bads'] for b in info['bads']):
logger.info('info["bads"] and noise_cov["bads"] do not match, '
'excluding bad channels from both')
# check the compensation grade
_check_compensation_grade(forward['info'], info, 'forward')
n_chan = len(ch_names)
logger.info("Computing inverse operator with %d channels." % n_chan)
forward = pick_channels_forward(forward, ch_names, ordered=True,
copy=copy)
info_idx = [info['ch_names'].index(name) for name in ch_names]
info_picked = pick_info(info, info_idx)
forward['info']._check_consistency()
info_picked._check_consistency()
return forward, info_picked
@verbose
def compute_orient_prior(forward, loose=0.2, verbose=None):
"""Compute orientation prior.
Parameters
----------
forward : instance of Forward
Forward operator.
loose : float
The loose orientation parameter (between 0 and 1).
%(verbose)s
Returns
-------
orient_prior : ndarray, shape (n_vertices,)
Orientation priors.
See Also
--------
compute_depth_prior
"""
is_fixed_ori = is_fixed_orient(forward)
n_sources = forward['sol']['data'].shape[1]
loose = float(loose)
if not (0 <= loose <= 1):
raise ValueError('loose value should be between 0 and 1, '
'got %s.' % (loose,))
    orient_prior = np.ones(n_sources, dtype=float)
if loose > 0.:
if is_fixed_ori:
raise ValueError('loose must be 0. with forward operator '
'with fixed orientation, got %s' % (loose,))
if loose < 1:
if not forward['surf_ori']:
raise ValueError('Forward operator is not oriented in surface '
'coordinates. loose parameter should be 1 '
'not %s.' % (loose,))
logger.info('Applying loose dipole orientations. Loose value '
'of %s.' % loose)
orient_prior[0::3] *= loose
orient_prior[1::3] *= loose
return orient_prior
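# Editor's note: the helper below is an illustrative sketch added for clarity;
# it is not part of the original module. It mimics how compute_orient_prior
# lays out the loose weights: with free orientation each vertex owns three
# solution columns, the two tangential components are scaled by ``loose`` and
# the third (surface-normal) component keeps weight 1.
def _sketch_orient_prior(n_vertices=2, loose=0.2):
    """Toy orientation prior for ``n_vertices`` free-orientation triplets."""
    import numpy as np
    prior = np.ones(3 * n_vertices)
    prior[0::3] *= loose  # first tangential component
    prior[1::3] *= loose  # second tangential component
    return prior  # e.g. array([0.2, 0.2, 1. , 0.2, 0.2, 1. ])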
def _restrict_gain_matrix(G, info):
"""Restrict gain matrix entries for optimal depth weighting."""
# Figure out which ones have been used
if len(info['chs']) != G.shape[0]:
raise ValueError('G.shape[0] (%d) and length of info["chs"] (%d) '
'do not match' % (G.shape[0], len(info['chs'])))
for meg, eeg, kind in (
('grad', False, 'planar'),
('mag', False, 'magnetometer or axial gradiometer'),
(False, True, 'EEG')):
sel = pick_types(info, meg=meg, eeg=eeg, ref_meg=False, exclude=[])
if len(sel) > 0:
logger.info(' %d %s channels' % (len(sel), kind))
break
else:
warn('Could not find MEG or EEG channels to limit depth channels')
sel = slice(None)
return G[sel]
@verbose
def compute_depth_prior(forward, info, exp=0.8, limit=10.0,
limit_depth_chs=False, combine_xyz='spectral',
noise_cov=None, rank=None, verbose=None):
"""Compute depth prior for depth weighting.
Parameters
----------
forward : instance of Forward
The forward solution.
info : instance of Info
The measurement info.
exp : float
Exponent for the depth weighting, must be between 0 and 1.
limit : float | None
The upper bound on depth weighting.
Can be None to be bounded by the largest finite prior.
limit_depth_chs : bool | 'whiten'
How to deal with multiple channel types in depth weighting.
The default is True, which whitens based on the source sensitivity
of the highest-SNR channel type. See Notes for details.
.. versionchanged:: 0.18
Added the "whiten" option.
combine_xyz : 'spectral' | 'fro'
When a loose (or free) orientation is used, how the depth weighting
for each triplet should be calculated.
If 'spectral', use the squared spectral norm of Gk.
If 'fro', use the squared Frobenius norm of Gk.
.. versionadded:: 0.18
noise_cov : instance of Covariance | None
The noise covariance to use to whiten the gain matrix when
``limit_depth_chs='whiten'``.
.. versionadded:: 0.18
%(rank_None)s
.. versionadded:: 0.18
%(verbose)s
Returns
-------
depth_prior : ndarray, shape (n_vertices,)
The depth prior.
See Also
--------
compute_orient_prior
Notes
-----
The defaults used by the minimum norm code and sparse solvers differ.
In particular, the values for MNE are::
compute_depth_prior(..., limit=10., limit_depth_chs=True,
combine_xyz='spectral')
In sparse solvers and LCMV, the values are::
compute_depth_prior(..., limit=None, limit_depth_chs='whiten',
combine_xyz='fro')
The ``limit_depth_chs`` argument can take the following values:
* :data:`python:True` (default)
Use only grad channels in depth weighting (equivalent to MNE C
minimum-norm code). If grad channels aren't present, only mag
channels will be used (if no mag, then eeg). This makes the depth
prior dependent only on the sensor geometry (and relationship
to the sources).
* ``'whiten'``
        Compute a whitener and apply it to the gain matrix before computing
the depth prior. In this case ``noise_cov`` must not be None.
Whitening the gain matrix makes the depth prior
depend on both sensor geometry and the data of interest captured
by the noise covariance (e.g., projections, SNR).
.. versionadded:: 0.18
* :data:`python:False`
Use all channels. Not recommended since the depth weighting will be
biased toward whichever channel type has the largest values in
SI units (such as EEG being orders of magnitude larger than MEG).
"""
from ..cov import Covariance, compute_whitener
_validate_type(forward, Forward, 'forward')
patch_areas = forward.get('patch_areas', None)
is_fixed_ori = is_fixed_orient(forward)
G = forward['sol']['data']
logger.info('Creating the depth weighting matrix...')
_validate_type(noise_cov, (Covariance, None), 'noise_cov',
'Covariance or None')
_validate_type(limit_depth_chs, (str, bool), 'limit_depth_chs')
if isinstance(limit_depth_chs, str):
if limit_depth_chs != 'whiten':
raise ValueError('limit_depth_chs, if str, must be "whiten", got '
'%s' % (limit_depth_chs,))
if not isinstance(noise_cov, Covariance):
raise ValueError('With limit_depth_chs="whiten", noise_cov must be'
' a Covariance, got %s' % (type(noise_cov),))
if combine_xyz is not False: # private / expert option
_check_option('combine_xyz', combine_xyz, ('fro', 'spectral'))
# If possible, pick best depth-weighting channels
if limit_depth_chs is True:
G = _restrict_gain_matrix(G, info)
elif limit_depth_chs == 'whiten':
whitener, _ = compute_whitener(noise_cov, info, pca=True, rank=rank,
verbose=False)
G = np.dot(whitener, G)
# Compute the gain matrix
if is_fixed_ori or combine_xyz in ('fro', False):
d = np.sum(G ** 2, axis=0)
if not (is_fixed_ori or combine_xyz is False):
d = d.reshape(-1, 3).sum(axis=1)
# Spherical leadfield can be zero at the center
d[d == 0.] = np.min(d[d != 0.])
else: # 'spectral'
# n_pos = G.shape[1] // 3
# The following is equivalent to this, but 4-10x faster
# d = np.zeros(n_pos)
# for k in range(n_pos):
# Gk = G[:, 3 * k:3 * (k + 1)]
# x = np.dot(Gk.T, Gk)
# d[k] = linalg.svdvals(x)[0]
G.shape = (G.shape[0], -1, 3)
d = np.linalg.norm(einsum('svj,svk->vjk', G, G), # vector dot products
ord=2, axis=(1, 2)) # ord=2 spectral (largest s.v.)
G.shape = (G.shape[0], -1)
# XXX Currently the fwd solns never have "patch_areas" defined
if patch_areas is not None:
if not is_fixed_ori and combine_xyz is False:
patch_areas = np.repeat(patch_areas, 3)
d /= patch_areas ** 2
logger.info(' Patch areas taken into account in the depth '
'weighting')
w = 1.0 / d
if limit is not None:
ws = np.sort(w)
weight_limit = limit ** 2
if limit_depth_chs is False:
            # match old mne-python behavior
# we used to do ind = np.argmin(ws), but this is 0 by sort above
n_limit = 0
limit = ws[0] * weight_limit
else:
# match C code behavior
limit = ws[-1]
n_limit = len(d)
if ws[-1] > weight_limit * ws[0]:
ind = np.where(ws > weight_limit * ws[0])[0][0]
limit = ws[ind]
n_limit = ind
logger.info(' limit = %d/%d = %f'
% (n_limit + 1, len(d),
np.sqrt(limit / ws[0])))
scale = 1.0 / limit
logger.info(' scale = %g exp = %g' % (scale, exp))
w = np.minimum(w / limit, 1)
depth_prior = w ** exp
if not (is_fixed_ori or combine_xyz is False):
depth_prior = np.repeat(depth_prior, 3)
return depth_prior
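# Editor's note: illustrative sketch added for clarity; not part of the
# original module. It condenses the fixed-orientation / 'fro' branch of
# compute_depth_prior above: weights are 1 / ||g_k||**2 per source column,
# capped at roughly ``limit`` squared times the smallest weight, and raised
# to ``exp``. The channel-restriction and whitening options are left out.
def _sketch_depth_prior(gain, exp=0.8, limit=10.0):
    """Toy depth weighting for a (n_channels, n_sources) gain matrix."""
    import numpy as np
    gain = np.atleast_2d(np.asarray(gain, float))
    d = np.sum(gain ** 2, axis=0)        # sensitivity of each source column
    d[d == 0.] = np.min(d[d != 0.])      # guard (assumes not all-zero columns)
    w = 1.0 / d
    ws = np.sort(w)
    cap = ws[-1]
    if ws[-1] > (limit ** 2) * ws[0]:    # same criterion as the C-code path
        cap = ws[np.where(ws > (limit ** 2) * ws[0])[0][0]]
    w = np.minimum(w / cap, 1)
    return w ** exp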
def _stc_src_sel(src, stc, on_missing='raise',
extra=', likely due to forward calculations'):
"""Select the vertex indices of a source space using a source estimate."""
if isinstance(stc, list):
vertices = stc
else:
assert isinstance(stc, _BaseSourceEstimate)
vertices = stc._vertices_list
del stc
if not len(src) == len(vertices):
raise RuntimeError('Mismatch between number of source spaces (%s) and '
'STC vertices (%s)' % (len(src), len(vertices)))
src_sels, stc_sels, out_vertices = [], [], []
src_offset = stc_offset = 0
for s, v in zip(src, vertices):
joint_sel = np.intersect1d(s['vertno'], v)
src_sels.append(np.searchsorted(s['vertno'], joint_sel) + src_offset)
src_offset += len(s['vertno'])
idx = np.searchsorted(v, joint_sel)
stc_sels.append(idx + stc_offset)
stc_offset += len(v)
out_vertices.append(np.array(v)[idx])
src_sel = np.concatenate(src_sels)
stc_sel = np.concatenate(stc_sels)
assert len(src_sel) == len(stc_sel) == sum(len(v) for v in out_vertices)
n_stc = sum(len(v) for v in vertices)
n_joint = len(src_sel)
if n_joint != n_stc:
msg = ('Only %i of %i SourceEstimate %s found in '
'source space%s'
% (n_joint, n_stc, 'vertex' if n_stc == 1 else 'vertices',
extra))
if on_missing == 'raise':
raise RuntimeError(msg)
elif on_missing == 'warn':
warn(msg)
else:
assert on_missing == 'ignore'
return src_sel, stc_sel, out_vertices
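# Editor's note: illustrative sketch added for clarity; not part of the
# original module. It shows the per-source-space step of _stc_src_sel above:
# the STC vertices are intersected with the source-space vertices and mapped
# back to positions on both sides (both vertex arrays are assumed sorted, as
# they are in practice).
def _sketch_vertex_match(src_vertno=(2, 5, 7, 9), stc_vertices=(5, 9, 11)):
    """Return (src_sel, stc_sel, joint) for a single hemisphere."""
    import numpy as np
    src_vertno = np.asarray(src_vertno)
    stc_vertices = np.asarray(stc_vertices)
    joint = np.intersect1d(src_vertno, stc_vertices)  # vertices in both
    src_sel = np.searchsorted(src_vertno, joint)      # columns in the forward
    stc_sel = np.searchsorted(stc_vertices, joint)    # rows in the STC data
    return src_sel, stc_sel, joint  # here: [1, 3], [0, 1], [5, 9]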
def _fill_measurement_info(info, fwd, sfreq):
"""Fill the measurement info of a Raw or Evoked object."""
sel = pick_channels(info['ch_names'], fwd['sol']['row_names'])
info = pick_info(info, sel)
info['bads'] = []
# this is probably correct based on what's done in meas_info.py...
info['meas_id'] = fwd['info']['meas_id']
info['file_id'] = info['meas_id']
now = time()
sec = np.floor(now)
usec = 1e6 * (now - sec)
info['meas_date'] = (int(sec), int(usec))
info['highpass'] = 0.0
info['lowpass'] = sfreq / 2.0
info['sfreq'] = sfreq
info['projs'] = []
return info
@verbose
def _apply_forward(fwd, stc, start=None, stop=None, on_missing='raise',
verbose=None):
"""Apply forward model and return data, times, ch_names."""
if not is_fixed_orient(fwd):
raise ValueError('Only fixed-orientation forward operators are '
'supported.')
if np.all(stc.data > 0):
warn('Source estimate only contains currents with positive values. '
'Use pick_ori="normal" when computing the inverse to compute '
'currents not current magnitudes.')
max_cur = np.max(np.abs(stc.data))
if max_cur > 1e-7: # 100 nAm threshold for warning
warn('The maximum current magnitude is %0.1f nAm, which is very large.'
' Are you trying to apply the forward model to noise-normalized '
'(dSPM, sLORETA, or eLORETA) values? The result will only be '
'correct if currents (in units of Am) are used.'
% (1e9 * max_cur))
src_sel, stc_sel, _ = _stc_src_sel(fwd['src'], stc, on_missing=on_missing)
gain = fwd['sol']['data'][:, src_sel]
# save some memory if possible
stc_sel = slice(None) if len(stc_sel) == len(stc.data) else stc_sel
logger.info('Projecting source estimate to sensor space...')
data = np.dot(gain, stc.data[stc_sel, start:stop])
logger.info('[done]')
times = deepcopy(stc.times[start:stop])
return data, times
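# Editor's note: illustrative sketch added for clarity; not part of the
# original module. The core of _apply_forward above is a single matrix
# product: sensor data = gain (leadfield) times the source time courses.
def _sketch_project_sources(n_channels=5, n_sources=4, n_times=3, seed=0):
    """Toy projection of fixed-orientation source currents to sensors."""
    import numpy as np
    rng = np.random.RandomState(seed)
    gain = rng.randn(n_channels, n_sources)      # stand-in leadfield matrix
    source_data = rng.randn(n_sources, n_times)  # stand-in currents (in Am)
    return np.dot(gain, source_data)             # shape (n_channels, n_times)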
@verbose
def apply_forward(fwd, stc, info, start=None, stop=None, use_cps=True,
on_missing='raise', verbose=None):
"""Project source space currents to sensor space using a forward operator.
The sensor space data is computed for all channels present in fwd. Use
pick_channels_forward or pick_types_forward to restrict the solution to a
subset of channels.
The function returns an Evoked object, which is constructed from
evoked_template. The evoked_template should be from the same MEG system on
which the original data was acquired. An exception will be raised if the
forward operator contains channels that are not present in the template.
Parameters
----------
fwd : Forward
Forward operator to use.
stc : SourceEstimate
The source estimate from which the sensor space data is computed.
info : instance of Info
Measurement info to generate the evoked.
start : int, optional
        Index of first time sample (index, not time in seconds).
    stop : int, optional
        Index of first time sample not to include (index, not time in seconds).
use_cps : bool (default True)
Whether to use cortical patch statistics to define normal
orientations when converting to fixed orientation (if necessary).
.. versionadded:: 0.15
%(on_missing)s Default is "raise".
.. versionadded:: 0.18
%(verbose)s
Returns
-------
evoked : Evoked
Evoked object with computed sensor space data.
See Also
--------
apply_forward_raw: Compute sensor space data and return a Raw object.
"""
# make sure evoked_template contains all channels in fwd
for ch_name in fwd['sol']['row_names']:
if ch_name not in info['ch_names']:
raise ValueError('Channel %s of forward operator not present in '
'evoked_template.' % ch_name)
# project the source estimate to the sensor space
if not is_fixed_orient(fwd):
fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=use_cps)
data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing)
# fill the measurement info
sfreq = float(1.0 / stc.tstep)
info_out = _fill_measurement_info(info, fwd, sfreq)
evoked = EvokedArray(data, info_out, times[0], nave=1)
evoked.times = times
evoked.first = int(np.round(evoked.times[0] * sfreq))
evoked.last = evoked.first + evoked.data.shape[1] - 1
return evoked
@verbose
def apply_forward_raw(fwd, stc, info, start=None, stop=None,
on_missing='raise', verbose=None):
"""Project source space currents to sensor space using a forward operator.
The sensor space data is computed for all channels present in fwd. Use
pick_channels_forward or pick_types_forward to restrict the solution to a
subset of channels.
The function returns a Raw object, which is constructed using provided
info. The info object should be from the same MEG system on which the
original data was acquired. An exception will be raised if the forward
operator contains channels that are not present in the info.
Parameters
----------
fwd : Forward
Forward operator to use. Has to be fixed-orientation.
stc : SourceEstimate
The source estimate from which the sensor space data is computed.
info : instance of Info
The measurement info.
start : int, optional
        Index of first time sample (index, not time in seconds).
    stop : int, optional
        Index of first time sample not to include (index, not time in seconds).
%(on_missing)s Default is "raise".
.. versionadded:: 0.18
%(verbose)s
Returns
-------
raw : Raw object
Raw object with computed sensor space data.
See Also
--------
apply_forward: Compute sensor space data and return an Evoked object.
"""
# make sure info contains all channels in fwd
for ch_name in fwd['sol']['row_names']:
if ch_name not in info['ch_names']:
raise ValueError('Channel %s of forward operator not present in '
'info.' % ch_name)
# project the source estimate to the sensor space
data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing)
sfreq = 1.0 / stc.tstep
info = _fill_measurement_info(info, fwd, sfreq)
info['projs'] = []
# store sensor data in Raw object using the info
raw = RawArray(data, info)
raw.preload = True
raw._first_samps = np.array([int(np.round(times[0] * sfreq))])
raw._last_samps = np.array([raw.first_samp + raw._data.shape[1] - 1])
raw._projector = None
raw._update_times()
return raw
@fill_doc
def restrict_forward_to_stc(fwd, stc, on_missing='ignore'):
"""Restrict forward operator to active sources in a source estimate.
Parameters
----------
fwd : instance of Forward
Forward operator.
stc : instance of SourceEstimate
Source estimate.
%(on_missing)s Default is "ignore".
.. versionadded:: 0.18
Returns
-------
fwd_out : instance of Forward
Restricted forward operator.
See Also
--------
restrict_forward_to_label
"""
_validate_type(on_missing, str, 'on_missing')
_check_option('on_missing', on_missing, ('ignore', 'warn', 'raise'))
src_sel, _, vertices = _stc_src_sel(fwd['src'], stc, on_missing=on_missing)
del stc
return _restrict_forward_to_src_sel(fwd, src_sel)
def _restrict_forward_to_src_sel(fwd, src_sel):
fwd_out = deepcopy(fwd)
# figure out the vertno we are keeping
idx_sel = np.concatenate([[[si] * len(s['vertno']), s['vertno']]
for si, s in enumerate(fwd['src'])], axis=-1)
assert idx_sel.ndim == 2 and idx_sel.shape[0] == 2
assert idx_sel.shape[1] == fwd['nsource']
idx_sel = idx_sel[:, src_sel]
fwd_out['source_rr'] = fwd['source_rr'][src_sel]
fwd_out['nsource'] = len(src_sel)
if is_fixed_orient(fwd):
idx = src_sel
if fwd['sol_grad'] is not None:
idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
if fwd['sol_grad'] is not None:
idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()
fwd_out['source_nn'] = fwd['source_nn'][idx]
fwd_out['sol']['data'] = fwd['sol']['data'][:, idx]
if fwd['sol_grad'] is not None:
fwd_out['sol_grad']['data'] = fwd['sol_grad']['data'][:, idx_grad]
fwd_out['sol']['ncol'] = len(idx)
if is_fixed_orient(fwd, orig=True):
idx = src_sel
if fwd['sol_grad'] is not None:
idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
if fwd['sol_grad'] is not None:
idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()
fwd_out['_orig_sol'] = fwd['_orig_sol'][:, idx]
if fwd['sol_grad'] is not None:
fwd_out['_orig_sol_grad'] = fwd['_orig_sol_grad'][:, idx_grad]
vertices = [idx_sel[1][idx_sel[0] == si]
for si in range(len(fwd_out['src']))]
_set_source_space_vertices(fwd_out['src'], vertices)
return fwd_out
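# Editor's note: illustrative sketch added for clarity; not part of the
# original module. With free orientation every selected vertex owns three
# adjacent solution columns, so _restrict_forward_to_src_sel above expands a
# vertex selection with the ``3 * sel[:, None] + arange(3)`` pattern (and
# ``9 * sel[:, None] + arange(9)`` for the gradient columns).
def _sketch_triplet_columns(src_sel=(0, 2, 5)):
    """Expand vertex indices to free-orientation solution column indices."""
    import numpy as np
    src_sel = np.asarray(src_sel)
    idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
    return idx  # here: array([ 0,  1,  2,  6,  7,  8, 15, 16, 17])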
def restrict_forward_to_label(fwd, labels):
"""Restrict forward operator to labels.
Parameters
----------
fwd : Forward
Forward operator.
labels : instance of Label | list
Label object or list of label objects.
Returns
-------
fwd_out : dict
Restricted forward operator.
See Also
--------
restrict_forward_to_stc
"""
vertices = [np.array([], int), np.array([], int)]
if not isinstance(labels, list):
labels = [labels]
    # Get vertices separately for each hemisphere from all labels
for label in labels:
_validate_type(label, Label, "label", "Label or list")
i = 0 if label.hemi == 'lh' else 1
vertices[i] = np.append(vertices[i], label.vertices)
# Remove duplicates and sort
vertices = [np.unique(vert_hemi) for vert_hemi in vertices]
fwd_out = deepcopy(fwd)
fwd_out['source_rr'] = np.zeros((0, 3))
fwd_out['nsource'] = 0
fwd_out['source_nn'] = np.zeros((0, 3))
fwd_out['sol']['data'] = np.zeros((fwd['sol']['data'].shape[0], 0))
fwd_out['_orig_sol'] = np.zeros((fwd['_orig_sol'].shape[0], 0))
if fwd['sol_grad'] is not None:
fwd_out['sol_grad']['data'] = np.zeros(
(fwd['sol_grad']['data'].shape[0], 0))
fwd_out['_orig_sol_grad'] = np.zeros(
(fwd['_orig_sol_grad'].shape[0], 0))
fwd_out['sol']['ncol'] = 0
nuse_lh = fwd['src'][0]['nuse']
for i in range(2):
fwd_out['src'][i]['vertno'] = np.array([], int)
fwd_out['src'][i]['nuse'] = 0
fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()
fwd_out['src'][i]['inuse'].fill(0)
fwd_out['src'][i]['use_tris'] = np.array([[]], int)
fwd_out['src'][i]['nuse_tri'] = np.array([0])
# src_sel is idx to cols in fwd that are in any label per hemi
src_sel = np.intersect1d(fwd['src'][i]['vertno'], vertices[i])
src_sel = np.searchsorted(fwd['src'][i]['vertno'], src_sel)
# Reconstruct each src
vertno = fwd['src'][i]['vertno'][src_sel]
fwd_out['src'][i]['inuse'][vertno] = 1
fwd_out['src'][i]['nuse'] += len(vertno)
fwd_out['src'][i]['vertno'] = np.where(fwd_out['src'][i]['inuse'])[0]
# Reconstruct part of fwd that is not sol data
src_sel += i * nuse_lh # Add column shift to right hemi
fwd_out['source_rr'] = np.vstack([fwd_out['source_rr'],
fwd['source_rr'][src_sel]])
fwd_out['nsource'] += len(src_sel)
if is_fixed_orient(fwd):
idx = src_sel
if fwd['sol_grad'] is not None:
idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
if fwd['sol_grad'] is not None:
idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()
fwd_out['source_nn'] = np.vstack(
[fwd_out['source_nn'], fwd['source_nn'][idx]])
fwd_out['sol']['data'] = np.hstack(
[fwd_out['sol']['data'], fwd['sol']['data'][:, idx]])
if fwd['sol_grad'] is not None:
fwd_out['sol_grad']['data'] = np.hstack(
[fwd_out['sol_grad']['data'],
                 fwd['sol_grad']['data'][:, idx_grad]])
fwd_out['sol']['ncol'] += len(idx)
if is_fixed_orient(fwd, orig=True):
idx = src_sel
if fwd['sol_grad'] is not None:
idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
if fwd['sol_grad'] is not None:
idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()
fwd_out['_orig_sol'] = np.hstack(
[fwd_out['_orig_sol'], fwd['_orig_sol'][:, idx]])
if fwd['sol_grad'] is not None:
fwd_out['_orig_sol_grad'] = np.hstack(
[fwd_out['_orig_sol_grad'],
fwd['_orig_sol_grad'][:, idx_grad]])
return fwd_out
def _do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
mindist=None, bem=None, mri=None, trans=None,
eeg=True, meg=True, fixed=False, grad=False,
mricoord=False, overwrite=False, subjects_dir=None,
verbose=None):
"""Calculate a forward solution for a subject using MNE-C routines.
This is kept around for testing purposes.
    This function wraps mne_do_forward_solution, so the mne
command-line tools must be installed and accessible from Python.
Parameters
----------
subject : str
Name of the subject.
meas : Raw | Epochs | Evoked | str
If Raw or Epochs, a temporary evoked file will be created and
saved to a temporary directory. If str, then it should be a
filename to a file with measurement information the mne
command-line tools can understand (i.e., raw or evoked).
fname : str | None
Destination forward solution filename. If None, the solution
will be created in a temporary directory, loaded, and deleted.
src : str | None
Source space name. If None, the MNE default is used.
spacing : str
The spacing to use. Can be ``'#'`` for spacing in mm, ``'ico#'`` for a
recursively subdivided icosahedron, or ``'oct#'`` for a recursively
subdivided octahedron (e.g., ``spacing='ico4'``). Default is 7 mm.
mindist : float | str | None
Minimum distance of sources from inner skull surface (in mm).
If None, the MNE default value is used. If string, 'all'
indicates to include all points.
bem : str | None
Name of the BEM to use (e.g., "sample-5120-5120-5120"). If None
(Default), the MNE default will be used.
mri : str | None
The name of the trans file in FIF format.
If None, trans must not be None.
trans : dict | str | None
File name of the trans file in text format.
If None, mri must not be None.
eeg : bool
If True (Default), include EEG computations.
meg : bool
If True (Default), include MEG computations.
fixed : bool
If True, make a fixed-orientation forward solution (Default:
False). Note that fixed-orientation inverses can still be
created from free-orientation forward solutions.
grad : bool
If True, compute the gradient of the field with respect to the
dipole coordinates as well (Default: False).
mricoord : bool
If True, calculate in MRI coordinates (Default: False).
overwrite : bool
If True, the destination file (if it exists) will be overwritten.
If False (default), an error will be raised if the file exists.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
%(verbose)s
See Also
--------
make_forward_solution
Returns
-------
fwd : Forward
The generated forward solution.
"""
if not has_mne_c():
raise RuntimeError('mne command line tools could not be found')
# check for file existence
temp_dir = tempfile.mkdtemp()
if fname is None:
fname = op.join(temp_dir, 'temp-fwd.fif')
_check_fname(fname, overwrite)
_validate_type(subject, "str", "subject")
# check for meas to exist as string, or try to make evoked
if isinstance(meas, str):
if not op.isfile(meas):
raise IOError('measurement file "%s" could not be found' % meas)
elif isinstance(meas, (BaseRaw, BaseEpochs, Evoked)):
meas_file = op.join(temp_dir, 'info.fif')
write_info(meas_file, meas.info)
meas = meas_file
else:
raise ValueError('meas must be string, Raw, Epochs, or Evoked')
# deal with trans/mri
if mri is not None and trans is not None:
raise ValueError('trans and mri cannot both be specified')
if mri is None and trans is None:
# MNE allows this to default to a trans/mri in the subject's dir,
# but let's be safe here and force the user to pass us a trans/mri
raise ValueError('Either trans or mri must be specified')
if trans is not None:
_validate_type(trans, "str", "trans")
if not op.isfile(trans):
raise IOError('trans file "%s" not found' % trans)
if mri is not None:
# deal with trans
if not isinstance(mri, str):
if isinstance(mri, dict):
mri_data = deepcopy(mri)
mri = op.join(temp_dir, 'mri-trans.fif')
try:
write_trans(mri, mri_data)
except Exception:
raise IOError('mri was a dict, but could not be '
'written to disk as a transform file')
else:
raise ValueError('trans must be a string or dict (trans)')
if not op.isfile(mri):
            raise IOError('trans file "%s" could not be found' % mri)
# deal with meg/eeg
if not meg and not eeg:
raise ValueError('meg or eeg (or both) must be True')
path, fname = op.split(fname)
if not op.splitext(fname)[1] == '.fif':
raise ValueError('Forward name does not end with .fif')
path = op.abspath(path)
# deal with mindist
if mindist is not None:
if isinstance(mindist, str):
if not mindist.lower() == 'all':
raise ValueError('mindist, if string, must be "all"')
mindist = ['--all']
else:
mindist = ['--mindist', '%g' % mindist]
# src, spacing, bem
for element, name, kind in zip((src, spacing, bem),
("src", "spacing", "bem"),
('path-like', 'str', 'path-like')):
if element is not None:
_validate_type(element, kind, name, "%s or None" % kind)
# put together the actual call
cmd = ['mne_do_forward_solution',
'--subject', subject,
'--meas', meas,
'--fwd', fname,
'--destdir', path]
if src is not None:
cmd += ['--src', src]
if spacing is not None:
if spacing.isdigit():
pass # spacing in mm
else:
# allow both "ico4" and "ico-4" style values
match = re.match(r"(oct|ico)-?(\d+)$", spacing)
if match is None:
raise ValueError("Invalid spacing parameter: %r" % spacing)
spacing = '-'.join(match.groups())
cmd += ['--spacing', spacing]
if mindist is not None:
cmd += mindist
if bem is not None:
cmd += ['--bem', bem]
if mri is not None:
cmd += ['--mri', '%s' % mri]
if trans is not None:
cmd += ['--trans', '%s' % trans]
if not meg:
cmd.append('--eegonly')
if not eeg:
cmd.append('--megonly')
if fixed:
cmd.append('--fixed')
if grad:
cmd.append('--grad')
if mricoord:
cmd.append('--mricoord')
if overwrite:
cmd.append('--overwrite')
env = os.environ.copy()
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
env['SUBJECTS_DIR'] = subjects_dir
try:
logger.info('Running forward solution generation command with '
'subjects_dir %s' % subjects_dir)
run_subprocess(cmd, env=env)
except Exception:
raise
else:
fwd = read_forward_solution(op.join(path, fname), verbose=False)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
return fwd
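# Editor's note: illustrative sketch added for clarity; not part of the
# original module. It isolates the spacing normalisation used in
# _do_forward_solution above, which accepts both "ico4" and "ico-4" (or
# plain millimetre digits) and rewrites the value for the MNE-C command line.
def _sketch_normalize_spacing(spacing='ico4'):
    """Return the spacing string in the form the MNE-C tools expect."""
    import re
    if spacing.isdigit():
        return spacing                       # spacing given in mm
    match = re.match(r"(oct|ico)-?(\d+)$", spacing)
    if match is None:
        raise ValueError("Invalid spacing parameter: %r" % spacing)
    return '-'.join(match.groups())          # e.g. 'ico4' -> 'ico-4'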
@verbose
def average_forward_solutions(fwds, weights=None):
"""Average forward solutions.
Parameters
----------
fwds : list of Forward
Forward solutions to average. Each entry (dict) should be a
forward solution.
weights : array | None
Weights to apply to each forward solution in averaging. If None,
forward solutions will be equally weighted. Weights must be
non-negative, and will be adjusted to sum to one.
Returns
-------
fwd : Forward
The averaged forward solution.
"""
# check for fwds being a list
_validate_type(fwds, list, "fwds")
if not len(fwds) > 0:
raise ValueError('fwds must not be empty')
# check weights
if weights is None:
weights = np.ones(len(fwds))
weights = np.asanyarray(weights) # in case it's a list, convert it
if not np.all(weights >= 0):
raise ValueError('weights must be non-negative')
if not len(weights) == len(fwds):
raise ValueError('weights must be None or the same length as fwds')
w_sum = np.sum(weights)
if not w_sum > 0:
raise ValueError('weights cannot all be zero')
weights /= w_sum
# check our forward solutions
for fwd in fwds:
# check to make sure it's a forward solution
_validate_type(fwd, dict, "each entry in fwds", "dict")
# check to make sure the dict is actually a fwd
check_keys = ['info', 'sol_grad', 'nchan', 'src', 'source_nn', 'sol',
'source_rr', 'source_ori', 'surf_ori', 'coord_frame',
'mri_head_t', 'nsource']
if not all(key in fwd for key in check_keys):
raise KeyError('forward solution dict does not have all standard '
'entries, cannot compute average.')
# check forward solution compatibility
if any(fwd['sol'][k] != fwds[0]['sol'][k]
for fwd in fwds[1:] for k in ['nrow', 'ncol']):
raise ValueError('Forward solutions have incompatible dimensions')
if any(fwd[k] != fwds[0][k] for fwd in fwds[1:]
for k in ['source_ori', 'surf_ori', 'coord_frame']):
raise ValueError('Forward solutions have incompatible orientations')
# actually average them (solutions and gradients)
fwd_ave = deepcopy(fwds[0])
fwd_ave['sol']['data'] *= weights[0]
fwd_ave['_orig_sol'] *= weights[0]
for fwd, w in zip(fwds[1:], weights[1:]):
fwd_ave['sol']['data'] += w * fwd['sol']['data']
fwd_ave['_orig_sol'] += w * fwd['_orig_sol']
if fwd_ave['sol_grad'] is not None:
fwd_ave['sol_grad']['data'] *= weights[0]
fwd_ave['_orig_sol_grad'] *= weights[0]
for fwd, w in zip(fwds[1:], weights[1:]):
fwd_ave['sol_grad']['data'] += w * fwd['sol_grad']['data']
fwd_ave['_orig_sol_grad'] += w * fwd['_orig_sol_grad']
return fwd_ave
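# Editor's note: illustrative sketch added for clarity; not part of the
# original module. average_forward_solutions above reduces to normalizing the
# weights and forming a weighted sum of the solution matrices; this mirrors
# that arithmetic on plain arrays.
def _sketch_average_solutions(solutions, weights=None):
    """Weighted average of equally shaped gain matrices (toy version)."""
    import numpy as np
    solutions = [np.asarray(s, float) for s in solutions]
    if weights is None:
        weights = np.ones(len(solutions))
    weights = np.asarray(weights, float)
    weights = weights / weights.sum()    # adjust weights to sum to one
    return sum(w * s for w, s in zip(weights, solutions))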
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
result_service_gui/services/contestants_adapter.py
|
"""Module for contestants adapter."""
import copy
import logging
import os
from typing import List
from aiohttp import ClientSession
from aiohttp import hdrs, web
from multidict import MultiDict
from .raceclasses_adapter import RaceclassesAdapter
EVENTS_HOST_SERVER = os.getenv("EVENTS_HOST_SERVER", "localhost")
EVENTS_HOST_PORT = os.getenv("EVENTS_HOST_PORT", "8082")
EVENT_SERVICE_URL = f"http://{EVENTS_HOST_SERVER}:{EVENTS_HOST_PORT}"
class ContestantsAdapter:
"""Class representing contestants."""
async def assign_bibs(self, token: str, event_id: str) -> str:
"""Generate bibs based upon registrations."""
headers = MultiDict([(hdrs.AUTHORIZATION, f"Bearer {token}")])
url = f"{EVENT_SERVICE_URL}/events/{event_id}/contestants/assign-bibs"
async with ClientSession() as session:
async with session.post(url, headers=headers) as resp:
res = resp.status
logging.debug(f"assign_bibs result - got response {resp}")
if res == 201:
pass
else:
servicename = "assign_bibs"
body = await resp.json()
logging.error(f"{servicename} failed - {resp.status} - {body}")
raise web.HTTPBadRequest(
reason=f"Error - {resp.status}: {body['detail']}."
)
information = "Startnummer tildelt."
return information
async def create_contestant(
self, token: str, event_id: str, request_body: dict
) -> str:
"""Create new contestant function."""
id = ""
headers = MultiDict(
[
(hdrs.CONTENT_TYPE, "application/json"),
(hdrs.AUTHORIZATION, f"Bearer {token}"),
]
)
async with ClientSession() as session:
async with session.post(
f"{EVENT_SERVICE_URL}/events/{event_id}/contestants",
headers=headers,
json=request_body,
) as resp:
if resp.status == 201:
logging.debug(f"result - got response {resp}")
location = resp.headers[hdrs.LOCATION]
id = location.split(os.path.sep)[-1]
else:
servicename = "create_contestant"
body = await resp.json()
logging.error(f"{servicename} failed - {resp.status} - {body}")
raise web.HTTPBadRequest(
reason=f"Error - {resp.status}: {body['detail']}."
)
return id
async def create_contestants(self, token: str, event_id: str, inputfile) -> str:
"""Create new contestants function."""
headers = {
hdrs.CONTENT_TYPE: "text/csv",
hdrs.AUTHORIZATION: f"Bearer {token}",
}
logging.debug(f"Create contestants - got file {inputfile}")
async with ClientSession() as session:
async with session.post(
f"{EVENT_SERVICE_URL}/events/{event_id}/contestants",
headers=headers,
data=inputfile,
) as resp:
res = resp.status
logging.info(f"result - got response {res} - {resp}")
if res == 200:
res = await resp.json()
else:
servicename = "create_contestants"
body = await resp.json()
logging.error(f"{servicename} failed - {resp.status} - {body}")
raise web.HTTPBadRequest(
reason=f"Error - {resp.status}: {body['detail']}."
)
return str(res)
async def delete_all_contestants(self, token: str, event_id: str) -> str:
"""Delete all contestants in one event function."""
headers = {
hdrs.AUTHORIZATION: f"Bearer {token}",
}
async with ClientSession() as session:
async with session.delete(
f"{EVENT_SERVICE_URL}/events/{event_id}/contestants",
headers=headers,
) as resp:
res = resp.status
logging.debug(f"delete all result - got response {resp}")
if res == 204:
pass
else:
servicename = "delete_all_contestants"
body = await resp.json()
logging.error(f"{servicename} failed - {resp.status} - {body}")
raise web.HTTPBadRequest(
reason=f"Error - {resp.status}: {body['detail']}."
)
return str(res)
async def delete_contestant(
self, token: str, event_id: str, contestant_id: str
) -> str:
"""Delete one contestant function."""
headers = {
hdrs.AUTHORIZATION: f"Bearer {token}",
}
async with ClientSession() as session:
async with session.delete(
f"{EVENT_SERVICE_URL}/events/{event_id}/contestants/{contestant_id}",
headers=headers,
) as resp:
res = resp.status
logging.debug(f"delete result - got response {resp}")
if res == 204:
pass
else:
servicename = "delete_contestant"
body = await resp.json()
logging.error(f"{servicename} failed - {resp.status} - {body}")
raise web.HTTPBadRequest(
reason=f"Error - {resp.status}: {body['detail']}."
)
return str(res)
async def get_all_contestants_by_ageclass(
self, token: str, event_id: str, ageclass_name: str
) -> List:
"""Get all contestants / by class (optional) function."""
headers = MultiDict(
[
(hdrs.CONTENT_TYPE, "application/json"),
(hdrs.AUTHORIZATION, f"Bearer {token}"),
]
)
contestants = []
async with ClientSession() as session:
async with session.get(
f"{EVENT_SERVICE_URL}/events/{event_id}/contestants", headers=headers
) as resp:
logging.debug(f"get_all_contestants - got response {resp.status}")
if resp.status == 200:
contestants = await resp.json()
else:
servicename = "get_all_contestants_by_ageclass"
body = await resp.json()
logging.error(f"{servicename} failed - {resp.status} - {body}")
raise web.HTTPBadRequest(
reason=f"Error - {resp.status}: {body['detail']}."
)
        # TODO: Should be moved to the backend
if ageclass_name != "":
tmp_contestants = []
for x in contestants:
if x["ageclass"] == ageclass_name:
tmp_contestants.append(x)
contestants = tmp_contestants
return contestants
async def get_all_contestants_by_raceclass(
self, token: str, event_id: str, raceclass_name: str
) -> List:
"""Get all contestants / by class function."""
ageclasses = []
raceclasses = await RaceclassesAdapter().get_raceclasses(token, event_id)
for raceclass in raceclasses:
if raceclass["name"] == raceclass_name:
ageclasses.append(raceclass["ageclass_name"])
headers = MultiDict(
[
(hdrs.CONTENT_TYPE, "application/json"),
(hdrs.AUTHORIZATION, f"Bearer {token}"),
]
)
contestants = []
async with ClientSession() as session:
async with session.get(
f"{EVENT_SERVICE_URL}/events/{event_id}/contestants", headers=headers
) as resp:
logging.debug(f"get_all_contestants - got response {resp.status}")
if resp.status == 200:
contestants = await resp.json()
else:
servicename = "get_all_contestants_by_ageclass"
body = await resp.json()
logging.error(f"{servicename} failed - {resp.status} - {body}")
raise web.HTTPBadRequest(
reason=f"Error - {resp.status}: {body['detail']}."
)
        # TODO: Should be moved to the backend
tmp_contestants = []
for x in contestants:
if x["ageclass"] in ageclasses:
tmp_contestants.append(x)
contestants = tmp_contestants
return contestants
async def get_contestant_by_bib(self, token: str, event_id: str, bib: str) -> dict:
"""Get contestant by bib function."""
headers = MultiDict(
[
(hdrs.CONTENT_TYPE, "application/json"),
(hdrs.AUTHORIZATION, f"Bearer {token}"),
]
)
contestant = []
async with ClientSession() as session:
async with session.get(
f"{EVENT_SERVICE_URL}/events/{event_id}/contestants?bib={bib}",
headers=headers,
) as resp:
logging.debug(
f"get_contestants_by_raceclass - got response {resp.status}"
)
if resp.status == 200:
contestant = await resp.json()
else:
servicename = "get_contestants_by_bib"
body = await resp.json()
logging.error(f"{servicename} failed - {resp.status} - {body}")
raise web.HTTPBadRequest(
reason=f"Error - {resp.status}: {body['detail']}."
)
if len(contestant) == 0:
return {}
return contestant[0]
async def get_contestants_by_raceclass(
self, token: str, event_id: str, raceclass: str
) -> List:
"""Get all contestants by raceclass function."""
headers = MultiDict(
[
(hdrs.CONTENT_TYPE, "application/json"),
(hdrs.AUTHORIZATION, f"Bearer {token}"),
]
)
contestants = []
async with ClientSession() as session:
async with session.get(
f"{EVENT_SERVICE_URL}/events/{event_id}/contestants?raceclass={raceclass}",
headers=headers,
) as resp:
logging.debug(
f"get_contestants_by_raceclass - got response {resp.status}"
)
if resp.status == 200:
contestants = await resp.json()
else:
servicename = "get_contestants_by_raceclass"
body = await resp.json()
logging.error(f"{servicename} failed - {resp.status} - {body}")
raise web.HTTPBadRequest(
reason=f"Error - {resp.status}: {body['detail']}."
)
return contestants
async def get_contestant(
self, token: str, event_id: str, contestant_id: str
) -> dict:
"""Get all contestant function."""
headers = MultiDict(
[
(hdrs.CONTENT_TYPE, "application/json"),
(hdrs.AUTHORIZATION, f"Bearer {token}"),
]
)
contestant = {}
async with ClientSession() as session:
async with session.get(
f"{EVENT_SERVICE_URL}/events/{event_id}/contestants/{contestant_id}",
headers=headers,
) as resp:
logging.debug(f"get_contestant - got response {resp.status}")
if resp.status == 200:
contestant = await resp.json()
else:
servicename = "get_contestant"
body = await resp.json()
logging.error(f"{servicename} failed - {resp.status} - {body}")
raise web.HTTPBadRequest(
reason=f"Error - {resp.status}: {body['detail']}."
)
return contestant
async def update_contestant(
self, token: str, event_id: str, contestant: dict
) -> str:
"""Create new contestants function."""
request_body = copy.deepcopy(contestant)
logging.debug(f"update_contestants, got request_body {request_body}")
url = f"{EVENT_SERVICE_URL}/events/{event_id}/contestants/{contestant['id']}"
headers = MultiDict(
[
(hdrs.CONTENT_TYPE, "application/json"),
(hdrs.AUTHORIZATION, f"Bearer {token}"),
]
)
async with ClientSession() as session:
async with session.put(url, headers=headers, json=request_body) as resp:
res = resp.status
if res == 204:
logging.debug(f"result - got response {resp}")
else:
servicename = "update_contestant"
body = await resp.json()
logging.error(f"{servicename} failed - {resp.status} - {body}")
raise web.HTTPBadRequest(
reason=f"Error - {resp.status}: {body['detail']}."
)
return str(resp.status)
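# Editor's note: illustrative usage sketch added for clarity; it is not part
# of the original module and is never called at import time. The token and
# event id are placeholders, and a reachable event service at
# EVENT_SERVICE_URL is assumed.
async def _sketch_list_contestants(token: str, event_id: str) -> None:
    """Log how many contestants an event has (hypothetical values)."""
    adapter = ContestantsAdapter()
    # An empty ageclass name means "no filtering" in the adapter above.
    contestants = await adapter.get_all_contestants_by_ageclass(
        token, event_id, ""
    )
    logging.info(f"Event {event_id} has {len(contestants)} contestants.")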
|
[] |
[] |
[
"EVENTS_HOST_PORT",
"EVENTS_HOST_SERVER"
] |
[]
|
["EVENTS_HOST_PORT", "EVENTS_HOST_SERVER"]
|
python
| 2 | 0 | |
tests/unittest/log_checker/main_test.go
|
package main
import (
"context"
"database/sql"
"fmt"
"os"
"testing"
"time"
"regexp"
"strings"
"strconv"
"bufio"
"github.com/paypal/hera/tests/unittest/testutil"
"github.com/paypal/hera/utility/logger"
)
/*
To run the test
export DB_USER=x
export DB_PASSWORD=x
export DB_DATASOURCE=x
export username=realU
export password=realU-pwd
export TWO_TASK='tcp(mysql.example.com:3306)/someSchema?timeout=60s&tls=preferred||tcp(failover.example.com:3306)/someSchema'
export TWO_TASK_READ='tcp(mysqlr.example.com:3306)/someSchema?timeout=6s&tls=preferred||tcp(failover.example.com:3306)/someSchema'
$GOROOT/bin/go install .../worker/{mysql,oracle}worker
ln -s $GOPATH/bin/{mysql,oracle}worker .
$GOROOT/bin/go test -c .../tests/unittest/coordinator_basic && ./coordinator_basic.test
*/
var mx testutil.Mux
var tableName string
func cfg() (map[string]string, map[string]string, testutil.WorkerType) {
appcfg := make(map[string]string)
// appcfg["x-mysql"] = "manual" // disable test framework spawning mysql server
	// best to choose a unique port in case golang runs tests in parallel
appcfg["bind_port"] = "31002"
appcfg["log_level"] = "5"
appcfg["log_file"] = "hera.log"
appcfg["sharding_cfg_reload_interval"] = "0"
appcfg["rac_sql_interval"] = "2"
appcfg["db_heartbeat_interval"] = "3"
opscfg := make(map[string]string)
opscfg["opscfg.default.server.max_connections"] = "3"
opscfg["opscfg.default.server.log_level"] = "5"
appcfg["child.executable"] = "mysqlworker"
return appcfg, opscfg, testutil.MySQLWorker
}
func before() error {
tableName = os.Getenv("TABLE_NAME")
if tableName == "" {
tableName = "jdbc_hera_test"
}
return nil
}
func TestMain(m *testing.M) {
os.Exit(testutil.UtilMain(m, cfg, before))
}
func TestCalClientSessionDur(t *testing.T) {
logger.GetLogger().Log(logger.Debug, "TestCalClientSessionDur begin +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n")
shard := 0
db, err := sql.Open("heraloop", fmt.Sprintf("%d:0:0", shard))
if err != nil {
t.Fatal("Error starting Mux:", err)
return
}
db.SetMaxIdleConns(0)
defer db.Close()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
conn, err := db.Conn(ctx)
if err != nil {
t.Fatalf("Error getting connection %s\n", err.Error())
}
stmt, err := conn.PrepareContext(ctx, "select 'foo' from dual")
if err != nil {
t.Fatalf("Error with the prepared statement")
}
rows, err := stmt.QueryContext(ctx)
if err != nil {
t.Fatalf("Error with the QueryContext")
}
defer rows.Close()
stmt.Close()
cancel()
conn.Close()
clientSessionDurLogScan(t)
logger.GetLogger().Log(logger.Debug, "TestCalClientSessionDur done -------------------------------------------------------------")
}
func clientSessionDurLogScan(t *testing.T) {
	file, err := os.Open("cal.log")
	if err != nil {
		t.Fatalf("Error in opening cal.log")
	}
	// close the log file when scanning is done; deferred only after a successful open
	defer file.Close()
re := regexp.MustCompile("[ |\t][0-9]+\\.[0-9]")
cliSession_re := regexp.MustCompile("CLIENT_SESSION.*corr_id_")
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
if(cliSession_re.MatchString(line)){
_, err := strconv.ParseFloat(strings.TrimSpace(re.FindAllString(line, -1)[0]),32)
if(err != nil){
t.Fatalf("Num error for CLIENT_SESSION duration")
}
}
}
if err := scanner.Err(); err != nil {
t.Fatalf("cal.log read error")
}
}
|
[
"\"TABLE_NAME\""
] |
[] |
[
"TABLE_NAME"
] |
[]
|
["TABLE_NAME"]
|
go
| 1 | 0 | |
itests/common/src/main/java/org/wildfly/camel/test/common/utils/EnvironmentUtils.java
|
/*
* #%L
* Wildfly Camel :: Testsuite
* %%
* Copyright (C) 2013 - 2014 RedHat
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package org.wildfly.camel.test.common.utils;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Locale;
/**
* Collection of Environment utilities
*
* @author [email protected]
* @since 22-May-2015
*/
public final class EnvironmentUtils {
private static final boolean AIX;
private static final boolean LINUX;
private static final boolean MAC;
private static final boolean WINDOWS;
private static final boolean VM_IBM_JDK;
private static final boolean VM_OPEN_JDK;
private static final String JAVA;
private static final Path JAVA_HOME;
private static final Path WILDFLY_HOME;
static {
final String os = getOSName();
AIX = os.equals("aix");
LINUX = os.equals("linux");
MAC = os.startsWith("mac");
WINDOWS = os.contains("win");
VM_IBM_JDK = System.getProperty("java.vm.name").startsWith("IBM");
VM_OPEN_JDK = System.getProperty("java.vm.name").startsWith("OpenJDK");
String javaExecutable = "java";
if (WINDOWS) {
javaExecutable = "java.exe";
}
JAVA = javaExecutable;
String javaHome = System.getenv("JAVA_HOME");
if (javaHome == null) {
javaHome = System.getProperty("java.home");
}
JAVA_HOME = Paths.get(javaHome);
WILDFLY_HOME = Paths.get(System.getProperty("jboss.home.dir"));
}
public static String getOSName() {
return System.getProperty("os.name").toLowerCase(Locale.ROOT);
}
// hide ctor
private EnvironmentUtils() {
}
public static boolean isAIX() {
return AIX;
}
public static boolean isLinux() {
return LINUX;
}
public static boolean isMac() {
return MAC;
}
public static boolean isWindows() {
return WINDOWS;
}
public static boolean isUnknown() {
return !AIX && !LINUX && !MAC && !WINDOWS;
}
public static boolean isIbmJDK() {
return VM_IBM_JDK;
}
public static boolean isOpenJDK() {
return VM_OPEN_JDK;
}
public static Path getJavaExecutablePath() {
return Paths.get(JAVA_HOME.toString(), "bin", JAVA);
}
public static Path getWildFlyHome() {
return WILDFLY_HOME;
}
}
|
[
"\"JAVA_HOME\""
] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
java
| 1 | 0 | |
testing/test_cacheprovider.py
|
from __future__ import absolute_import, division, print_function
import sys
import py
import _pytest
import pytest
import os
import shutil
pytest_plugins = "pytester",
class TestNewAPI(object):
def test_config_cache_makedir(self, testdir):
testdir.makeini("[pytest]")
config = testdir.parseconfigure()
with pytest.raises(ValueError):
config.cache.makedir("key/name")
p = config.cache.makedir("name")
assert p.check()
def test_config_cache_dataerror(self, testdir):
testdir.makeini("[pytest]")
config = testdir.parseconfigure()
cache = config.cache
pytest.raises(TypeError, lambda: cache.set("key/name", cache))
config.cache.set("key/name", 0)
config.cache._getvaluepath("key/name").write("123invalid")
val = config.cache.get("key/name", -2)
assert val == -2
def test_cache_writefail_cachfile_silent(self, testdir):
testdir.makeini("[pytest]")
testdir.tmpdir.join('.pytest_cache').write('gone wrong')
config = testdir.parseconfigure()
cache = config.cache
cache.set('test/broken', [])
@pytest.mark.skipif(sys.platform.startswith('win'), reason='no chmod on windows')
def test_cache_writefail_permissions(self, testdir):
testdir.makeini("[pytest]")
testdir.tmpdir.ensure_dir('.pytest_cache').chmod(0)
config = testdir.parseconfigure()
cache = config.cache
cache.set('test/broken', [])
@pytest.mark.skipif(sys.platform.startswith('win'), reason='no chmod on windows')
def test_cache_failure_warns(self, testdir):
testdir.tmpdir.ensure_dir('.pytest_cache').chmod(0)
testdir.makepyfile("""
def test_error():
raise Exception
""")
result = testdir.runpytest('-rw')
assert result.ret == 1
result.stdout.fnmatch_lines([
"*could not create cache path*",
"*1 warnings*",
])
def test_config_cache(self, testdir):
testdir.makeconftest("""
def pytest_configure(config):
# see that we get cache information early on
assert hasattr(config, "cache")
""")
testdir.makepyfile("""
def test_session(pytestconfig):
assert hasattr(pytestconfig, "cache")
""")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_cachefuncarg(self, testdir):
testdir.makepyfile("""
import pytest
def test_cachefuncarg(cache):
val = cache.get("some/thing", None)
assert val is None
cache.set("some/thing", [1])
pytest.raises(TypeError, lambda: cache.get("some/thing"))
val = cache.get("some/thing", [])
assert val == [1]
""")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_custom_rel_cache_dir(self, testdir):
rel_cache_dir = os.path.join('custom_cache_dir', 'subdir')
testdir.makeini("""
[pytest]
cache_dir = {cache_dir}
""".format(cache_dir=rel_cache_dir))
testdir.makepyfile(test_errored='def test_error():\n assert False')
testdir.runpytest()
assert testdir.tmpdir.join(rel_cache_dir).isdir()
def test_custom_abs_cache_dir(self, testdir, tmpdir_factory):
tmp = str(tmpdir_factory.mktemp('tmp'))
abs_cache_dir = os.path.join(tmp, 'custom_cache_dir')
testdir.makeini("""
[pytest]
cache_dir = {cache_dir}
""".format(cache_dir=abs_cache_dir))
testdir.makepyfile(test_errored='def test_error():\n assert False')
testdir.runpytest()
assert py.path.local(abs_cache_dir).isdir()
def test_custom_cache_dir_with_env_var(self, testdir, monkeypatch):
monkeypatch.setenv('env_var', 'custom_cache_dir')
testdir.makeini("""
[pytest]
cache_dir = {cache_dir}
""".format(cache_dir='$env_var'))
testdir.makepyfile(test_errored='def test_error():\n assert False')
testdir.runpytest()
assert testdir.tmpdir.join('custom_cache_dir').isdir()
def test_cache_reportheader(testdir):
testdir.makepyfile("""
def test_hello():
pass
""")
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines([
"cachedir: .pytest_cache"
])
def test_cache_show(testdir):
result = testdir.runpytest("--cache-show")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*cache is empty*"
])
testdir.makeconftest("""
def pytest_configure(config):
config.cache.set("my/name", [1,2,3])
config.cache.set("other/some", {1:2})
dp = config.cache.makedir("mydb")
dp.ensure("hello")
dp.ensure("world")
""")
result = testdir.runpytest()
assert result.ret == 5 # no tests executed
result = testdir.runpytest("--cache-show")
result.stdout.fnmatch_lines_random([
"*cachedir:*",
"-*cache values*-",
"*my/name contains:",
" [1, 2, 3]",
"*other/some contains*",
" {*1*: 2}",
"-*cache directories*-",
"*mydb/hello*length 0*",
"*mydb/world*length 0*",
])
class TestLastFailed(object):
def test_lastfailed_usecase(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
p = testdir.makepyfile("""
def test_1():
assert 0
def test_2():
assert 0
def test_3():
assert 1
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
p.write(_pytest._code.Source("""
def test_1():
assert 1
def test_2():
assert 1
def test_3():
assert 0
"""))
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 passed*1 desel*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
result = testdir.runpytest("--lf", "--cache-clear")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
# Run this again to make sure clear-cache is robust
if os.path.isdir('.pytest_cache'):
shutil.rmtree('.pytest_cache')
result = testdir.runpytest("--lf", "--cache-clear")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
def test_failedfirst_order(self, testdir):
testdir.tmpdir.join('test_a.py').write(_pytest._code.Source("""
def test_always_passes():
assert 1
"""))
testdir.tmpdir.join('test_b.py').write(_pytest._code.Source("""
def test_always_fails():
assert 0
"""))
result = testdir.runpytest()
# Test order will be collection order; alphabetical
result.stdout.fnmatch_lines([
"test_a.py*",
"test_b.py*",
])
result = testdir.runpytest("--ff")
        # Test order will be failing tests first
result.stdout.fnmatch_lines([
"test_b.py*",
"test_a.py*",
])
def test_lastfailed_failedfirst_order(self, testdir):
testdir.makepyfile(**{
'test_a.py': """
def test_always_passes():
assert 1
""",
'test_b.py': """
def test_always_fails():
assert 0
""",
})
result = testdir.runpytest()
# Test order will be collection order; alphabetical
result.stdout.fnmatch_lines([
"test_a.py*",
"test_b.py*",
])
result = testdir.runpytest("--lf", "--ff")
        # Test order will be failing tests first
result.stdout.fnmatch_lines([
"test_b.py*",
])
assert 'test_a.py' not in result.stdout.str()
def test_lastfailed_difference_invocations(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
testdir.makepyfile(test_a="""
def test_a1():
assert 0
def test_a2():
assert 1
""", test_b="""
def test_b1():
assert 0
""")
p = testdir.tmpdir.join("test_a.py")
p2 = testdir.tmpdir.join("test_b.py")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
p2.write(_pytest._code.Source("""
def test_b1():
assert 1
"""))
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 passed*",
])
result = testdir.runpytest("--lf", p)
result.stdout.fnmatch_lines([
"*1 failed*1 desel*",
])
def test_lastfailed_usecase_splice(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
testdir.makepyfile("""
def test_1():
assert 0
""")
p2 = testdir.tmpdir.join("test_something.py")
p2.write(_pytest._code.Source("""
def test_2():
assert 0
"""))
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 failed*",
])
def test_lastfailed_xpass(self, testdir):
testdir.inline_runsource("""
import pytest
@pytest.mark.xfail
def test_hello():
assert 1
""")
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
assert lastfailed == -1
def test_non_serializable_parametrize(self, testdir):
"""Test that failed parametrized tests with unmarshable parameters
don't break pytest-cache.
"""
testdir.makepyfile(r"""
import pytest
@pytest.mark.parametrize('val', [
b'\xac\x10\x02G',
])
def test_fail(val):
assert False
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines('*1 failed in*')
def test_terminal_report_lastfailed(self, testdir):
test_a = testdir.makepyfile(test_a="""
def test_a1():
pass
def test_a2():
pass
""")
test_b = testdir.makepyfile(test_b="""
def test_b1():
assert 0
def test_b2():
assert 0
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
'collected 4 items',
'*2 failed, 2 passed in*',
])
result = testdir.runpytest('--lf')
result.stdout.fnmatch_lines([
'collected 4 items',
'run-last-failure: rerun previous 2 failures',
'*2 failed, 2 deselected in*',
])
result = testdir.runpytest(test_a, '--lf')
result.stdout.fnmatch_lines([
'collected 2 items',
'run-last-failure: run all (no recorded failures)',
'*2 passed in*',
])
result = testdir.runpytest(test_b, '--lf')
result.stdout.fnmatch_lines([
'collected 2 items',
'run-last-failure: rerun previous 2 failures',
'*2 failed in*',
])
result = testdir.runpytest('test_b.py::test_b1', '--lf')
result.stdout.fnmatch_lines([
'collected 1 item',
'run-last-failure: rerun previous 1 failure',
'*1 failed in*',
])
def test_terminal_report_failedfirst(self, testdir):
testdir.makepyfile(test_a="""
def test_a1():
assert 0
def test_a2():
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
'collected 2 items',
'*1 failed, 1 passed in*',
])
result = testdir.runpytest('--ff')
result.stdout.fnmatch_lines([
'collected 2 items',
'run-last-failure: rerun previous 1 failure first',
'*1 failed, 1 passed in*',
])
def test_lastfailed_collectfailure(self, testdir, monkeypatch):
testdir.makepyfile(test_maybe="""
import os
env = os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
""")
def rlf(fail_import, fail_run):
monkeypatch.setenv('FAILIMPORT', fail_import)
monkeypatch.setenv('FAILTEST', fail_run)
testdir.runpytest('-q')
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
return lastfailed
lastfailed = rlf(fail_import=0, fail_run=0)
assert lastfailed == -1
lastfailed = rlf(fail_import=1, fail_run=0)
assert list(lastfailed) == ['test_maybe.py']
lastfailed = rlf(fail_import=0, fail_run=1)
assert list(lastfailed) == ['test_maybe.py::test_hello']
def test_lastfailed_failure_subset(self, testdir, monkeypatch):
testdir.makepyfile(test_maybe="""
import os
env = os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
""")
testdir.makepyfile(test_maybe2="""
import os
env = os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
def test_pass():
pass
""")
def rlf(fail_import, fail_run, args=()):
monkeypatch.setenv('FAILIMPORT', fail_import)
monkeypatch.setenv('FAILTEST', fail_run)
result = testdir.runpytest('-q', '--lf', *args)
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
return result, lastfailed
result, lastfailed = rlf(fail_import=0, fail_run=0)
assert lastfailed == -1
result.stdout.fnmatch_lines([
'*3 passed*',
])
result, lastfailed = rlf(fail_import=1, fail_run=0)
assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py']
result, lastfailed = rlf(fail_import=0, fail_run=0,
args=('test_maybe2.py',))
assert list(lastfailed) == ['test_maybe.py']
# edge case of test selection - even if we remember failures
# from other tests we still need to run all tests if no test
# matches the failures
result, lastfailed = rlf(fail_import=0, fail_run=0,
args=('test_maybe2.py',))
assert list(lastfailed) == ['test_maybe.py']
result.stdout.fnmatch_lines([
'*2 passed*',
])
def test_lastfailed_creates_cache_when_needed(self, testdir):
# Issue #1342
testdir.makepyfile(test_empty='')
testdir.runpytest('-q', '--lf')
assert not os.path.exists('.pytest_cache')
testdir.makepyfile(test_successful='def test_success():\n assert True')
testdir.runpytest('-q', '--lf')
assert not os.path.exists('.pytest_cache')
testdir.makepyfile(test_errored='def test_error():\n assert False')
testdir.runpytest('-q', '--lf')
assert os.path.exists('.pytest_cache')
def test_xfail_not_considered_failure(self, testdir):
testdir.makepyfile('''
import pytest
@pytest.mark.xfail
def test():
assert 0
''')
result = testdir.runpytest()
result.stdout.fnmatch_lines('*1 xfailed*')
assert self.get_cached_last_failed(testdir) == []
def test_xfail_strict_considered_failure(self, testdir):
testdir.makepyfile('''
import pytest
@pytest.mark.xfail(strict=True)
def test():
pass
''')
result = testdir.runpytest()
result.stdout.fnmatch_lines('*1 failed*')
assert self.get_cached_last_failed(testdir) == ['test_xfail_strict_considered_failure.py::test']
@pytest.mark.parametrize('mark', ['mark.xfail', 'mark.skip'])
def test_failed_changed_to_xfail_or_skip(self, testdir, mark):
testdir.makepyfile('''
import pytest
def test():
assert 0
''')
result = testdir.runpytest()
assert self.get_cached_last_failed(testdir) == ['test_failed_changed_to_xfail_or_skip.py::test']
assert result.ret == 1
testdir.makepyfile('''
import pytest
@pytest.{mark}
def test():
assert 0
'''.format(mark=mark))
result = testdir.runpytest()
assert result.ret == 0
assert self.get_cached_last_failed(testdir) == []
assert result.ret == 0
def get_cached_last_failed(self, testdir):
config = testdir.parseconfigure()
return sorted(config.cache.get("cache/lastfailed", {}))
def test_cache_cumulative(self, testdir):
"""
Test workflow where user fixes errors gradually file by file using --lf.
"""
# 1. initial run
test_bar = testdir.makepyfile(test_bar="""
def test_bar_1():
pass
def test_bar_2():
assert 0
""")
test_foo = testdir.makepyfile(test_foo="""
def test_foo_3():
pass
def test_foo_4():
assert 0
""")
testdir.runpytest()
assert self.get_cached_last_failed(testdir) == ['test_bar.py::test_bar_2', 'test_foo.py::test_foo_4']
# 2. fix test_bar_2, run only test_bar.py
testdir.makepyfile(test_bar="""
def test_bar_1():
pass
def test_bar_2():
pass
""")
result = testdir.runpytest(test_bar)
result.stdout.fnmatch_lines('*2 passed*')
# ensure cache does not forget that test_foo_4 failed once before
assert self.get_cached_last_failed(testdir) == ['test_foo.py::test_foo_4']
result = testdir.runpytest('--last-failed')
result.stdout.fnmatch_lines('*1 failed, 3 deselected*')
assert self.get_cached_last_failed(testdir) == ['test_foo.py::test_foo_4']
# 3. fix test_foo_4, run only test_foo.py
test_foo = testdir.makepyfile(test_foo="""
def test_foo_3():
pass
def test_foo_4():
pass
""")
result = testdir.runpytest(test_foo, '--last-failed')
result.stdout.fnmatch_lines('*1 passed, 1 deselected*')
assert self.get_cached_last_failed(testdir) == []
result = testdir.runpytest('--last-failed')
result.stdout.fnmatch_lines('*4 passed*')
assert self.get_cached_last_failed(testdir) == []
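# Summary note (added commentary, not part of the original test module): the
# options exercised above belong to pytest's cache plugin -- "--lf"/"--last-failed"
# reruns only the tests recorded as failing, "--ff"/"--failed-first" runs the
# recorded failures first and then everything else, and "--cache-clear" resets
# the recorded state (stored under .pytest_cache) before the run.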
| [] | [] | [] | [] | [] | python | 0 | 0 | |
infra/vprotogen/main.go |
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"github.com/xuanlove/v2ray-core"
"github.com/xuanlove/v2ray-core/common"
)
func main() {
pwd, wdErr := os.Getwd()
if wdErr != nil {
fmt.Println("Can not get current working directory.")
os.Exit(1)
}
GOBIN := common.GetGOBIN()
binPath := os.Getenv("PATH")
pathSlice := []string{binPath, GOBIN, pwd}
binPath = strings.Join(pathSlice, string(os.PathListSeparator))
os.Setenv("PATH", binPath)
EXE := ""
if runtime.GOOS == "windows" {
EXE = ".exe"
}
protoc := "protoc" + EXE
if path, err := exec.LookPath(protoc); err != nil {
fmt.Println("Make sure that you have `" + protoc + "` in your system path or current path. To download it, please visit https://github.com/protocolbuffers/protobuf/releases")
os.Exit(1)
} else {
protoc = path
}
protoFilesMap := make(map[string][]string)
walkErr := filepath.Walk("./", func(path string, info os.FileInfo, err error) error {
if err != nil {
fmt.Println(err)
return err
}
if info.IsDir() {
return nil
}
dir := filepath.Dir(path)
filename := filepath.Base(path)
if strings.HasSuffix(filename, ".proto") {
protoFilesMap[dir] = append(protoFilesMap[dir], path)
}
return nil
})
if walkErr != nil {
fmt.Println(walkErr)
os.Exit(1)
}
for _, files := range protoFilesMap {
for _, relProtoFile := range files {
var args []string
if core.ProtoFilesUsingProtocGenGoFast[relProtoFile] {
args = []string{"--gofast_out", pwd, "--gofast_opt", "paths=source_relative", "--plugin", "protoc-gen-gofast=" + GOBIN + "/protoc-gen-gofast" + EXE}
} else {
args = []string{"--go_out", pwd, "--go_opt", "paths=source_relative", "--go-grpc_out", pwd, "--go-grpc_opt", "paths=source_relative", "--plugin", "protoc-gen-go=" + GOBIN + "/protoc-gen-go" + EXE, "--plugin", "protoc-gen-go-grpc=" + GOBIN + "/protoc-gen-go-grpc" + EXE}
}
args = append(args, relProtoFile)
cmd := exec.Command(protoc, args...)
cmd.Env = append(cmd.Env, os.Environ()...)
output, cmdErr := cmd.CombinedOutput()
if len(output) > 0 {
fmt.Println(string(output))
}
if cmdErr != nil {
fmt.Println(cmdErr)
os.Exit(1)
}
}
}
}
| ["\"PATH\""] | [] | ["PATH"] | [] | ["PATH"] | go | 1 | 0 | |
source/setups/mhd/2d/current_sheet/preproc.py |
#!/usr/bin/env python
import sys,os
home = os.environ['NEMO_HOME']
setup = os.environ['NEMO_SETUP']
sys.path.append(os.path.join(home,'scripts'))
from nemo import Makefile
flags = dict(
PP_N_NODES = 8,
PP_KERNEL_DGFV_QMAX = 3,
PP_MESH_PERIODIC = 1,
DO_PROFILING = 0,
PP_SPLIT_FORM = 0,
)
mk = Makefile(home,flags)
#kernel = home + '/source/kernel/dgfv/2d'
kernel = home + '/source/kernel/dgfv-smoothing/2d'
#kernel = home + '/source/kernel/fv-mc/2d'
#kernel = home + '/source/kernel/fv/2d'
#kernel = home + '/source/kernel/fv-minmod/2d'
#kernel = home + '/source/kernel/fv/2d'
# timedisc = home + '/source/timedisc/rungekutta/5-4'
timedisc = home + '/source/timedisc/rungekutta/ssp/5-4'
equations = home + '/source/equations/mhd/polytropic/2d'
mk.add(equations + '/riemann/rusanov/riemann_mod.f90',alias='riemann_inner_mod')
for d in 'north south west east'.split():
mk.add(equations + '/riemann/rusanov/riemann_mod.f90',alias='riemann_{0}_mod'.format(d))
# mk.add(equations + '/two_point/ec/two_point_flux_mod.f90')
mk.add(home + '/source/share_mod.f90')
mk.add(home + '/source/driver_mod.f90')
mk.add(home + '/source/runtime_mod.f90')
mk.add(home + '/source/utils/*.f90')
mk.add(home + '/source/mesh/**.f90')
mk.add(home + '/source/checkpoint/hdf5/flash/*.f90')
mk.add(equations + '/*.f90')
mk.add(kernel + '/*.f90')
mk.add(timedisc + '/*.f90')
mk.add(setup + '/../_common_/*_mod.f90')
mk.add(setup + '/*_mod.f90')
mk.generate()
| [] | [] | ["NEMO_HOME", "NEMO_SETUP"] | [] | ["NEMO_HOME", "NEMO_SETUP"] | python | 2 | 0 | |
SheetMe.py |
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "oschub.settings")
import django
django.setup()
import gspread
from google.oauth2 import service_account
from eventreg.models import EventUserData, Event
from accounts.models import MailList
import datetime
from decouple import config
import json
# creates a spreadSheet.
def createSpreadSheet(mailList, title="NewSpreadsheet"):
try:
global createdNewSpreadSheet
if not createdNewSpreadSheet:
sheet = service.create(title)
print("[$] SpreadSheet ID: " + str(sheet.id))
for index, emailid in enumerate(mailList):
                # Commented out: sharing with role="owner" caused an Ownership Access error
# if index == 0:
# sheet.share(emailid, perm_type="user", role="owner")
# else:
sheet.share(emailid, perm_type="user", role="writer", notify=True)
print("Shared sheet to " + emailid)
createdNewSpreadSheet = True
except gspread.exceptions.APIError as error:
print("API Error: Trying Again !!")
print(error)
createSpreadSheet(mailList, title) # If API error then try again
def createSheet(title="EventName", row="10000", col="25"):
try:
global createdNewSpreadSheet
sheet = service.open("Events") # opens the file "Events"
print("[x] Found spreadsheet 'Events' ")
if createdNewSpreadSheet:
sheet.add_worksheet(title, rows=row, cols=col)
tmp = sheet.get_worksheet(0)
sheet.del_worksheet(tmp)
print(f"[!] Renamed default Sheet1 to {title}")
createdNewSpreadSheet = False
else:
sheet.add_worksheet(title, rows=row, cols=col)
print("[x] Added sheet - " + title)
worksheet = sheet.worksheet(title)
worksheet.append_row(["Reg No", "Name", "Email", "Registered", "Attended"])
worksheet.format(
"A1:E1", {"horizontalAlignment": "CENTER", "textFormat": {"bold": True}}
)
print(f"[x] Added Header data to the sheet {title}")
return worksheet
except gspread.exceptions.SpreadsheetNotFound:
print('[!] "Events" SpreadSheet not found, attempting to create a new one')
createSpreadSheet(admin_mail, "Events")
createSheet(title)
def getCompletedEvents():
# Filtering out the events that are over
events = Event.objects.all().filter(
eventDate__lt=datetime.date.today()
) # gets the events with date before today
eventlist = []
for event in events:
eventlist.append(event.eventName.replace(":", "|"))
events = Event.objects.filter(eventDate=datetime.date.today()).filter(
eventEndTime__lt=datetime.datetime.now().strftime("%H:%M:%S")
)
for event in events:
eventlist.append(event.eventName.replace(":", "|"))
return eventlist
def updateData():
admin_mail_latest = getAdminMail()
event_list = getCompletedEvents()
# If spreadsheet not found then make a new one
try:
sheet = service.open("Events")
except gspread.exceptions.SpreadsheetNotFound:
print('[!] "Events" SpreadSheet not found, attempting to create a new one')
createSpreadSheet(admin_mail, "Events")
sheet = service.open("Events")
# sharing the sheet once again to share the file with newly added user
for email_id in admin_mail_latest:
if email_id not in admin_mail:
sheet.share(email_id, perm_type="user", role="writer", notify=True)
print("Shared sheet to " + email_id)
# get all the available worksheets
worksheet = sheet.worksheets()
sheetList = []
for work in worksheet:
sheetList.append(work.title)
# getting user data for the events that are over
for event in event_list:
studentList = []
if event in sheetList:
print(f"[!] Skipping the Sheet, the worksheet {event} already exists !!")
else:
students = EventUserData.objects.filter(
eventName__eventName=event.replace("|", ":")
)
for student in students:
studentList.append(
[
student.studentReg,
student.studentName,
student.studentEmail,
"Yes" if student.studentRegistered else "No",
"Yes" if student.studentCheckedIn else "No",
]
)
worksheet = createSheet(event)
worksheet.batch_update(
[{"range": f"A2:E{len(studentList) + 1}", "values": studentList}]
)
print("[x] Added user data set to sheet " + event)
def getAdminMail():
admin_mail = []
mailList = MailList.objects.all()
for mail in mailList:
admin_mail.append(mail.email)
return admin_mail
def delAllSpreadsheet():
for spreadsheet in service.openall():
service.del_spreadsheet(spreadsheet.id)
print("deleted " + spreadsheet.title + " || " + spreadsheet.id)
# CAUTION: First Email is given owner access, rest all emails are given writer access due to API restrictions.
createdNewSpreadSheet = False
admin_mail = getAdminMail()
SCOPE = [
"https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive",
]
credential_info = json.loads(config("CREDENTIALS"))
credential = service_account.Credentials.from_service_account_info(
credential_info, scopes=SCOPE
)
service = gspread.authorize(credential)
if __name__ == "__main__":
# Use the following method to update data to the google spreadsheet
updateData()
# Use the following method to delete all the existing spreadsheets of the bot account
# delAllSpreadsheet()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
src/handlers/e2e/logger.py |
import json
import logging
import logging.config
import os
class JsonLogFormatter(logging.Formatter):
def format(self, record):
result = {}
for attr, value in record.__dict__.items():
if attr == 'asctime':
value = self.formatTime(record)
if attr == 'exc_info' and value is not None:
value = self.formatException(value)
if attr == 'stack_info' and value is not None:
value = self.formatStack(value)
result[attr] = value
result['lambda_request_id'] = os.environ.get('LAMBDA_REQUEST_ID')
return json.dumps(result)
def get_logging_config():
return {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'logFormatter': {
'()': 'logger.JsonLogFormatter'
}
},
'loggers': {
'console': {
'handlers': ['consoleHandler'],
'level': 'DEBUG'
},
'botocore': {
'handlers': ['consoleHandler'],
'level': 'INFO'
},
'aws_sns_to_slack.py': {
'handlers': ['consoleHandler'],
'level': 'DEBUG'
}
},
'handlers': {
'consoleHandler': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'logFormatter'
}
},
'root': {
'handlers': ['consoleHandler'],
'level': 'DEBUG'
}
}
def get_logger(name):
logging.config.dictConfig(get_logging_config())
return logging.getLogger(name)
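# Illustrative usage (hedged sketch; the message text is made up):
#   log = get_logger('aws_sns_to_slack.py')
#   log.debug('handler invoked')
# Each record is emitted as a single JSON object that also carries the
# LAMBDA_REQUEST_ID environment variable under 'lambda_request_id'.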
| [] | [] | ["LAMBDA_REQUEST_ID"] | [] | ["LAMBDA_REQUEST_ID"] | python | 1 | 0 | |
episodes/052/vendor/github.com/kubicorn/kubicorn/pkg/ssh/auth/agent.go |
// Copyright © 2017 The Kubicorn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"bytes"
"fmt"
"io/ioutil"
"net"
"os"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
// SystemAgentIfExists returns system agent if it exists.
func SystemAgentIfExists() (agent.Agent, error) {
sshAgent, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
if err != nil {
return nil, err
}
return agent.NewClient(sshAgent), err
}
// CheckKey checks is key present in the agent.
func CheckKey(agent agent.Agent, pubkey string) error {
p, err := ioutil.ReadFile(pubkey)
if err != nil {
return err
}
	authkey, _, _, _, err := ssh.ParseAuthorizedKey(p)
if err != nil {
return err
}
parsedkey := authkey.Marshal()
list, err := agent.List()
if err != nil {
return err
}
for _, key := range list {
if bytes.Equal(key.Blob, parsedkey) {
return nil
}
}
return fmt.Errorf("key not found in keyring")
}
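// Illustrative usage (hedged sketch; the key path is made up):
//   a, err := SystemAgentIfExists()
//   if err == nil {
//       err = CheckKey(a, "/home/user/.ssh/id_rsa.pub")
//   }
// CheckKey returns nil only when the public key at the given path is already
// loaded into the running ssh-agent reachable via SSH_AUTH_SOCK.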
| ["\"SSH_AUTH_SOCK\""] | [] | ["SSH_AUTH_SOCK"] | [] | ["SSH_AUTH_SOCK"] | go | 1 | 0 | |
lib/googlecloudsdk/core/console/console_io.py |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General console printing utilities used by the Cloud SDK."""
import logging
import os
import re
import sys
import textwrap
import threading
import time
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.console import console_pager
from googlecloudsdk.core.util import files
from googlecloudsdk.third_party.py27 import py27_subprocess as subprocess
FLOAT_COMPARE_EPSILON = 1e-6
class Error(exceptions.Error):
"""Base exception for the module."""
pass
class UnattendedPromptError(Error):
"""An exception for when a prompt cannot be answered."""
def __init__(self):
super(UnattendedPromptError, self).__init__(
'This prompt could not be answered because you are not in an '
'interactive session. You can re-run the command with the --quiet '
'flag to accept default answers for all prompts.')
class OperationCancelledError(Error):
"""An exception for when a prompt cannot be answered."""
def __init__(self):
super(OperationCancelledError, self).__init__('Operation cancelled.')
class TablePrinter(object):
"""Provides the ability to print a list of items as a formatted table.
Using this class helps you adhere to the gcloud style guide.
The table will auto size the columns to fit the maximum item length for that
column. You can also choose how to justify each column and to add extra
padding to each column.
"""
JUSTIFY_LEFT = '<'
JUSTIFY_RIGHT = '>'
JUSTIFY_CENTER = '^'
def __init__(self, headers, title=None,
justification=None, column_padding=None):
"""Creates a new TablePrinter.
Args:
headers: A tuple of strings that represent the column headers titles.
This can be a tuple of empty strings or None's if you do not want
headers displayed. The number of empty elements in the tuple must match
the number of columns you want to display.
title: str, An optional title for the table.
justification: A tuple of JUSTIFY_LEFT, JUSTIFY_RIGHT, JUSTIFY_CENTER that
describes the justification for each column. This must have the same
number of items as the headers tuple.
column_padding: A tuple of ints that describes the extra padding that
should be added to each column. This must have the same
number of items as the headers tuple.
Raises:
ValueError: If the justification or column_padding tuples are not of the
correct type or length.
"""
self.__headers = [h if h else '' for h in headers]
self.__title = title
self.__num_columns = len(self.__headers)
self.__header_widths = [len(str(x)) for x in self.__headers]
self.__column_padding = column_padding
if self.__column_padding is None:
self.__column_padding = tuple([0] * self.__num_columns)
if (not isinstance(self.__column_padding, (tuple)) or
len(self.__column_padding) != self.__num_columns):
raise ValueError('Column padding tuple does not have {0} columns'
.format(self.__num_columns))
self.__justification = justification
if self.__justification is None:
self.__justification = tuple([TablePrinter.JUSTIFY_LEFT] *
self.__num_columns)
if (not isinstance(self.__justification, tuple) or
len(self.__justification) != self.__num_columns):
raise ValueError('Justification tuple does not have {0} columns'
.format(self.__num_columns))
for value in self.__justification:
if not (value is TablePrinter.JUSTIFY_LEFT or
value is TablePrinter.JUSTIFY_RIGHT or
value is TablePrinter.JUSTIFY_CENTER):
raise ValueError('Justification values must be one of JUSTIFY_LEFT, '
'JUSTIFY_RIGHT, or JUSTIFY_CENTER')
def SetTitle(self, title):
"""Sets the title of the table.
Args:
title: str, The new title.
"""
self.__title = title
def Log(self, rows, logger=None, level=logging.INFO):
"""Logs the given rows to the given logger.
Args:
rows: list of tuples, The rows to log the formatted table for.
logger: logging.Logger, The logger to do the logging. If None, the root
logger will be used.
level: logging level, An optional override for the logging level, INFO by
default.
"""
if not logger:
logger = log.getLogger()
lines = self.GetLines(rows)
for line in lines:
logger.log(level, line)
def Print(self, rows, output_stream=None, indent=0):
"""Prints the given rows to stdout.
Args:
rows: list of tuples, The rows to print the formatted table for.
output_stream: file-like object, The stream to wire the rows to. Defaults
to log.out if not given.
indent: int, The number of spaces to indent all lines of the table.
"""
if not output_stream:
output_stream = log.out
lines = self.GetLines(rows, indent=indent)
for line in lines:
output_stream.write(line + '\n')
def GetLines(self, rows, indent=0):
"""Gets a list of strings of formatted lines for the given rows.
Args:
rows: list of tuples, The rows to get the formatted table for.
indent: int, The number of spaces to indent all lines of the table.
Returns:
list of str, The lines of the formatted table that can be printed.
Raises:
ValueError: If any row does not have the correct number of columns.
"""
column_widths = list(self.__header_widths)
for row in rows:
if len(row) != self.__num_columns:
raise ValueError('Row [{row}] does not have {rows} columns'
.format(row=row, rows=self.__num_columns))
# Find the max width of each column
for i in range(self.__num_columns):
column_widths[i] = max(column_widths[i], len(str(row[i])))
# Add padding
column_widths = [column_widths[i] + self.__column_padding[i]
for i in range(self.__num_columns)]
total_width = (len(column_widths) - 1) * 3
for width in column_widths:
total_width += width
edge_line = ('--' +
'---'.join(['-' * width for width in column_widths]) +
'--')
title_divider_line = ('|-' +
'---'.join(['-' * width for width in column_widths]) +
'-|')
divider_line = ('|-' +
'-+-'.join(['-' * width for width in column_widths]) +
'-|')
lines = [edge_line]
if self.__title:
title_line = '| {{title:{justify}{width}s}} |'.format(
justify=TablePrinter.JUSTIFY_CENTER, width=total_width).format(
title=self.__title)
lines.append(title_line)
lines.append(title_divider_line)
# Generate format strings with the correct width for each column
column_formats = []
for i in range(self.__num_columns):
column_formats.append('{{i{i}:{justify}{width}s}}'.format(
i=i, justify=self.__justification[i], width=column_widths[i]))
pattern = '| ' + ' | '.join(column_formats) + ' |'
def _ParameterizedArrayDict(array):
return dict(('i{i}'.format(i=i), array[i]) for i in range(len(array)))
if [h for h in self.__headers if h]:
# Only print headers if there is at least one non-empty header
lines.append(pattern.format(**_ParameterizedArrayDict(self.__headers)))
lines.append(divider_line)
lines.extend([pattern.format(**_ParameterizedArrayDict(row))
for row in rows])
lines.append(edge_line)
if indent:
return [(' ' * indent) + l for l in lines]
return lines
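  # Illustrative TablePrinter usage (hedged sketch; the headers and rows are made up):
  #   printer = TablePrinter(('NAME', 'ZONE'), title='Instances')
  #   printer.Print([('vm-1', 'us-central1-a'), ('vm-2', 'europe-west1-b')])
  # Columns are sized to the widest cell, and the optional title is centered
  # across the full table width.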
class ListPrinter(object):
"""Provides the ability to print a list of items as a formatted list.
Using this class helps you adhere to the gcloud style guide.
"""
def __init__(self, title):
"""Create a titled list printer that can print rows to stdout.
Args:
title: A string for the title of the list.
"""
self.__title = title
def Print(self, rows, output_stream=None):
"""Print this list with the provided rows to stdout.
Args:
rows: A list of objects representing the rows of this list. Before being
printed, they will be converted to strings.
output_stream: file-like object, The stream to wire the rows to. Defaults
to log.out if not given.
"""
if not output_stream:
output_stream = log.out
output_stream.write(self.__title + '\n')
for row in rows:
output_stream.write(' - ' + str(row) + '\n')
TEXTWRAP = textwrap.TextWrapper(replace_whitespace=False,
drop_whitespace=False,
break_on_hyphens=False)
def _DoWrap(message):
"""Text wrap the given message and correctly handle newlines in the middle.
Args:
message: str, The message to wrap. It may have newlines in the middle of
it.
Returns:
str, The wrapped message.
"""
return '\n'.join([TEXTWRAP.fill(line) for line in message.splitlines()])
def _RawInput(prompt=None):
"""A simple redirect to the built-in raw_input function.
If the prompt is given, it is correctly line wrapped.
Args:
prompt: str, An optional prompt.
Returns:
The input from stdin.
"""
if prompt:
sys.stderr.write(_DoWrap(prompt))
try:
return raw_input()
except EOFError:
return None
def IsInteractive(output=False, error=False, heuristic=False):
"""Determines if the current terminal session is interactive.
sys.stdin must be a terminal input stream.
Args:
output: If True then sys.stdout must also be a terminal output stream.
error: If True then sys.stderr must also be a terminal output stream.
heuristic: If True then we also do some additional heuristics to check if
we are in an interactive context. Checking home path for example.
Returns:
True if the current terminal session is interactive.
"""
if not sys.stdin.isatty():
return False
if output and not sys.stdout.isatty():
return False
if error and not sys.stderr.isatty():
return False
if heuristic:
# Check the home path. Most startup scripts for example are executed by
# users that don't have a home path set. Home is OS dependent though, so
# check everything.
# *NIX OS usually sets the HOME env variable. It is usually '/home/user',
# but can also be '/root'. If it's just '/' we are most likely in an init
# script.
# Windows usually sets HOMEDRIVE and HOMEPATH. If they don't exist we are
# probably being run from a task scheduler context. HOMEPATH can be '\'
# when a user has a network mapped home directory.
# Cygwin has it all! Both Windows and Linux. Checking both is perfect.
home = os.getenv('HOME')
homepath = os.getenv('HOMEPATH')
if not homepath and (not home or home == '/'):
return False
return True
def CanPrompt():
"""Returns true if we can prompt the user for information.
This combines all checks (IsInteractive(), disable_prompts is False) to
verify that we can prompt the user for information.
Returns:
bool, True if we can prompt the user for information.
"""
return (IsInteractive(error=True) and
not properties.VALUES.core.disable_prompts.GetBool())
def PromptContinue(message=None, prompt_string=None, default=True,
throw_if_unattended=False, cancel_on_no=False):
"""Prompts the user a yes or no question and asks if they want to continue.
Args:
message: str, The prompt to print before the question.
prompt_string: str, An alternate yes/no prompt to display. If None, it
defaults to 'Do you want to continue'.
default: bool, What the default answer should be. True for yes, False for
no.
throw_if_unattended: bool, If True, this will throw if there was nothing
to consume on stdin and stdin is not a tty.
cancel_on_no: bool, If True and the user answers no, throw an exception to
cancel the entire operation. Useful if you know you don't want to
continue doing anything and don't want to have to raise your own
exception.
Raises:
UnattendedPromptError: If there is no input to consume and this is not
running in an interactive terminal.
OperationCancelledError: If the user answers no and cancel_on_no is True.
Returns:
bool, False if the user said no, True if the user said anything else or if
prompts are disabled.
"""
if properties.VALUES.core.disable_prompts.GetBool():
if not default and cancel_on_no:
raise OperationCancelledError()
return default
if message:
sys.stderr.write(_DoWrap(message) + '\n\n')
if not prompt_string:
prompt_string = 'Do you want to continue'
if default:
prompt_string += ' (Y/n)? '
else:
prompt_string += ' (y/N)? '
sys.stderr.write(_DoWrap(prompt_string))
def GetAnswer():
while True:
answer = _RawInput()
# pylint:disable=g-explicit-bool-comparison, We explicitly want to
# distinguish between empty string and None.
if answer == '':
# User just hit enter, return default.
sys.stderr.write('\n')
return default
elif answer is None:
# This means we hit EOF, no input or user closed the stream.
if throw_if_unattended and not IsInteractive():
sys.stderr.write('\n')
raise UnattendedPromptError()
else:
sys.stderr.write('\n')
return default
elif answer.lower() in ['y', 'yes']:
sys.stderr.write('\n')
return True
elif answer.lower() in ['n', 'no']:
sys.stderr.write('\n')
return False
else:
sys.stderr.write("Please enter 'y' or 'n': ")
answer = GetAnswer()
if not answer and cancel_on_no:
raise OperationCancelledError()
return answer
def PromptResponse(message):
"""Prompts the user for a string.
Args:
message: str, The prompt to print before the question.
Returns:
str, The string entered by the user, or None if prompts are disabled.
"""
if properties.VALUES.core.disable_prompts.GetBool():
return None
response = _RawInput(message)
return response
def PromptWithDefault(message, default=None):
"""Prompts the user for a string, allowing a default.
Unlike PromptResponse, this also appends a ': ' to the prompt. If 'default'
  is specified, the default is also written into the prompt (e.g.
if message is "message" and default is "default", the prompt would be
"message (default): ").
The default is returned if the user simply presses enter (no input) or an
EOF is received.
Args:
message: str, The prompt to print before the question.
default: str, The default value (if any).
Returns:
str, The string entered by the user, or the default if no value was
entered or prompts are disabled.
"""
if properties.VALUES.core.disable_prompts.GetBool():
return default
if default:
message += ' ({default}): '.format(default=default)
else:
message += ': '
response = _RawInput(message)
if not response:
response = default
return response
def PromptChoice(options, default=None, message=None, prompt_string=None):
"""Prompt the user to select a choice from a list of items.
Args:
options: [object], A list of objects to print as choices. Their str()
method will be used to display them.
default: int, The default index to return if prompting is disabled or if
they do not enter a choice.
message: str, An optional message to print before the choices are displayed.
prompt_string: str, A string to print when prompting the user to enter a
choice. If not given, a default prompt is used.
Raises:
ValueError: If no options are given or if the default is not in the range of
available options.
Returns:
The index of the item in the list that was chosen, or the default if prompts
are disabled.
"""
if not options:
raise ValueError('You must provide at least one option.')
maximum = len(options)
if default is not None and not 0 <= default < maximum:
raise ValueError(
'Default option [{default}] is not a valid index for the options list '
'[{maximum} options given]'.format(default=default, maximum=maximum))
if properties.VALUES.core.disable_prompts.GetBool():
return default
if message:
sys.stderr.write(_DoWrap(message) + '\n')
for i, option in enumerate(options):
sys.stderr.write(' [{index}] {option}\n'.format(
index=i + 1, option=str(option)))
if not prompt_string:
prompt_string = 'Please enter your numeric choice'
if default is None:
suffix_string = ': '
else:
suffix_string = ' ({default}): '.format(default=default + 1)
sys.stderr.write(_DoWrap(prompt_string + suffix_string))
while True:
answer = _RawInput()
    if answer is None or (answer == '' and default is not None):
# Return default if we failed to read from stdin
# Return default if the user hit enter and there is a valid default
# Prompt again otherwise
sys.stderr.write('\n')
return default
try:
num_choice = int(answer)
if num_choice < 1 or num_choice > maximum:
raise ValueError('Choice must be between 1 and {maximum}'.format(
maximum=maximum))
sys.stderr.write('\n')
return num_choice - 1
except ValueError:
sys.stderr.write('Please enter a value between 1 and {maximum}: '
.format(maximum=maximum))
def LazyFormat(s, **kwargs):
"""Converts {key} => value for key, value in kwargs.iteritems().
  After the {key} conversions it converts {{<identifier>}} => {<identifier>}.
Args:
s: str, The string to format.
**kwargs: {str:str}, A dict of strings for named parameters.
Returns:
str, The lazily-formatted string.
"""
for key, value in kwargs.iteritems():
fmt = '{' + key + '}'
start = 0
while True:
start = s.find(fmt, start)
if start == -1:
break
if (start and s[start - 1] == '{' and
len(fmt) < len(s[start:]) and s[start + len(fmt)] == '}'):
# {{key}} => {key}
s = s[0:start - 1] + fmt + s[start + len(fmt) + 1:]
start += len(fmt)
else:
# {key} => value
s = s[0:start] + value + s[start + len(fmt):]
start += len(value)
# {{unknown}} => {unknown}
return re.sub(r'{({\w+})}', r'\1', s)
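# Illustrative LazyFormat behaviour (hedged sketch; the keys are made up):
#   LazyFormat('{a} and {{b}}', a='x')  ->  'x and {b}'
# Supplied {key} placeholders are replaced by their values, while doubled
# braces around unsupplied identifiers collapse to single literal braces.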
def PrintExtendedList(items, col_fetchers):
"""Print a properly formated extended list for some set of resources.
If items is a generator, this function may elect to only request those rows
that it is ready to display.
Args:
items: [resource] or a generator producing resources, The objects
representing cloud resources.
col_fetchers: [(string, func(resource))], A list of tuples, one for each
column, in the order that they should appear. The string is the title
of that column which will be printed in a header. The func is a function
that will fetch a row-value for that column, given the resource
corresponding to the row.
"""
total_items = 0
rows = [[title for (title, unused_func) in col_fetchers]]
for item in items:
total_items += 1
row = []
for (unused_title, func) in col_fetchers:
value = func(item)
if value is None:
row.append('-')
else:
row.append(value)
rows.append(row)
attr = console_attr.GetConsoleAttr()
max_col_widths = [0] * len(col_fetchers)
for row in rows:
for col in range(len(row)):
max_col_widths[col] = max(max_col_widths[col],
attr.DisplayWidth(unicode(row[col]))+2)
for row in rows:
for col in range(len(row)):
width = max_col_widths[col]
item = unicode(row[col])
item_width = attr.DisplayWidth(item)
if item_width < width and col != len(row) - 1:
item += u' ' * (width - item_width)
log.out.write(item)
log.out.write('\n')
if not total_items:
log.status.write('Listed 0 items.\n')
class ProgressTracker(object):
"""A context manager for telling the user about long-running progress."""
SPIN_MARKS = [
'|',
'/',
'-',
'\\',
]
def __init__(self, message, autotick=True, detail_message_callback=None,
tick_delay=1):
self._message = message
self._prefix = message + '...'
self._ticks = 0
self._autotick = autotick
self._done = False
self._lock = threading.Lock()
self._detail_message_callback = detail_message_callback
self._last_message_size = 0
self._tick_delay = tick_delay
self._is_tty = IsInteractive(output=True, error=True)
def _GetPrefix(self):
if self._detail_message_callback:
detail_message = self._detail_message_callback()
if detail_message:
return self._prefix + ' ' + detail_message + '...'
return self._prefix
def __enter__(self):
log.file_only_logger.info(self._GetPrefix())
self._Print()
if self._autotick:
def Ticker():
while True:
time.sleep(self._tick_delay)
if self.Tick():
return
threading.Thread(target=Ticker).start()
return self
def Tick(self):
"""Give a visual indication to the user that some progress has been made.
Output is sent to sys.stderr. Nothing is shown if output is not a TTY.
Returns:
Whether progress has completed.
"""
if self._is_tty:
with self._lock:
if not self._done:
self._ticks += 1
self._Print(ProgressTracker.SPIN_MARKS[
self._ticks % len(ProgressTracker.SPIN_MARKS)])
return self._done
def _Print(self, message=''):
"""Reprints the prefix followed by an optional message."""
display_message = self._GetPrefix()
if message:
display_message += message
# This is to clear the display buffer, otherwise it would display the
# trailing parts of the previous line
if self._last_message_size > 0:
sys.stderr.write('\r' + self._last_message_size * ' ')
self._last_message_size = len(display_message)
sys.stderr.write('\r' + display_message)
sys.stderr.flush()
def __exit__(self, ex_type, unused_value, unused_traceback):
with self._lock:
self._done = True
# If an exception was raised during progress tracking, exit silently here
# and let the appropriate exception handler tell the user what happened.
if ex_type:
# This is to prevent the tick character from appearing before 'failed.'
# (ex. 'message...failed' instead of 'message.../failed.')
self._Print('failed.\n')
return False
self._Print('done.\n')
class DelayedProgressTracker(ProgressTracker):
"""A progress tracker that only appears during a long running operation.
  Waits for the given timeout, then displays a progress tracker.
"""
class TrackerState(object):
"""Enum representing the current state of the progress tracker."""
class _TrackerStateTuple(object):
def __init__(self, name):
self.name = name
WAITING = _TrackerStateTuple('Waiting')
STARTED = _TrackerStateTuple('Started')
FINISHED = _TrackerStateTuple('Finished')
def __init__(self, message, timeout, autotick=True,
detail_message_callback=None):
super(DelayedProgressTracker, self).__init__(
message, autotick=autotick,
detail_message_callback=detail_message_callback)
self._timeout = timeout
self._state = self.TrackerState.WAITING
self._state_lock = threading.Lock()
def _SleepWhileNotFinished(self, timeout, increment=0.1):
"""Sleep for the given time unless the tracker enters the FINISHED state.
Args:
timeout: number, the total time for which to sleep
increment: number, the increment at which to check whether the tracker is
FINISHED
Returns:
bool, True unless the tracker reached the FINISHED state before the total
sleep time elapsed
"""
elapsed_time = 0
while (elapsed_time + FLOAT_COMPARE_EPSILON) <= timeout:
time.sleep(increment)
elapsed_time += increment
if self._state is self.TrackerState.FINISHED:
return False
return True
def __enter__(self):
def StartTracker():
if not self._SleepWhileNotFinished(self._timeout):
# If we aborted sleep early, return. We exited the progress tracker
# before the delay finished.
return
with self._state_lock:
if self._state is not self.TrackerState.FINISHED:
self._state = self.TrackerState.STARTED
super(DelayedProgressTracker, self).__enter__()
threading.Thread(target=StartTracker).start()
return self
def __exit__(self, exc_type, exc_value, traceback):
with self._state_lock:
if self._state is self.TrackerState.STARTED:
super(DelayedProgressTracker, self).__exit__(exc_type, exc_value,
traceback)
self._state = self.TrackerState.FINISHED
def Tick(self):
with self._state_lock:
if self._state is self.TrackerState.STARTED:
return super(DelayedProgressTracker, self).Tick()
return self._state is self.TrackerState.FINISHED
class ProgressBar(object):
"""A simple progress bar for tracking completion of an action.
This progress bar works without having to use any control characters. It
prints the action that is being done, and then fills a progress bar below it.
You should not print anything else on the output stream during this time as it
will cause the progress bar to break on lines.
Progress bars can be stacked into a group. first=True marks the first bar in
the group and last=True marks the last bar in the group. The default assumes
a singleton bar with first=True and last=True.
This class can also be used in a context manager.
"""
@staticmethod
def _DefaultCallback(progress_factor):
pass
DEFAULT_CALLBACK = _DefaultCallback
@staticmethod
def SplitProgressBar(original_callback, weights):
"""Splits a progress bar into logical sections.
Wraps the original callback so that each of the subsections can use the full
range of 0 to 1 to indicate its progress. The overall progress bar will
display total progress based on the weights of the tasks.
Args:
original_callback: f(float), The original callback for the progress bar.
weights: [float], The weights of the tasks to create. These can be any
numbers you want and the split will be based on their proportions to
each other.
Raises:
ValueError: If the weights don't add up to 1.
Returns:
(f(float), ), A tuple of callback functions, in order, for the subtasks.
"""
if (original_callback is None or
original_callback == ProgressBar.DEFAULT_CALLBACK):
return tuple([ProgressBar.DEFAULT_CALLBACK for _ in range(len(weights))])
def MakeCallback(already_done, weight):
def Callback(done_fraction):
original_callback(already_done + (done_fraction * weight))
return Callback
total = float(sum(weights))
callbacks = []
already_done = 0
for weight in weights:
normalized_weight = weight / total
callbacks.append(MakeCallback(already_done, normalized_weight))
already_done += normalized_weight
return tuple(callbacks)
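  # Illustrative SplitProgressBar usage (hedged sketch; 'bar' stands for an
  # existing ProgressBar instance):
  #   download_cb, extract_cb = ProgressBar.SplitProgressBar(bar.SetProgress, [3, 1])
  #   download_cb(0.5)   # reports overall progress 0.5 * 3/4 = 0.375
  #   extract_cb(1.0)    # reports overall progress 0.75 + 1.0 * 1/4 = 1.0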
def __init__(self, label, stream=log.status, total_ticks=60, first=True,
last=True):
"""Creates a progress bar for the given action.
Args:
label: str, The action that is being performed.
stream: The output stream to write to, stderr by default.
total_ticks: int, The number of ticks wide to make the progress bar.
first: bool, True if this is the first bar in a stacked group.
last: bool, True if this is the last bar in a stacked group.
"""
self._stream = stream
self._ticks_written = 0
self._total_ticks = total_ticks
self._first = first
self._last = last
attr = console_attr.ConsoleAttr()
self._box = attr.GetBoxLineCharacters()
self._redraw = (self._box.d_dr != self._box.d_vr or
self._box.d_dl != self._box.d_vl)
max_label_width = self._total_ticks - 4
if len(label) > max_label_width:
label = label[:max_label_width - 3] + '...'
elif len(label) < max_label_width:
diff = max_label_width - len(label)
label += ' ' * diff
left = self._box.d_vr + self._box.d_h
right = self._box.d_h + self._box.d_vl
self._label = u'{left} {label} {right}'.format(left=left, label=label,
right=right)
def Start(self):
"""Starts the progress bar by writing the top rule and label."""
if self._first or self._redraw:
left = self._box.d_dr if self._first else self._box.d_vr
right = self._box.d_dl if self._first else self._box.d_vl
rule = u'{left}{middle}{right}\n'.format(
left=left, middle=self._box.d_h * self._total_ticks, right=right)
self._stream.write(rule)
self._stream.write(self._label + '\n')
self._stream.write(self._box.d_ur)
self._ticks_written = 0
def SetProgress(self, progress_factor):
"""Sets the current progress of the task.
This method has no effect if the progress bar has already progressed past
the progress you call it with (since the progress bar cannot back up).
Args:
progress_factor: float, The current progress as a float between 0 and 1.
"""
expected_ticks = int(self._total_ticks * progress_factor)
new_ticks = expected_ticks - self._ticks_written
# Don't allow us to go over 100%.
new_ticks = min(new_ticks, self._total_ticks - self._ticks_written)
if new_ticks > 0:
self._stream.write(self._box.d_h * new_ticks)
self._ticks_written += new_ticks
if expected_ticks == self._total_ticks:
end = '\n' if self._last or not self._redraw else '\r'
self._stream.write(self._box.d_ul + end)
self._stream.flush()
def Finish(self):
"""Mark the progress as done."""
self.SetProgress(1)
def __enter__(self):
self.Start()
return self
def __exit__(self, *args):
self.Finish()
def More(contents, out=None, prompt=None, check_pager=True):
"""Run a user specified pager or fall back to the internal pager.
Args:
contents: The entire contents of the text lines to page.
out: The output stream, log.out (effectively) if None.
prompt: The page break prompt.
check_pager: Checks the PAGER env var and uses it if True.
"""
if not IsInteractive(output=True):
if not out:
out = log.out
out.write(contents)
return
if not out:
# Rendered help to the log file.
log.file_only_logger.info(contents)
# Paging shenanigans to stdout.
out = sys.stdout
if check_pager:
pager = os.environ.get('PAGER', None)
if pager == '-':
# Use the fallback Pager.
pager = None
elif not pager:
# Search for a pager that handles ANSI escapes.
for command in ('less', 'pager'):
if files.FindExecutableOnPath(command):
pager = command
break
if pager:
less = os.environ.get('LESS', None)
if less is None:
os.environ['LESS'] = '-R'
p = subprocess.Popen(pager, stdin=subprocess.PIPE, shell=True)
encoding = console_attr.GetConsoleAttr().GetEncoding()
p.communicate(input=contents.encode(encoding))
p.wait()
if less is None:
os.environ.pop('LESS')
return
# Fall back to the internal pager.
console_pager.Pager(contents, out, prompt).Run()
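# Illustrative More() usage (hedged sketch; 'rendered_help' is a made-up variable):
#   More(rendered_help, prompt='Press q to quit')
# With check_pager=True the PAGER environment variable is honoured (a value of
# '-' forces the built-in pager), LESS defaults to '-R' so ANSI escapes render,
# and non-interactive output is written straight through without paging.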
| [] | [] | ["PAGER", "LESS", "HOME", "HOMEPATH"] | [] | ["PAGER", "LESS", "HOME", "HOMEPATH"] | python | 4 | 0 | |
cmd/download.go |
package cmd
import (
"crypto"
"crypto/rsa"
"crypto/x509"
"encoding/hex"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/pinpt/go-common/v10/api"
"github.com/pinpt/go-common/v10/fileutil"
"github.com/pinpt/go-common/v10/log"
pos "github.com/pinpt/go-common/v10/os"
pstr "github.com/pinpt/go-common/v10/strings"
"github.com/spf13/cobra"
)
func openCert(pemFilename string) (*x509.Certificate, error) {
buf, err := ioutil.ReadFile(pemFilename)
if err != nil {
return nil, fmt.Errorf("error openning file: %w", err)
}
block, _ := pem.Decode(buf)
if block == nil {
return nil, fmt.Errorf("no pem data in file %s", pemFilename)
}
return x509.ParseCertificate(block.Bytes)
}
func downloadIntegration(logger log.Logger, channel string, toDir string, publisher string, integration string, version string) (string, error) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
return "", fmt.Errorf("error creating temp dir: %w", err)
}
defer os.RemoveAll(tmpdir)
cl, err := api.NewHTTPAPIClientDefault()
if err != nil {
return "", fmt.Errorf("error creating client: %w", err)
}
p := fmt.Sprintf("/fetch/%s/%s", publisher, integration)
if version != "" {
p += "/" + version
}
url := pstr.JoinURL(api.BackendURL(api.RegistryService, channel), p)
log.Debug(logger, "downloading", "url", url)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return "", fmt.Errorf("error creating request: %w", err)
}
resp, err := cl.Do(req)
if err != nil {
return "", fmt.Errorf("error executing request: %w", err)
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("error downloading request status %d", resp.StatusCode)
}
defer resp.Body.Close()
signature := resp.Header.Get("x-pinpt-signature")
if signature == "" {
return "", fmt.Errorf("no signature from server, cannot verify bundle")
}
sigBuf, err := hex.DecodeString(signature)
if err != nil {
return "", fmt.Errorf("error decoding signature: %w", err)
}
src := filepath.Join(tmpdir, "bundle.zip")
dest := filepath.Join(tmpdir, "bundle")
of, err := os.Create(src)
if err != nil {
return "", fmt.Errorf("error opening download file: %w", err)
}
defer of.Close()
if _, err := io.Copy(of, resp.Body); err != nil {
return "", fmt.Errorf("error copying bundle data: %w", err)
}
if err := of.Close(); err != nil {
return "", fmt.Errorf("error closing bundle.zip: %w", err)
}
if err := resp.Body.Close(); err != nil {
return "", fmt.Errorf("error closing response body: %w", err)
}
	// TODO(robin): figure out why we can't use hash.ChecksumCopy
sum, err := fileutil.Checksum(src)
if err != nil {
return "", fmt.Errorf("error taking checksum of downloaded bundle: %w", err)
}
checksum, err := hex.DecodeString(sum)
if err != nil {
return "", fmt.Errorf("error decoding checksum: %w", err)
}
if err := fileutil.Unzip(src, dest); err != nil {
return "", fmt.Errorf("error performing unzip for integration: %w", err)
}
certfile := filepath.Join(dest, "cert.pem")
if !fileutil.FileExists(certfile) {
return "", fmt.Errorf("error finding integration developer certificate (%s) for bundle", certfile)
}
cert, err := openCert(certfile)
if err != nil {
return "", fmt.Errorf("error opening certificate from bundle: %w", err)
}
pub, ok := cert.PublicKey.(*rsa.PublicKey)
if !ok {
return "", fmt.Errorf("certificate public key was not rsa")
}
if err := rsa.VerifyPKCS1v15(pub, crypto.SHA256, checksum, sigBuf); err != nil {
if err == rsa.ErrVerification {
return "", fmt.Errorf("invalid signature or certificate")
}
return "", fmt.Errorf("error verifying bundle signature: %w", err)
}
datafn := filepath.Join(dest, "data.zip")
if err := fileutil.Unzip(datafn, dest); err != nil {
return "", fmt.Errorf("error performing unzip for integration data: %w", err)
}
destfn := filepath.Join(dest, runtime.GOOS, runtime.GOARCH, integration)
if !fileutil.FileExists(destfn) {
return "", fmt.Errorf("error finding integration binary (%s) in bundle", destfn)
}
sf, err := os.Open(destfn)
if err != nil {
return "", fmt.Errorf("error opening file (%s): %w", destfn, err)
}
defer sf.Close()
outfn := filepath.Join(toDir, integration)
os.Remove(outfn)
df, err := os.Create(outfn)
if err != nil {
return "", fmt.Errorf("error creating file (%s): %w", outfn, err)
}
defer df.Close()
if _, err := io.Copy(df, sf); err != nil {
return "", fmt.Errorf("error copying binary data: %w", err)
}
if err := df.Close(); err != nil {
return "", fmt.Errorf("error closing output file (%s): %w", outfn, err)
}
os.Chmod(outfn, 0755) // make it executable
outfn, _ = filepath.Abs(outfn)
return outfn, nil
}
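// Illustrative call (hedged sketch; the destination directory is made up):
//   path, err := downloadIntegration(logger, "stable", "./bin", "pinpt", "github", "")
// An empty version omits the version segment from the registry fetch path, and
// the verified binary is copied to ./bin/github with mode 0755.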
// downloadCmd represents the download command
var downloadCmd = &cobra.Command{
Use: "download <destination> <integration> <version>",
Short: "download an integration from the registry",
Args: cobra.MinimumNArgs(2),
Run: func(cmd *cobra.Command, args []string) {
logger := log.NewCommandLogger(cmd)
defer logger.Close()
destDir := args[0]
fullIntegration := args[1]
version := args[2]
os.MkdirAll(destDir, 0700)
channel, _ := cmd.Flags().GetString("channel")
tok := strings.Split(fullIntegration, "/")
if len(tok) != 2 {
log.Fatal(logger, "integration should be in the format: publisher/integration such as pinpt/github")
}
publisher := tok[0]
integration := tok[1]
outfn, err := downloadIntegration(logger, channel, destDir, publisher, integration, version)
if err != nil {
log.Fatal(logger, "error downloading integration", "err", err)
}
log.Info(logger, "platform integration available at "+outfn)
},
}
func init() {
rootCmd.AddCommand(downloadCmd)
downloadCmd.Flags().String("channel", pos.Getenv("PP_CHANNEL", "stable"), "the channel which can be set")
}
| ["\"PP_CHANNEL\", \"stable\""] | [] | ["PP_CHANNEL\", \"stable"] | [] | ["PP_CHANNEL\", \"stable"] | go | 1 | 0 | |
main.go |
package main
import (
"context"
"fmt"
"log"
"net/http"
_ "net/http/pprof"
"os"
"path/filepath"
"syscall"
"time"
"github.com/docker/docker/pkg/reexec"
"github.com/ehazlett/simplelog"
"github.com/rancher/norman/pkg/dump"
"github.com/rancher/norman/signal"
"github.com/rancher/rancher/app"
"github.com/rancher/rancher/k8s"
"github.com/rancher/rancher/pkg/logserver"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var (
VERSION = "dev"
)
func main() {
app.RegisterPasswordResetCommand()
if reexec.Init() {
return
}
os.Unsetenv("SSH_AUTH_SOCK")
os.Unsetenv("SSH_AGENT_PID")
os.Setenv("DISABLE_HTTP2", "true")
if dir, err := os.Getwd(); err == nil {
dmPath := filepath.Join(dir, "management-state", "bin")
os.MkdirAll(dmPath, 0700)
newPath := fmt.Sprintf("%s%s%s", dmPath, string(os.PathListSeparator), os.Getenv("PATH"))
os.Setenv("PATH", newPath)
}
var config app.Config
app := cli.NewApp()
app.Version = VERSION
app.Usage = "Complete container management platform"
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "kubeconfig",
Usage: "Kube config for accessing k8s cluster",
EnvVar: "KUBECONFIG",
Destination: &config.KubeConfig,
},
cli.BoolFlag{
Name: "debug",
Usage: "Enable debug logs",
Destination: &config.Debug,
},
cli.StringFlag{
Name: "add-local",
Usage: "Add local cluster (true, false, auto)",
Value: "auto",
Destination: &config.AddLocal,
},
cli.IntFlag{
Name: "http-listen-port",
Usage: "HTTP listen port",
Value: 8080,
Destination: &config.HTTPListenPort,
},
cli.IntFlag{
Name: "https-listen-port",
Usage: "HTTPS listen port",
Value: 8443,
Destination: &config.HTTPSListenPort,
},
cli.StringFlag{
Name: "k8s-mode",
Usage: "Mode to run or access k8s API server for management API (embedded, external, auto)",
Value: "auto",
Destination: &config.K8sMode,
},
cli.StringFlag{
Name: "log-format",
Usage: "Log formatter used (json, text, simple)",
Value: "simple",
},
cli.StringSliceFlag{
Name: "acme-domain",
Usage: "Domain to register with LetsEncrypt",
},
cli.BoolFlag{
Name: "no-cacerts",
Usage: "Skip CA certs population in settings when set to true",
},
cli.StringFlag{
Name: "audit-log-path",
EnvVar: "AUDIT_LOG_PATH",
Value: "/var/log/auditlog/rancher-api-audit.log",
Usage: "Log path for Rancher Server API. Default path is /var/log/auditlog/rancher-api-audit.log",
},
cli.IntFlag{
Name: "audit-log-maxage",
Value: 10,
EnvVar: "AUDIT_LOG_MAXAGE",
Usage: "Defined the maximum number of days to retain old audit log files",
},
cli.IntFlag{
Name: "audit-log-maxbackup",
Value: 10,
EnvVar: "AUDIT_LOG_MAXBACKUP",
Usage: "Defines the maximum number of audit log files to retain",
},
cli.IntFlag{
Name: "audit-log-maxsize",
Value: 100,
EnvVar: "AUDIT_LOG_MAXSIZE",
Usage: "Defines the maximum size in megabytes of the audit log file before it gets rotated, default size is 100M",
},
cli.IntFlag{
Name: "audit-level",
Value: 0,
EnvVar: "AUDIT_LEVEL",
Usage: "Audit log level: 0 - disable audit log, 1 - log event metadata, 2 - log event metadata and request body, 3 - log event metadata, request body and response body",
},
}
app.Action = func(c *cli.Context) error {
// enable profiler
go func() {
log.Println(http.ListenAndServe("localhost:6060", nil))
}()
config.ACMEDomains = c.GlobalStringSlice("acme-domain")
config.NoCACerts = c.Bool("no-cacerts")
config.AuditLevel = c.Int("audit-level")
config.AuditLogPath = c.String("audit-log-path")
config.AuditLogMaxage = c.Int("audit-log-maxage")
config.AuditLogMaxbackup = c.Int("audit-log-maxbackup")
config.AuditLogMaxsize = c.Int("audit-log-maxsize")
initLogs(c, config)
return run(config)
}
app.ExitErrHandler = func(c *cli.Context, err error) {
logrus.Fatal(err)
}
app.Run(os.Args)
}
func initLogs(c *cli.Context, cfg app.Config) {
if cfg.Debug {
logrus.SetLevel(logrus.DebugLevel)
}
switch c.String("log-format") {
case "simple":
logrus.SetFormatter(&simplelog.StandardFormatter{})
case "text":
logrus.SetFormatter(&logrus.TextFormatter{})
case "json":
logrus.SetFormatter(&logrus.JSONFormatter{})
}
logrus.SetOutput(os.Stdout)
logserver.StartServerWithDefaults()
}
func run(cfg app.Config) error {
logrus.Infof("Rancher version %s is starting", VERSION)
logrus.Infof("Rancher arguments %+v", cfg)
dump.GoroutineDumpOn(syscall.SIGUSR1, syscall.SIGILL)
ctx := signal.SigTermCancelContext(context.Background())
embedded, ctx, kubeConfig, err := k8s.GetConfig(ctx, cfg.K8sMode, cfg.KubeConfig)
if err != nil {
return err
}
cfg.Embedded = embedded
os.Unsetenv("KUBECONFIG")
kubeConfig.Timeout = 30 * time.Second
return app.Run(ctx, *kubeConfig, &cfg)
}
| ["\"PATH\""] | [] | ["PATH"] | [] | ["PATH"] | go | 1 | 0 | |
dev/Gems/CloudGemFramework/v1/AWS/lambda-code/ServiceLambda/resource_types/test/test_unit_Custom_InterfaceDependencyResolver.py
|
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from unittest import mock
from unittest.mock import MagicMock
import os
from .custom_resource_test_case import CustomResourceTestCase
from resource_manager_common.test.mock_stack_info import MockResourceGroupInfo
# Need to patch the environment before loading InterfaceDependencyResolver
TEST_REGION = 'test-region'
os.environ['AWS_DEFAULT_REGION'] = TEST_REGION
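# (Assumption) InterfaceDependencyResolver creates AWS clients at import time, so the region has to be in place before the import below.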
from resource_types import InterfaceDependencyResolver
REST_API_ID = 'TestRestApiId'
PATH = 'path'
STAGE_NAME = 'api'
DEFAULT_URL = 'https://{}.execute-api.{}.amazonaws.com/{}/{}'.format(REST_API_ID, MockResourceGroupInfo.MOCK_REGION, STAGE_NAME, PATH)
CUSTOM_DOMAIN_NAME = 'TestCustomDomainName'
ALTERNATIVE_URL = 'https://{}/{}.{}.{}/{}'.format(CUSTOM_DOMAIN_NAME, MockResourceGroupInfo.MOCK_REGION, STAGE_NAME, REST_API_ID, PATH)
# Specific unit tests for InterfaceDependencyResolver interface URL parsing
class UnitTest_CloudGemFramework_ResourceTypeResourcesHandler_InterfaceDependencyResolver(CustomResourceTestCase):
@mock.patch.object(InterfaceDependencyResolver, 'InterfaceUrlParts')
def test_parse_default_interface_url(self, *args):
os.environ['CustomDomainName'] = ''
InterfaceDependencyResolver._parse_interface_url(DEFAULT_URL)
response = {
'api_id': REST_API_ID,
'region': MockResourceGroupInfo.MOCK_REGION,
'stage_name': STAGE_NAME,
'path': PATH
}
InterfaceDependencyResolver.InterfaceUrlParts.assert_called_once_with(
api_id=REST_API_ID, path=PATH, region=MockResourceGroupInfo.MOCK_REGION, stage_name=STAGE_NAME)
@mock.patch.object(InterfaceDependencyResolver, 'InterfaceUrlParts')
def test_parse_interface_url_with_custom_domain_name(self, *args):
os.environ['CustomDomainName'] = CUSTOM_DOMAIN_NAME
InterfaceDependencyResolver._parse_interface_url(ALTERNATIVE_URL)
response = {
'api_id': REST_API_ID,
'region': MockResourceGroupInfo.MOCK_REGION,
'stage_name': STAGE_NAME,
'path': PATH
}
InterfaceDependencyResolver.InterfaceUrlParts.assert_called_once_with(
api_id=REST_API_ID, path=PATH, region=MockResourceGroupInfo.MOCK_REGION, stage_name=STAGE_NAME)
| [] | [] | ["CustomDomainName", "AWS_DEFAULT_REGION"] | [] | ["CustomDomainName", "AWS_DEFAULT_REGION"] | python | 2 | 0 | |
internal/build/cmd/tools/commands/spec/command.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package cmd
import (
"archive/zip"
"bytes"
"encoding/json"
"fmt"
"github.com/elastic/go-elasticsearch/v8/internal/build/cmd"
"github.com/elastic/go-elasticsearch/v8/internal/build/utils"
"github.com/elastic/go-elasticsearch/v8/internal/version"
"github.com/spf13/cobra"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"time"
)
var (
output *string
commitHash *string
debug *bool
info *bool
)
func init() {
output = toolsCmd.Flags().StringP("output", "o", "", "Path to a folder for generated output")
commitHash = toolsCmd.Flags().StringP("commit_hash", "c", "", "Elasticsearch commit hash")
debug = toolsCmd.Flags().BoolP("debug", "d", false, "Print the generated source to terminal")
info = toolsCmd.Flags().Bool("info", false, "Print the API details to terminal")
cmd.RegisterCmd(toolsCmd)
}
var toolsCmd = &cobra.Command{
Use: "download-spec",
Short: "Download specification artifact for code & tests generation",
Run: func(cmd *cobra.Command, args []string) {
command := &Command{
Output: *output,
CommitHash: *commitHash,
Debug: *debug,
Info: *info,
}
err := command.Execute()
if err != nil {
utils.PrintErr(err)
os.Exit(1)
}
},
}
type Command struct {
Output string
CommitHash string
Debug bool
Info bool
}
// download-spec runs a query against the Elastic artifacts API, retrieves the list of active artifacts,
// then downloads, extracts and writes to disk the rest-resources spec alongside a JSON file with the relevant build information.
func (c Command) Execute() (err error) {
const artifactsUrl = "https://artifacts-api.elastic.co/v1/versions"
esBuildVersion := os.Getenv("ELASTICSEARCH_BUILD_VERSION")
if esBuildVersion == "" {
esBuildVersion = version.Client
}
versionUrl := strings.Join([]string{artifactsUrl, esBuildVersion}, "/")
res, err := http.Get(versionUrl)
if err != nil {
log.Fatalf(err.Error())
}
defer res.Body.Close()
var v Versions
dec := json.NewDecoder(res.Body)
err = dec.Decode(&v)
if err != nil {
log.Fatalf(err.Error())
}
if c.Debug {
log.Printf("%d builds found", len(v.Version.Builds))
}
var build Build
if c.CommitHash != "" {
if build, err = findBuildByCommitHash(c.CommitHash, v.Version.Builds); err != nil {
build = findMostRecentBuild(v.Version.Builds)
}
} else {
build = findMostRecentBuild(v.Version.Builds)
}
if c.Debug {
log.Printf("Build found : %s", build.Projects.Elasticsearch.CommitHash)
}
data, err := c.downloadZip(build)
if err != nil {
log.Fatalf("Cannot download zip from %s, reason : %s", build.zipfileUrl(), err)
}
if err := c.extractZipToDest(data); err != nil {
log.Fatalf(err.Error())
}
d, _ := json.Marshal(build)
err = c.writeFileToDest("elasticsearch.json", d)
if err != nil {
log.Fatalf(err.Error())
}
return nil
}
func (c Command) writeFileToDest(filename string, data []byte) error {
path := filepath.Join(c.Output, filename)
if err := ioutil.WriteFile(path, data, 0644); err != nil {
return fmt.Errorf("cannot write file: %s", err)
}
if c.Debug {
log.Printf("Successfuly written file to : %s", path)
}
return nil
}
type Versions struct {
Version struct {
Builds []Build `json:"builds"`
} `json:"version"`
}
type PackageUrl struct {
*url.URL
}
func (p *PackageUrl) UnmarshalJSON(data []byte) error {
if string(data) == "null" {
return nil
}
url, err := url.Parse(string(data[1 : len(data)-1]))
if err == nil {
p.URL = url
}
return err
}
type BuildStartTime struct {
*time.Time
}
func (t *BuildStartTime) UnmarshalJSON(data []byte) error {
if string(data) == "null" {
return nil
}
var err error
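// (Assumption) the artifacts API encodes start_time in an RFC1123-style form, e.g. "Wed, 10 Mar 2021 08:45:10 UTC", which matches the layout below.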
parsedTime, err := time.Parse(`"`+"Mon, 2 Jan 2006 15:04:05 MST"+`"`, string(data))
t.Time = &parsedTime
return err
}
type Build struct {
StartTime BuildStartTime `json:"start_time"`
Version string `json:"version"`
BuildId string `json:"build_id"`
Projects struct {
Elasticsearch struct {
Branch string `json:"branch"`
CommitHash string `json:"commit_hash"`
Packages map[string]struct {
Url PackageUrl `json:"url"`
Type string `json:"type"`
}
} `json:"elasticsearch"`
} `json:"projects"`
}
func NewBuild() Build {
t := time.Date(1970, 0, 0, 0, 0, 0, 0, time.UTC)
startTime := BuildStartTime{Time: &t}
return Build{StartTime: startTime}
}
// zipfileUrl returns the file URL for the rest-resources artifact from a Build.
// There should be only one artifact matching the requirements per Build.
func (b Build) zipfileUrl() string {
for _, pack := range b.Projects.Elasticsearch.Packages {
if pack.Type == "zip" && strings.Contains(pack.Url.String(), "rest-resources") {
return pack.Url.String()
}
}
return ""
}
// extractZipToDest extracts the data from a previously downloaded file held in memory into the Output target.
func (c Command) extractZipToDest(data []byte) error {
zipReader, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
if err != nil {
return err
}
if err = os.MkdirAll(c.Output, 0744); err != nil {
return fmt.Errorf("cannot created destination directory: %s", err)
}
for _, file := range zipReader.File {
f, err := file.Open()
if err != nil {
return fmt.Errorf("cannot read file in zipfile: %s", err)
}
defer f.Close()
if file.FileInfo().IsDir() {
path := filepath.Join(c.Output, file.Name)
_ = os.MkdirAll(path, 0744)
} else {
data, err := ioutil.ReadAll(f)
if err != nil {
return err
}
if err := c.writeFileToDest(file.Name, data); err != nil {
return err
}
}
}
if c.Debug {
log.Printf("Zipfile successfully extracted to %s", c.Output)
}
return nil
}
// downloadZip fetches the rest-resources artifact from a Build and returns its content as []byte.
func (c Command) downloadZip(b Build) ([]byte, error) {
url := b.zipfileUrl()
if c.Debug {
log.Printf("Zipfile url : %s", b.zipfileUrl())
}
client := &http.Client{}
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
req.Header.Add("Accept-Content", "gzip")
res, err := client.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
data, _ := ioutil.ReadAll(res.Body)
return data, err
}
// findMostRecentBuild iterates through the builds retrieved from the API
// and returns the latest one based on the StartTime of each Build.
func findMostRecentBuild(builds []Build) Build {
var latestBuild Build
latestBuild = NewBuild()
for _, build := range builds {
if build.StartTime.After(*latestBuild.StartTime.Time) {
latestBuild = build
}
}
return latestBuild
}
// findBuildByCommitHash iterates through the builds and returns the first occurrence of Build
// that matches the provided commitHash.
func findBuildByCommitHash(commitHash string, builds []Build) (Build, error) {
for _, build := range builds {
if build.Projects.Elasticsearch.CommitHash == commitHash {
return build, nil
}
}
return Build{}, fmt.Errorf("Build with commit hash %s not found", commitHash)
}
| ["\"ELASTICSEARCH_BUILD_VERSION\""] | [] | ["ELASTICSEARCH_BUILD_VERSION"] | [] | ["ELASTICSEARCH_BUILD_VERSION"] | go | 1 | 0 | |
src/firestone_engine/DataLoader.py
|
import tushare
from datetime import datetime, timedelta
import pytz
import logging
import json
import os
from pymongo import MongoClient
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from .ProxyManager import ProxyManager
from .Constants import Constants
from .HotConcept import HotConcept
class DataLoader(object):
UTC_8 = pytz.timezone('Asia/Shanghai')
_logger = logging.getLogger(__name__)
_MONFO_URL = '127.0.0.1'
_DATA_DB = 'firestone-data'
_CODE_FROM_DB = '000000'
def __init__(self, code_list, is_mock=False, mock_trade=False, date=None, hours=['9','11','10,13-14'], minutes=['30-59','0-29','*']):
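# Default hours/minutes appear to cover the Shanghai trading sessions (09:30-11:30 and 13:00-15:00); each cron trigger below fires every 3 seconds inside those windows.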
self.proxyManager = ProxyManager()
self.use_proxy = False
self.hours = hours
self.minutes = minutes
self.is_mock = is_mock
self.mock_trade = mock_trade
self.is_finsih_flag = False
self.lastRows = {}
self.client = MongoClient(DataLoader._MONFO_URL, 27017)
self.data_db = self.client[DataLoader._DATA_DB]
self.db = self.client[os.environ['FR_DB']]
self.hot_concept = HotConcept(self.db)
self.scheduler = BackgroundScheduler()
self.date = date
today = datetime.now()
self.today = '{}-{}-{}'.format(today.year,('0' + str(today.month))[-2:],('0' + str(today.day))[-2:])
# self.today_datetime = datetime(today.year,today.month,today.day,tzinfo=DataLoader.UTC_8)
if(self.date is None):
self.date = self.today
end_date = today + timedelta(days = 1)
end_date = '{}-{}-{}'.format(end_date.year,('0' + str(end_date.month))[-2:],('0' + str(end_date.day))[-2:])
self.load_codes_from_db = False
self.code_list = self.get_code_list(code_list)
for i, hour in enumerate(hours):
trigger = CronTrigger(hour=hour,minute=minutes[i],second='*/3', end_date=end_date)
trigger_concept = CronTrigger(hour=hour,minute=minutes[i],second='0', end_date=end_date)
if(i == len(hours) - 1):
self.scheduler.add_job(self.run,id="last_job",trigger=trigger)
else:
self.scheduler.add_job(self.run,trigger=trigger)
self.scheduler.add_job(self.run_concept,trigger=trigger_concept)
def get_code_list(self, code_list):
if(DataLoader._CODE_FROM_DB in code_list):
self.load_codes_from_db = True
return [DataLoader._CODE_FROM_DB]
colls = list(self.data_db.list_collections())
codes = []
for code in code_list:
if(code == 'N/A'):
continue
name = code + '-' + self.date + ('-m' if self.is_mock else '')
if(name not in [coll['name'] for coll in colls]):
codes.append(code)
self.data_db.create_collection(name)
if(len(codes) == 0):
self.is_finsih_flag = True
return codes
def start(self):
if(self.is_finsih_flag):
return
self.scheduler.start()
DataLoader._logger.info('job get data for {} has started'.format(self.code_list))
def is_finsih(self):
job = self.scheduler.get_job('last_job')
return self.is_finsih_flag or job is None or job.next_run_time is None
def stop(self):
self.client.close()
self.scheduler.shutdown(wait=True)
DataLoader._logger.info('job get data for {} has stopped'.format(self.code_list))
def get_code_list_from_db(self):
colname = 'trades'
if(self.mock_trade):
colname = 'mocktrades'
codes_data = self.db[colname].find({"deleted":False, "params.executeDate" : self.today},{"code" : 1, "_id" : 0})
code_list = [code_data["code"] for code_data in list(codes_data) if code_data["code"] != 'N/A']
temp_list = []
for code in code_list:
if(',' in code):
temp_list.extend(code.split(','))
else:
temp_list.append(code)
code_list = temp_list
for code in code_list:
if(code.startswith('3')):
if(Constants.INDEX[5] not in code_list):
code_list.append(Constants.INDEX[5])
else:
if(Constants.INDEX[0] not in code_list):
code_list.append(Constants.INDEX[0])
return list(set(code_list))
def load_data(self):
list_wrapper = []
size = len(self.code_list)
df_result = None
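# Query in batches of at most 50 codes; (assumption) tushare.get_realtime_quotes only accepts a limited number of symbols per call.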
if(size > 50):
list_size = (size // 50) + (1 if (size % 50) > 0 else 0)
for i in range(list_size):
list_wrapper.append(self.code_list[i * 50 : i * 50 + 50])
else:
list_wrapper.append(self.code_list)
if(self.use_proxy):
for l in list_wrapper:
try:
df = tushare.get_realtime_quotes(l, proxyManager=self.proxyManager)
if(df_result is None):
df_result = df
else:
df_result = df_result.append(df)
except Exception as e:
DataLoader._logger.error('load data error, use_proxy = {}, e = {}'.format(self.use_proxy, e))
self.use_proxy = True
self.proxyManager.remove_proxy()
else:
for i, l in enumerate(list_wrapper):
try:
if(i == 0):
df = tushare.get_realtime_quotes(l)
else:
df = tushare.get_realtime_quotes(l, proxyManager=self.proxyManager)
if(df_result is None):
df_result = df
else:
df_result = df_result.append(df)
except Exception as e:
DataLoader._logger.error('load data error, use_proxy = {}, e = {}'.format(self.use_proxy, e))
self.use_proxy = True
self.proxyManager.remove_proxy()
return df_result
def run(self):
try:
if(self.load_codes_from_db):
self.code_list = self.get_code_list_from_db()
DataLoader._logger.info('start get the data for {}'.format(self.code_list))
if(len(self.code_list) < 2):
return
if(self.is_mock):
self.run_mock()
else:
df = self.load_data()
if(df is None):
DataLoader._logger.error('failed to get the data for {}'.format(self.code_list))
return
json_list = json.loads(df.to_json(orient='records'))
DataLoader._logger.info('get data length = {}'.format(len(json_list)))
for json_data in json_list:
code = json_data['code']
code = Constants.map_code(json_data['name'], json_data['code'])
if(code not in self.lastRows):
self.lastRows[code] = None
if(self.lastRows[code] is None or self.lastRows[code]['time'] != json_data['time']):
json_data['real_time'] = datetime.now()
self.data_db[code + '-' + self.today].insert(json_data)
self.lastRows[code] = json_data
except Exception as e:
DataLoader._logger.error(e)
def run_concept(self):
try:
self.hot_concept.load_hot_concept()
except Exception as e:
DataLoader._logger.error(f'load hot concept failed, e = {e}')
def run_mock(self):
try:
if(not hasattr(self, 'mock_count')):
self.mock_count = 0
self.data = {}
for code in self.code_list:
self.data[code + '-' + self.date] = list(self.data_db[code + '-' + self.date].find())
self.lastRows[code] = None
for code in self.code_list:
if self.mock_count < len(self.data[code + '-' + self.date]):
json_data = self.data[code + '-' + self.date][self.mock_count]
json_data['real_time'] = datetime.now()
if(self.lastRows[code] is None or self.lastRows[code]['time'] != json_data['time']):
self.data_db[code + '-' + self.date + '-m'].insert(json_data)
self.lastRows[code] = json_data
self.mock_count += 1
except Exception as e:
DataLoader._logger.error(e)
| [] | [] | ["FR_DB"] | [] | ["FR_DB"] | python | 1 | 0 | |
cmd/watt/aggregator/aggregator.go
|
package aggregator
import (
"bytes"
"context"
"encoding/json"
"strings"
"sync"
"time"
"github.com/datawire/ambassador/cmd/watt/thingconsul"
"github.com/datawire/ambassador/cmd/watt/thingkube"
"github.com/datawire/ambassador/cmd/watt/watchapi"
"github.com/datawire/ambassador/pkg/consulwatch"
"github.com/datawire/ambassador/pkg/k8s"
"github.com/datawire/ambassador/pkg/kates"
"github.com/datawire/ambassador/pkg/limiter"
"github.com/datawire/ambassador/pkg/supervisor"
"github.com/datawire/ambassador/pkg/watt"
"github.com/datawire/dlib/dexec"
"github.com/datawire/dlib/dlog"
)
type WatchHook func(p *supervisor.Process, snapshot string) watchapi.WatchSet
type Aggregator struct {
// Public //////////////////////////////////////////////////////////////
// Public input channels that other things can use to send us information.
KubernetesEvents chan<- thingkube.K8sEvent // Kubernetes state
ConsulEvents chan<- thingconsul.ConsulEvent // Consul endpoints
// Internal ////////////////////////////////////////////////////////////
// These are the read-ends of those public inputs
kubernetesEvents <-chan thingkube.K8sEvent
consulEvents <-chan thingconsul.ConsulEvent
// Output channel used to send info to other things
k8sWatches chan<- []watchapi.KubernetesWatchSpec // the k8s watch manager
consulWatches chan<- []watchapi.ConsulWatchSpec // consul watch manager
snapshots chan<- string // the invoker
// Static information that doesn't change after initialization
requiredKinds []string // not considered "bootstrapped" until we hear about all these kinds
watchHook WatchHook
limiter limiter.Limiter
validator *kates.Validator
// Runtime information that changes
resourcesMu sync.RWMutex
ids map[string]bool
kubernetesResources map[string]map[string][]k8s.Resource
consulEndpoints map[string]consulwatch.Endpoints
errorsMu sync.RWMutex
errors map[string][]watt.Error
notifyMux sync.Mutex
bootstrapped bool
}
func NewAggregator(snapshots chan<- string, k8sWatches chan<- []watchapi.KubernetesWatchSpec, consulWatches chan<- []watchapi.ConsulWatchSpec,
requiredKinds []string, watchHook WatchHook, limiter limiter.Limiter, validator *kates.Validator) *Aggregator {
kubernetesEvents := make(chan thingkube.K8sEvent)
consulEvents := make(chan thingconsul.ConsulEvent)
return &Aggregator{
// public
KubernetesEvents: kubernetesEvents,
ConsulEvents: consulEvents,
// internal
kubernetesEvents: kubernetesEvents,
consulEvents: consulEvents,
k8sWatches: k8sWatches,
consulWatches: consulWatches,
snapshots: snapshots,
requiredKinds: requiredKinds,
watchHook: watchHook,
limiter: limiter,
ids: make(map[string]bool),
kubernetesResources: make(map[string]map[string][]k8s.Resource),
consulEndpoints: make(map[string]consulwatch.Endpoints),
errors: make(map[string][]watt.Error),
validator: validator,
}
}
func (a *Aggregator) Work(p *supervisor.Process) error {
// In order to invoke `maybeNotify`, which is a very time consuming
// operation, we coalesce events:
//
// 1. Be continuously reading all available events from
// a.kubernetesEvents and a.consulEvents and store thingkube.K8sEvents
// in the potentialKubernetesEventSignal variable. This means
// at any given point (modulo caveats below), the
// potentialKubernetesEventSignal variable will have the
// latest Kubernetes event available.
//
// 2. At the same time, whenever there is capacity to write
// down the kubernetesEventProcessor channel, we send
// potentialKubernetesEventSignal to be processed.
//
// The anonymous goroutine below will be constantly reading
// from the kubernetesEventProcessor channel and performing
// a blocking a.maybeNotify(). This means that we can only
// *write* to the kubernetesEventProcessor channel when we are
// not currently processing an event, but when that happens, we
// will still read from a.kubernetesEvents and a.consulEvents
// and update potentialKubernetesEventSignal.
//
// There are three caveats to the above:
//
// 1. At startup, we don't yet have an event to write, but
// we're not processing anything, so we will try to write
// something down the kubernetesEventProcessor channel.
// To cope with this, the processing goroutine will ignore event
// signals that have the event.skip flag set.
//
// 2. If we process an event quickly, or if there aren't new
// events available, then we end up busy looping and
// sending the same potentialKubernetesEventSignal value down
// the kubernetesEventProcessor channel multiple times. To cope
// with this, whenever we have successfully written to the
// kubernetesEventProcessor channel, we do a *blocking* read of
// the next event from a.kubernetesEvents and a.consulEvents.
//
// 3. Always be calling a.setKubernetesResources as soon as we
// receive an event. This is a fast non-blocking call that
// updates watches; we can't coalesce this call.
p.Ready()
type eventSignal struct {
kubernetesEvent thingkube.K8sEvent
skip bool
}
kubernetesEventProcessor := make(chan eventSignal)
go func() {
for event := range kubernetesEventProcessor {
if event.skip {
// ignore the initial eventSignal to deal with the
// corner case where we haven't received an event yet.
continue
}
a.maybeNotify(p)
}
}()
potentialKubernetesEventSignal := eventSignal{kubernetesEvent: thingkube.K8sEvent{}, skip: true}
for {
select {
case potentialKubernetesEvent := <-a.kubernetesEvents:
// if a new KubernetesEvents is available to be read,
// and we can't write to the kubernetesEventProcessor channel,
// then we will overwrite potentialKubernetesEvent
// with a newer event while still processing a.setKubernetesResources
a.setKubernetesResources(potentialKubernetesEvent)
potentialKubernetesEventSignal = eventSignal{kubernetesEvent: potentialKubernetesEvent, skip: false}
case kubernetesEventProcessor <- potentialKubernetesEventSignal:
// if we aren't currently blocked in
// a.maybeNotify() then the above goroutine will be
// reading from the kubernetesEventProcessor channel and we
// will send the current potentialKubernetesEventSignal
// value over the kubernetesEventProcessor channel to be
// processed
select {
case potentialKubernetesEvent := <-a.kubernetesEvents:
// here we do blocking read of the next event for caveat #2.
a.setKubernetesResources(potentialKubernetesEvent)
potentialKubernetesEventSignal = eventSignal{kubernetesEvent: potentialKubernetesEvent, skip: false}
case event := <-a.consulEvents:
a.updateConsulResources(event)
a.maybeNotify(p)
case <-p.Shutdown():
return nil
}
case event := <-a.consulEvents:
// we are always reading and processing ConsulEvents directly,
// not coalescing them.
a.updateConsulResources(event)
a.maybeNotify(p)
case <-p.Shutdown():
return nil
}
}
}
func (a *Aggregator) updateConsulResources(event thingconsul.ConsulEvent) {
a.resourcesMu.Lock()
defer a.resourcesMu.Unlock()
a.ids[event.WatchId] = true
a.consulEndpoints[event.Endpoints.Service] = event.Endpoints
}
func (a *Aggregator) setKubernetesResources(event thingkube.K8sEvent) {
if len(event.Errors) > 0 {
a.errorsMu.Lock()
defer a.errorsMu.Unlock()
for _, kError := range event.Errors {
a.errors[kError.Source] = append(a.errors[kError.Source], kError)
}
} else {
a.resourcesMu.Lock()
defer a.resourcesMu.Unlock()
a.ids[event.WatchID] = true
submap, ok := a.kubernetesResources[event.WatchID]
if !ok {
submap = make(map[string][]k8s.Resource)
a.kubernetesResources[event.WatchID] = submap
}
submap[event.Kind] = event.Resources
}
}
func (a *Aggregator) generateSnapshot(p *supervisor.Process) (string, error) {
a.errorsMu.RLock()
defer a.errorsMu.RUnlock()
a.resourcesMu.RLock()
defer a.resourcesMu.RUnlock()
k8sResources := make(map[string][]k8s.Resource)
for _, submap := range a.kubernetesResources {
for k, v := range submap {
a.validate(p, v)
k8sResources[k] = append(k8sResources[k], v...)
}
}
s := watt.Snapshot{
Consul: watt.ConsulSnapshot{Endpoints: a.consulEndpoints},
Kubernetes: k8sResources,
Errors: a.errors,
}
jsonBytes, err := json.MarshalIndent(s, "", " ")
if err != nil {
return "{}", err
}
return string(jsonBytes), nil
}
// watt only runs in legacy mode now, and legacy mode is defined
// to not do fast validation.
// var fastValidation = len(os.Getenv("AMBASSADOR_FAST_VALIDATION")) > 0
var fastValidation = false
func (a *Aggregator) validate(p *supervisor.Process, resources []k8s.Resource) {
if !fastValidation {
return
}
for _, r := range resources {
err := a.validator.Validate(p.Context(), map[string]interface{}(r))
if err == nil {
delete(r, "errors")
} else {
r["errors"] = err.Error()
}
}
}
func (a *Aggregator) isKubernetesBootstrapped(p *supervisor.Process) bool {
a.resourcesMu.RLock()
defer a.resourcesMu.RUnlock()
submap, sok := a.kubernetesResources[""]
if !sok {
return false
}
for _, k := range a.requiredKinds {
_, ok := submap[k]
if !ok {
return false
}
}
return true
}
// Returns true if the current state of the world is complete. The
// kubernetes state of the world is always complete by definition
// because the kubernetes client provides that guarantee. The
// aggregate state of the world is complete when any consul services
// referenced by kubernetes have populated endpoint information (even
// if the value of the populated info is an empty set of endpoints).
func (a *Aggregator) isComplete(p *supervisor.Process, watchset watchapi.WatchSet) bool {
a.resourcesMu.RLock()
defer a.resourcesMu.RUnlock()
complete := true
for _, w := range watchset.KubernetesWatches {
if _, ok := a.ids[w.WatchId()]; ok {
p.Debugf("initialized k8s watch: %s", w.WatchId())
} else {
complete = false
p.Debugf("waiting for k8s watch: %s", w.WatchId())
}
}
for _, w := range watchset.ConsulWatches {
if _, ok := a.ids[w.WatchId()]; ok {
p.Debugf("initialized consul watch: %s", w.WatchId())
} else {
complete = false
p.Debugf("waiting for consul watch: %s", w.WatchId())
}
}
return complete
}
func (a *Aggregator) maybeNotify(p *supervisor.Process) {
now := time.Now()
delay := a.limiter.Limit(now)
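// A zero delay notifies immediately, a positive delay defers the notification, and a negative delay drops it.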
if delay == 0 {
a.notify(p)
} else if delay > 0 {
time.AfterFunc(delay, func() {
a.notify(p)
})
}
}
func (a *Aggregator) notify(p *supervisor.Process) {
a.notifyMux.Lock()
defer a.notifyMux.Unlock()
if !a.isKubernetesBootstrapped(p) {
return
}
watchset := a.getWatches(p)
p.Debugf("found %d kubernetes watches", len(watchset.KubernetesWatches))
p.Debugf("found %d consul watches", len(watchset.ConsulWatches))
a.k8sWatches <- watchset.KubernetesWatches
a.consulWatches <- watchset.ConsulWatches
if !a.bootstrapped && a.isComplete(p, watchset) {
p.Logf("bootstrapped!")
a.bootstrapped = true
}
if a.bootstrapped {
snapshot, err := a.generateSnapshot(p)
if err != nil {
p.Logf("generate snapshot failed %v", err)
return
}
a.snapshots <- snapshot
}
}
func (a *Aggregator) getWatches(p *supervisor.Process) watchapi.WatchSet {
snapshot, err := a.generateSnapshot(p)
if err != nil {
p.Logf("generate snapshot failed %v", err)
return watchapi.WatchSet{}
}
result := a.watchHook(p, snapshot)
return result.Interpolate()
}
func ExecWatchHook(watchHooks []string) WatchHook {
return func(p *supervisor.Process, snapshot string) watchapi.WatchSet {
result := watchapi.WatchSet{}
for _, hook := range watchHooks {
ws := invokeHook(p.Context(), hook, snapshot)
result.KubernetesWatches = append(result.KubernetesWatches, ws.KubernetesWatches...)
result.ConsulWatches = append(result.ConsulWatches, ws.ConsulWatches...)
}
return result
}
}
func invokeHook(ctx context.Context, hook, snapshot string) watchapi.WatchSet {
cmd := dexec.CommandContext(ctx, "sh", "-c", hook)
cmd.Stdin = strings.NewReader(snapshot)
watches, err := cmd.Output()
if err != nil {
dlog.Infof(ctx, "watch hook failed: %v", err)
return watchapi.WatchSet{}
}
decoder := json.NewDecoder(bytes.NewReader(watches))
decoder.DisallowUnknownFields()
var result watchapi.WatchSet
if err := decoder.Decode(&result); err != nil {
dlog.Infof(ctx, "watchset decode failed: %v", err)
return watchapi.WatchSet{}
}
return result
}
| ["\"AMBASSADOR_FAST_VALIDATION\""] | [] | ["AMBASSADOR_FAST_VALIDATION"] | [] | ["AMBASSADOR_FAST_VALIDATION"] | go | 1 | 0 | |
ezequiel_django/settings.py
|
"""
Django settings for ezequiel_django project.
Generated by 'django-admin startproject' using Django 4.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ['MODE'] != 'production'
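# SECRET_KEY and MODE must be exported in the environment; os.environ[...] raises KeyError at startup if either is missing.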
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'channels',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ezequiel',
'rest_framework',
'rest_framework.authtoken',
'djoser',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'corsheaders.middleware.CorsMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ezequiel_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ezequiel_django.wsgi.application'
ASGI_APPLICATION = 'ezequiel_django.asgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv('DATABASE_NAME'),
'USER': os.getenv('DATABASE_USER'),
'PASSWORD': os.getenv('DATABASE_PASSWORD'),
'HOST': os.getenv('DATABASE_HOST'),
'PORT': os.getenv('DATABASE_PORT')
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
CORS_ALLOWED_ORIGINS = ['https://ezequiel-web.herokuapp.com', 'http://localhost:8000', 'http://127.0.0.1:8000']
# CSRF_COOKIE_SECURE = os.environ['MODE'] == 'production'
# SESSION_COOKIE_SECURE = os.environ['MODE'] == 'production'
# SECURE_SSL_REDIRECT = os.environ['MODE'] == 'production'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
}
DJOSER = {
'SERIALIZERS': {
'current_user': 'ezequiel.serializers.UserSerializer',
}
}
AUTH_USER_MODEL = 'ezequiel.User'
| [] | [] | ["DATABASE_PASSWORD", "DATABASE_NAME", "DATABASE_HOST", "MODE", "SECRET_KEY", "DATABASE_USER", "DATABASE_PORT"] | [] | ["DATABASE_PASSWORD", "DATABASE_NAME", "DATABASE_HOST", "MODE", "SECRET_KEY", "DATABASE_USER", "DATABASE_PORT"] | python | 7 | 0 | |
mini_programs/filter_fasta.py
|
#!/usr/bin/env python3
# --- imports
import sys
import os
from WGALP.utils.input_manager import InputManager
from WGALP.utils.input_manager import check_files
from WGALP.utils.input_manager import check_folders
from WGALP.utils.genericUtils import binary_search
# --- input arguments
def prepare_input(args):
input_data = InputManager("Select nodes by ID from a Whole Genom Assembly")
input_data.add_arg("--contigs", "path", description="[Required] assembled contigs or scaffolds (.fasta)")
input_data.add_arg("--selected-contigs", "path", description="[Required] a file containing the ids of the selected contigs (each id is in its own line)")
input_data.add_arg("--complement", "flag", description="if set, keeps contigs not in list")
input_data.add_arg("--output", "dir", description="[Required] path to the output folder")
input_data.parse(args)
return input_data
# --- input sanity checks
def sanity_check(output_dir, contigs, selected_contigs, complement):
check_files([contigs, selected_contigs])
try:
check_folders([output_dir])
except Exception:
if os.path.isfile(output_dir):
raise Exception("--output argument must be a directory and not a file")
else:
os.mkdir(output_dir)
# --- core functions
def filter_fasta( fasta_file_path, selected_contigs, out_file_name, keep=True):
"""
Filter unwanted reads from a fasta file.
selected_contigs should point to a file with a newline separated list of contig IDs
:param fasta_file_path: path to a fasta_file
:param selected_contigs: path to the file with the IDs of the selected contigs
:param out_file_name: name of the .fasta file created after this filtering
"""
# open input files
fasta_file = open(fasta_file_path, "r")
selected_contigs_file = open(selected_contigs, "r")
# read selected_contigs and sort them
selected_contigs = selected_contigs_file.read().split()
selected_contigs.sort()
out_file = open(out_file_name, "w")
to_be_printed = False
for line in fasta_file:
if line.startswith(">"):
# check whether the contig ID is in the selected list
contig_id = line.split()[0][1:].strip()
if(binary_search(selected_contigs, contig_id) is not None):
to_be_printed = keep
else:
to_be_printed = not keep
if(to_be_printed):
# write the line if the contig is selected for keeping
out_file.write(line)
fasta_file.close()
selected_contigs_file.close()
out_file.close()
return out_file_name
def select_contigs_aux(output_dir, contigs, selected_contigs, complement):
# sanity check
sanity_check(output_dir, contigs, selected_contigs, complement)
out = filter_fasta(contigs, selected_contigs, os.path.join(output_dir, "filtered_contigs.fasta"), keep=not complement)
return out
# --- caller function
def select_contigs(args):
in_manager = prepare_input(args)
contigs = in_manager["--contigs"]["value"]
selected_contigs = in_manager["--selected-contigs"]["value"]
output_dir = in_manager["--output"]["value"]
complement = in_manager["--complement"]["value"]
output = select_contigs_aux(output_dir, contigs, selected_contigs, complement)
print("task completed successfully")
print("filtered .fasta is at the followinf location:")
print("\t" + "filtered_fasta" + " : " + output)
return output
if __name__ == "__main__":
select_contigs(sys.argv[1:])
| [] | [] | [] | [] | [] | python | null | null | null |
test/e2e/appmesh/e2e_test.go
|
package appmesh_test
import (
"fmt"
"os"
"path/filepath"
"time"
skclients "github.com/solo-io/solo-kit/pkg/api/v1/clients"
"github.com/solo-io/supergloo/cli/pkg/helpers/clients"
"github.com/solo-io/supergloo/install/helm/supergloo/generate"
sgutils "github.com/solo-io/supergloo/test/e2e/utils"
sgtestutils "github.com/solo-io/supergloo/test/testutils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/solo-io/supergloo/cli/test/utils"
)
const superglooNamespace = "supergloo-system"
var _ = Describe("E2e", func() {
It("registers and tests appmesh", func() {
// install discovery via cli
// start discovery
var superglooErr error
projectRoot := filepath.Join(os.Getenv("GOPATH"), "src", os.Getenv("PROJECT_ROOT"))
err := generate.Run("dev", "Always", projectRoot)
if err == nil {
superglooErr = utils.Supergloo(fmt.Sprintf("init --release latest --values %s", filepath.Join(projectRoot, generate.ValuesOutput)))
} else {
superglooErr = utils.Supergloo("init --release latest")
}
Expect(superglooErr).NotTo(HaveOccurred())
// TODO (ilackarms): add a flag to switch between starting supergloo locally and deploying via cli
sgtestutils.DeleteSuperglooPods(kube, superglooNamespace)
appmeshName := "appmesh"
secretName := "my-secret"
createAWSSecret(secretName)
testRegisterAppmesh(appmeshName, secretName)
testUnregisterAppmesh(appmeshName)
})
})
/*
tests
*/
func testRegisterAppmesh(meshName, secretName string) {
region, vnLabel := "us-east-1", "app"
err := utils.Supergloo(fmt.Sprintf("register appmesh --name %s --region %s "+
"--secret %s.%s --select-namespaces %s --virtual-node-label %s --configmap %s.%s",
meshName, region, superglooNamespace, secretName, namespaceWithInject, vnLabel,
superglooNamespace, "sidecar-injector"))
Expect(err).NotTo(HaveOccurred())
meshClient := clients.MustMeshClient()
Eventually(func() error {
_, err := meshClient.Read(superglooNamespace, meshName, skclients.ReadOpts{})
return err
}).ShouldNot(HaveOccurred())
err = sgtestutils.WaitUntilPodsRunning(time.Minute*4, superglooNamespace,
"sidecar-injector",
)
Expect(err).NotTo(HaveOccurred())
err = sgutils.DeployTestRunner(basicNamespace)
Expect(err).NotTo(HaveOccurred())
err = sgutils.DeployBookInfoAppmesh(namespaceWithInject)
Expect(err).NotTo(HaveOccurred())
err = sgtestutils.WaitUntilPodsRunning(time.Minute*4, basicNamespace,
"testrunner",
)
Expect(err).NotTo(HaveOccurred())
err = sgtestutils.WaitUntilPodsRunning(time.Minute*2, namespaceWithInject,
"reviews-v1",
"reviews-v2",
"reviews-v3",
)
Expect(err).NotTo(HaveOccurred())
checkSidecarInjection()
}
func checkSidecarInjection() {
pods, err := kube.CoreV1().Pods(namespaceWithInject).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
Expect(len(pod.Spec.Containers)).To(BeNumerically(">=", 2))
}
}
func testUnregisterAppmesh(meshName string) {
}
func createAWSSecret(secretName string) {
accessKeyId, secretAccessKey := os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY")
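// The test expects real AWS credentials to be exported as AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY before it runs.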
Expect(accessKeyId).NotTo(Equal(""))
Expect(secretAccessKey).NotTo(Equal(""))
err := utils.Supergloo(fmt.Sprintf(
"create secret aws --name %s --access-key-id %s --secret-access-key %s",
secretName, accessKeyId, secretAccessKey,
))
Expect(err).NotTo(HaveOccurred())
secret, err := kube.CoreV1().Secrets(superglooNamespace).Get(secretName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(secret).NotTo(BeNil())
}
| ["\"GOPATH\"", "\"PROJECT_ROOT\"", "\"AWS_ACCESS_KEY_ID\"", "\"AWS_SECRET_ACCESS_KEY\""] | [] | ["GOPATH", "PROJECT_ROOT", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"] | [] | ["GOPATH", "PROJECT_ROOT", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"] | go | 4 | 0 | |
ci.go
|
package main
import (
"fmt"
"os"
"regexp"
"strconv"
)
// CI represents a common information obtained from all CI platforms
type CI struct {
PR PullRequest
URL string
}
// PullRequest represents a GitHub pull request
type PullRequest struct {
Revision string
Number int
}
func circleci() (ci CI, err error) {
ci.PR.Number = 0
ci.PR.Revision = os.Getenv("CIRCLE_SHA1")
ci.URL = os.Getenv("CIRCLE_BUILD_URL")
pr := os.Getenv("CIRCLE_PULL_REQUEST")
if pr == "" {
pr = os.Getenv("CI_PULL_REQUEST")
}
if pr == "" {
pr = os.Getenv("CIRCLE_PR_NUMBER")
}
if pr == "" {
return ci, nil
}
re := regexp.MustCompile(`[1-9]\d*$`)
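// CIRCLE_PULL_REQUEST is typically a PR URL (e.g. https://github.com/org/repo/pull/123, shown for illustration), so take the trailing number.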
ci.PR.Number, err = strconv.Atoi(re.FindString(pr))
if err != nil {
return ci, fmt.Errorf("%v: cannot get env", pr)
}
return ci, nil
}
func travisci() (ci CI, err error) {
ci.PR.Revision = os.Getenv("TRAVIS_PULL_REQUEST_SHA")
ci.PR.Number, err = strconv.Atoi(os.Getenv("TRAVIS_PULL_REQUEST"))
return ci, err
}
| ["\"CIRCLE_SHA1\"", "\"CIRCLE_BUILD_URL\"", "\"CIRCLE_PULL_REQUEST\"", "\"CI_PULL_REQUEST\"", "\"CIRCLE_PR_NUMBER\"", "\"TRAVIS_PULL_REQUEST_SHA\"", "\"TRAVIS_PULL_REQUEST\""] | [] | ["TRAVIS_PULL_REQUEST", "CIRCLE_SHA1", "CI_PULL_REQUEST", "TRAVIS_PULL_REQUEST_SHA", "CIRCLE_PULL_REQUEST", "CIRCLE_PR_NUMBER", "CIRCLE_BUILD_URL"] | [] | ["TRAVIS_PULL_REQUEST", "CIRCLE_SHA1", "CI_PULL_REQUEST", "TRAVIS_PULL_REQUEST_SHA", "CIRCLE_PULL_REQUEST", "CIRCLE_PR_NUMBER", "CIRCLE_BUILD_URL"] | go | 7 | 0 | |
_data/data_file_generator.py
|
#!/home/ub2/.virtualenvs/blog/bin/python
"""
1. Sanitize all filenames, i.e., remove spaces
2. Check if Markdown with directory name ("${DIR}.md") exists in directory, e.g., 'Python.md' in 'wiki/Python'
a. if not, create such markdown from template and include links to all files and directories
b. if md exists, append links to any files/directories not already included in markdown (except EXCLUDED)
3. If other markdown files exist in directory:
a. sort by (opts: leading numeric numbers, date modified, date created, filename)
b. insert Markdown content into MD_DIR above appended links
4. If sub directory exists in MD_DIR, then apply same function as above with prefix MD_DIR, e.g., 'Python.Sockets.md' in 'wiki/Python/Sockets'
Caveats:
- directory names cannot start with '_' due to jekyll's site generation engine
"""
import re,time
import pandas as pd
import os
def get_file_list():
_files = []
for root, sub_dir, files in os.walk(base_ref_dir):
for f in files:
_files.append(os.path.join(root,f))
df = pd.DataFrame({'fpath':_files})
for it in EXCLUDE_DIRS:
idx = df[df.fpath.str.contains('/%s/' % it)].index.tolist()
df.drop(idx, axis=0, inplace=True)
df = df.reset_index(drop=True)
h_dir = 1
new_col = 'h%s'%h_dir
df['fname'],df[new_col] = zip(*df.fpath.apply(lambda s: '|'.join([s[len(base_ref_dir):].split('/')[-1],s[len(base_ref_dir):].split('/')[0]]).split('|')))
n = df[(df.fname!=df[new_col]) & (df[new_col].isnull()==False)].index.tolist()
while True:
h_dir += 1
new_col = 'h%s'%h_dir
df[new_col] = df[df.index.isin(n)].fpath.map(lambda s: None if len(s[len(base_ref_dir):].split('/'))<h_dir else s[len(base_ref_dir):].split('/')[h_dir-1])
n = df[(df.fname!=df[new_col]) & (df[new_col].isnull()==False)].index.tolist()
if not len(n):
break
df['f_ext'] = df.fname.map(lambda s: s[s.rfind('.')+1:])
return df
def sanitize_filenames(df):
n = df[(df.fname.str.contains(' ')==True)].index.tolist()
nf = df[df.index.isin(n)]
for i,row in nf.iterrows():
f = row.fname
old_fpath = row.fpath
f_dir = old_fpath[:len(f)].rstrip('/')
new_fname = raw_input('\nHit Enter to rename:\n\t"%s" \n\nTO\n\n\t"%s"\n\n else provide new file name: ' % (f,f.replace(' ','_')))
print ''
if new_fname:
new_fpath = os.path.join(f_dir,new_fname)
else:
new_fpath = os.path.join(f_dir,f.replace(' ','_'))
os.system('mv "%s" "%s"' % (old_fpath,new_fpath))
row.fpath = new_fpath
return df
def sort_funct(df,sort_col='fname',sort_type=['leading_numeric', 'case_insensitive']):
"""
any number of the following options:
basic,
leading_numeric,
datetime_modified,
datetime_created,
case_sensitive,
case_insensitive
examples:
sort_funct(['a.txt','b.txt'],'datetime_created')
sort_funct(['a.txt','b.txt'],['leading_numeric','case_sensitive','case_insensitive'])
"""
def date_modified(f_path):
return time.ctime(os.path.getmtime(f_path))
def date_created(f_path):
return time.ctime(os.path.getctime(f_path))
if sort_type!='basic':
sort_cols = ['_sort_case_sensitive',
'_sort_ext',
'_sort_case_insensitive',
'_sort_leading_numeric',
'_sort_datetime_created',
'_sort_datetime_modified']
df['_sort_case_sensitive'] = df[sort_col].map(lambda s: s[s.rfind('/')+1:])
df['_sort_ext'] = df[sort_col].map(lambda s: s[s.rfind('.')+1:])
df['_sort_case_insensitive'] = df[sort_col].map(lambda s: s.lower())
df['_sort_leading_numeric'] = df[sort_col].map(lambda s: None if
not hasattr(re.search('^\d+',s),'group')
else int(re.search('^\d+',s).group()))
df['_sort_datetime_created'] = df.fpath.map(date_created)
df['_sort_datetime_modified'] = df.fpath.map(date_modified)
if type(sort_type)==str:
sort_type = [sort_type]
for it in sort_type:
assert sort_cols.count('_sort_' + it)
mod_sort_type = ['_sort_' + it for it in sort_type]
df.sort_values(by=mod_sort_type,axis=0,inplace=True)
df.drop(sort_cols,axis=1,inplace=True)
return df
else:
return df.sort_values(by=sort_col,axis=0,inplace=True)
def remove_directory_markdowns(df):
h_cols = [it for it in df.columns.tolist() if it[0]=='h' and it[1].isdigit()]
for c in h_cols[1:]:
hf = df[df.fname.map(lambda s: s[:s.rfind('.')]).isin(df[c].unique().tolist())]
f_list = hf.fpath.tolist()
for it in f_list:
os.system('rm -fr %s' % it)
df = get_file_list()
return df
def create_markdown_from_path(new_fpath,sorted_nf):
nf,res = sorted_nf,[]
for i,row in nf.iterrows():
res.append( '- [%s](%s)' % (row.fname.replace('_',' '),row.fpath.replace(base_ref_dir,'',1)) )
res = '---\n' + '\n'.join( res )
with open(new_fpath,'w') as f:
f.write(res)
return True
def create_missing_markdown_indicies(df):
"""
Create markdowns in directories that contain files
but are missing a markdown with the same title as the directory.
"""
h_cols = [it for it in df.columns.tolist() if it[0]=='h' and it[1].isdigit()]
for c in h_cols[1:]:
hf = df[(df[c]!=df.fname) & (df[c].isnull()==False)].sort_values(by=c)
h_unique = hf[c].unique().tolist()
for h in h_unique:
h_title = h
if not len(hf[(hf[c]==h_title) & (hf.fname.isin([h_title+'.md',h_title+'.markdown']))]):
idx = hf[(hf[c]==h_title)].first_valid_index()
f_dir = base_ref_dir + '/'.join(hf.ix[idx,h_cols[:h_cols.index(c)+1]].tolist())
new_fpath = '%s/%s.md' % (f_dir,h_title)
nf = sort_funct(hf[hf[c]==h_title].copy(),sort_col='fname',sort_type=sort_type)
assert create_markdown_from_path(new_fpath,nf)
df = get_file_list()
return df
def create_csv(df):
def get_markdown_content(df,c,h_title):
new_content = []
sub_header_cols = sorted([it for it in df.columns.tolist() if it[0]=='h' and it[1].isdigit()])
expected_fname_h_col = sub_header_cols[sub_header_cols.index(c)+1]
nf = sort_funct( df[(df.f_ext.isin(['md','markdown']) & (df.fname==df[expected_fname_h_col]))].copy(), sort_col='fname')
nf['l_header_val'] = nf['l_header_val'] + 1
if len(nf):
for i,row in nf.iterrows():
new_content.append([row.l_header_val,
row.l_title,
row.l_fname,
row.l_fpath,
row.f_ext])
return new_content
def get_sub_header_content(df,h_title):
new_content = []
sub_header_cols = ['h%s'%it for it in sorted(df.l_header_val.unique().tolist())]
for c in sub_header_cols[1:]:
idx = sub_header_cols.index(c)
nhf = sort_funct( df[(df[c]!=df.fname) &
(df[c].isnull()==False) &
(df.l_header_val==idx+1)
].copy(),sort_col=c)
sub_header_titles = nhf[nhf.l_title==nhf[c]]
# If md_dir exists, create sub header title
# else, create md_dir and create sub header title
if len(sub_header_titles):
for i,row in sub_header_titles.iterrows():
new_content.append([row.l_header_val,
row.l_title,
row.l_fname,
row.l_fpath,
row.f_ext])
else:
n = nhf.first_valid_index()
row = nhf.ix[n,:]
md_dir = row.fpath[:row.fpath.rfind('/')]
md_dir_fname = md_dir[md_dir.rfind('/')+1:] + '.md'
md_dir_title = md_dir_fname[:-3].replace('_',' ').strip()
new_fpath = os.path.join(md_dir,md_dir_fname)
assert create_markdown_from_path(new_fpath,nhf)
new_content.append([row.l_header_val,
md_dir_title,
md_dir_fname,
new_fpath.replace(base_ref_dir,'',1),
'.md'])
return new_content
link_created = []
csv_content = 'idx,header_val,title,fname,fpath,f_ext\n'
csv_templ = ','.join(['%s' for it in csv_content.split(',')])
# Create Headers for Each Directory
# For each Directory:
# with sub-directories
# For each Directory with markdowns
# For each directory with other files not referenced in
# Create Info for CSV out
df['l_fpath'] = df.fpath.map(lambda s: s.replace(base_ref_dir,'',1))
df['l_fname'] = df.fname#.map(lambda s: s if not s.count('.') else s[:s.rfind('.')])
df['l_title'] = df.l_fname.map(lambda s: s.replace('_',' ') if not s.count('.') else s[:s.rfind('.')].replace('_',' '))
df['l_header_val'] = df.apply(lambda s: 1,axis=1)
# Update Header Vals
h_cols = [it for it in df.columns.tolist() if it[0]=='h' and it[1].isdigit()]
h_pt = 1
for c in h_cols[1:]:
h_pt += 1
n = df[df[c]==df.fname].index.tolist()
ndf = df[df.index.isin(n)].copy()
z = ndf.fname.map(lambda d: h_pt-1)
ndf['l_header_val'] = z
df[df.index.isin(n)] = ndf
first_header_col = 'h1'
hf = sort_funct( df[(df[first_header_col]!=df.fname) &
(df[first_header_col].isnull()==False)].copy(),
sort_col=first_header_col)
h_unique = hf[first_header_col].unique().tolist()
h_pt = 0
for h_title in h_unique:
# Add Main Header Row
nf = hf[hf[first_header_col]==h_title].copy()
try:
d = nf[nf.fname==h_title+'.md']
r = d.loc[d.first_valid_index()]
except:
print(''.join(['Could not find "%s" in %s.\n',
'Consider using function ',
'"create_missing_markdown_indicies."'])
% (h_title+'.md',base_ref_dir + h_title))
# create_markdown_from_path('%s/%s/%s.md' % (base_ref_dir,h_title,h_title),nf)
import ipdb as I; I.set_trace()
raise SystemExit
csv_content += str(csv_templ % (h_pt,r.l_header_val,
r.l_title,
r.l_fname,
r.l_fpath,
r.f_ext)).rstrip(',') + '\n'
h_pt += 1
nf.drop(r.name,axis=0,inplace=True)
for it in get_markdown_content(nf,first_header_col,h_title):
csv_content += str(csv_templ % tuple([h_pt] + it) ).rstrip(',') + '\n'
h_pt += 1
for it in get_sub_header_content(nf,h_title):
csv_content += str(csv_templ % tuple([h_pt] + it) ).rstrip(',') + '\n'
h_pt += 1
return csv_content
def make_wiki_csv():
global base_ref_dir,EXCLUDE_DIRS,INCLUDE_EXTENSIONS,df
os.chdir('%s/sethc23.github.io' % os.environ['BD'])
base_ref_dir = '_wiki/'
csv_fpath = '_data/wiki.csv'
sort_type = ['leading_numeric', 'case_insensitive']
remake_directory_markdowns = False
include_everything = False
EXCLUDE_DIRS = ['_site','.git','1_POSTS']
INCLUDE_EXTENSIONS = ['md','markdown']
df = get_file_list()
# df = sanitize_filenames(df)
# df = remove_directory_markdowns(df)
# df = create_missing_markdown_indicies(df)
csv_out = create_csv(df)
r=csv_out.split('\n')
df=pd.DataFrame(columns=r[0].split(','),data=[it.split(',') for it in r[1:-1]])
df['edit_path'] = df.ix[:,['fpath','f_ext']].apply(lambda s: s[0][:len(s[0]) - len(s[1]) - 1],axis=1)
df.to_csv(csv_fpath,index=False)
# print csv_out
def make_blog_csv():
global base_ref_dir,EXCLUDE_DIRS,INCLUDE_EXTENSIONS,df
base_ref_dir = '_POSTS'
os.chdir('%s/sethc23.github.io/' % os.environ['BD'] + '_wiki/')
csv_fpath = '_data/blog.csv'
sort_type = ['leading_numeric', 'case_insensitive']
remake_directory_markdowns = False
include_everything = False
EXCLUDE_DIRS = []
INCLUDE_EXTENSIONS = ['md','markdown']
df = get_file_list()
print "NEEDS WORK"
raise SystemExit
# df = df.ix[:,['fpath','l_fname']]
# csv_out = create_csv(df)
# r=csv_out.split('\n')
# df=pd.DataFrame(columns=r[0].split(','),data=[it.split(',') for it in r[1:-1]])
# df['edit_path'] = df.ix[:,['fpath','f_ext']].apply(lambda s: s[0][:len(s[0]) - len(s[1]) - 1],axis=1)
# df.to_csv(csv_fpath,index=False)
# print csv_out
from sys import argv
if __name__ == '__main__':
args = argv[1:]
if argv[1]=='wiki':
make_wiki_csv()
elif argv[1]=='blog':
make_blog_csv()
| [] | [] | ["BD"] | [] | ["BD"] | python | 1 | 0 | |
auditing/datacollectors/GenericMqttCollector.py
|
# LoRaWAN Security Framework - GenericMqttCollector
# Copyright (c) 2019 IOActive Inc. All rights reserved.
import sys,argparse, datetime, json, base64, os, traceback, json, re, time,logging
# The MQTT client used and its documentation can be found in https://github.com/eclipse/paho.mqtt.python
import paho.mqtt.client as mqtt
import auditing.datacollectors.utils.PhyParser as phy_parser
from auditing.datacollectors.utils.PacketPersistence import save
if os.environ.get("ENVIRONMENT") == "DEV":
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
def init_packet_writter_message():
packet_writter_message = dict()
packet_writter_message['packet'] = None
packet_writter_message['messages'] = list()
return packet_writter_message
class GenericMqttCollector:
TIMEOUT = 60
def __init__(self, data_collector_id, organization_id, host, port, ssl, user, password, topics):
self.data_collector_id = data_collector_id
self.organization_id = organization_id
self.host = host
self.port = port
self.ssl = ssl
self.user = user
self.password = password
self.topics = topics
self.mqtt_client = None
# The data sent to the MQTT queue, to be written by the packet writer. It must have at least one MQ message
self.packet_writter_message = init_packet_writter_message()
def connect(self):
if self.mqtt_client:
print('Existing connection')
else:
self.mqtt_client = mqtt.Client()
self.mqtt_client.organization_id = self.organization_id
self.mqtt_client.data_collector_id = self.data_collector_id
self.mqtt_client.host = self.host
self.mqtt_client.topics = self.topics
self.mqtt_client.on_connect = on_connect
self.mqtt_client.on_message = on_message
self.mqtt_client.packet_writter_message = self.packet_writter_message
self.mqtt_client.reconnect_delay_set(min_delay=10, max_delay=60)
self.mqtt_client.connect_async(self.host, self.port, self.TIMEOUT)
try:
self.mqtt_client.loop_start()
except KeyboardInterrupt:
self.mqtt_client.disconnect()
exit(0)
def disconnect(self):
self.mqtt_client.disconnect()
self.mqtt_client = None
def reconnect(self):
print('reconnection')
def on_message(client, userdata, msg):
try:
payload = msg.payload.decode("utf-8")
standardPacket = {}
# Save this message an topic into MQ
client.packet_writter_message['messages'].append(
{
'topic':msg.topic,
'message':msg.payload.decode("utf-8"),
'data_collector_id': client.data_collector_id
}
)
if len(payload) > 0:
mqttMessage = json.loads(payload)
if 'data' not in mqttMessage:
logging.error('Received a message without "data" field. Topic: %s. Message: %s'%(msg.topic, payload))
return
# Pad the base64 string till it is a multiple of 4
mqttMessage['data'] += "=" * ((4 - len(mqttMessage['data']) % 4) % 4)
# Parse the base64 PHYPayload
standardPacket = phy_parser.setPHYPayload(mqttMessage['data'])
standardPacket['chan'] = mqttMessage.get('chan', None)
standardPacket['stat'] = mqttMessage.get('stat', None)
standardPacket['lsnr'] = mqttMessage.get('lsnr', None)
standardPacket['rssi'] = mqttMessage.get('rssi', None)
standardPacket['tmst'] = mqttMessage.get('tmst', None)
standardPacket['rfch'] = mqttMessage.get('rfch', None)
standardPacket['freq'] = mqttMessage.get('freq', None)
standardPacket['modu'] = mqttMessage.get('modu', None)
standardPacket['datr'] = json.dumps(parse_datr(mqttMessage.get('datr', None)))
standardPacket['codr'] = mqttMessage.get('codr', None)
standardPacket['size'] = mqttMessage.get('size', None)
standardPacket['data'] = mqttMessage.get('data', None)
# Gateway not provided by this broker
standardPacket['gateway'] = None
# These fields come in the /up topic
standardPacket['seqn'] = mqttMessage.get('seqn', None)
standardPacket['opts'] = mqttMessage.get('opts', None)
standardPacket['port'] = mqttMessage.get('port', None)
# These fields are indepedant from the payload
standardPacket['topic'] = msg.topic
if "/joined" in msg.topic:
standardPacket['m_type'] = "JoinAccept"
standardPacket['date'] = datetime.datetime.now().__str__()
standardPacket['dev_eui'] = getDevEUIFromMQTTTopic(msg.topic)
standardPacket['data_collector_id'] = client.data_collector_id
standardPacket['organization_id'] = client.organization_id
client.packet_writter_message['packet']= standardPacket
save(client.packet_writter_message, client.data_collector_id)
logging.debug('Topic: {0}. Message received: {1}'.format(msg.topic, msg.payload.decode("utf-8") ))
# Reset packet_writter_message
client.packet_writter_message = init_packet_writter_message()
except Exception as e:
logging.error("Error creating Packet in GenericMqttCollector:", e, "Topic: ", msg.topic, "Message: ", msg.payload.decode("utf-8"))
def on_connect(client, userdata, flags, rc):
logging.info("Connected to: {} with result code: {}".format(client.host, rc))
client.subscribe(client.topics)
def getDevEUIFromMQTTTopic(topic):
search = re.search('lora/(.*)/', topic)
devEUI= None
if search:
devEUI = search.group(1).replace('-', '')
return devEUI
def parse_datr(encoded_datr):
datr = {}
if not encoded_datr:
return datr
search = re.search('SF(.*)BW(.*)', encoded_datr)
if search:
datr["spread_factor"] = search.group(1)
datr["bandwidth"] = search.group(2)
return datr
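# Illustrative example (not part of the original code): parse_datr("SF7BW125")
# returns {"spread_factor": "7", "bandwidth": "125"}; a missing or empty "datr"
# value yields an empty dict.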
if __name__ == '__main__':
from auditing.db.Models import DataCollector, DataCollectorType, Organization, commit, rollback
print ("\n*****************************************************")
print ("LoRaWAN Security Framework - %s"%(sys.argv[0]))
print ("Copyright (c) 2019 IOActive Inc. All rights reserved.")
print ("*****************************************************\n")
parser = argparse.ArgumentParser(description='This script connects to the MQTT broker and saves messages into the DB. You must specify a unique collector ID and you can specify the topics you want to subscribe to.')
requiredGroup = parser.add_argument_group('Required arguments')
requiredGroup.add_argument('--ip',
help='MQTT broker ip, eg. --ip 192.168.3.101.')
requiredGroup.add_argument('--port',
help='MQTT broker port, eg. --port 623.',
type=int)
parser.add_argument('--collector-id',
help = 'The ID of the DataCollector. This ID will be associated with the packets saved into the DB. eg. --collector-id 1')
parser.add_argument('--organization-id',
help = 'The ID of the Organization. This ID will be associated with the packets saved into the DB. eg. --organization-id 1',
default= None)
parser.add_argument('--topics',
nargs = '+',
help = 'List the topic(s) you want to subscribe to, separated by spaces. If nothing is given, the default will be "#".',
default = "#")
options = parser.parse_args()
if options.topics != None:
topics = list()
for topic in options.topics:
topics.append((topic, 0))
# Get the organization
if options.organization_id:
organization_obj = Organization.find_one(options.organization_id)
if organization_obj is None:
print("Organization doesn't exist. Please provide a valid ID")
exit(0)
else:
organization_quant = Organization.count()
if organization_quant > 1:
print("There are more than one organizations in the DB. Provide the Organization DB explicitly.")
elif organization_quant == 1:
organization_obj = Organization.find_one()
else:
organization_obj = Organization(name = "Auto-generated Organization")
organization_obj.save()
# Get the data collector
collector_obj = None
if options.collector_id:
collector_obj = DataCollector.find_one(options.collector_id)
if collector_obj is None:
print("DataCollector doesn't exist. Please provide a valid ID")
exit(0)
else:
if options.ip and options.port:
collector_type_obj = DataCollectorType.find_one_by_type("generic_collector")
if collector_type_obj is None:
collector_type_obj= DataCollectorType(
type = "generic_collector",
name= "generic_collector")
collector_type_obj.save()
collector_obj= DataCollector.find_one_by_ip_port_and_dctype_id(collector_type_obj.id, options.ip, str(options.port))
if collector_obj is None:
collector_obj= DataCollector(
data_collector_type_id= collector_type_obj.id,
name= "Test collector",
organization_id = organization_obj.id,
ip= options.ip,
port= str(options.port))
collector_obj.save()
else:
print('DataCollector IP and port must be provided if a collector ID is not given.')
exit(0)
connector = GenericMqttCollector(
data_collector_id = collector_obj.id,
organization_id = collector_obj.organization_id,
host = collector_obj.ip,
port = int(collector_obj.port),
ssl = None,
user = None,
password = None,
topics = topics)
connector.connect()
while(True):
time.sleep(5)
try:
commit()
logging.debug('Commit done!')
except Exception as exc:
logging.error('Error at commit: %s', exc)
logging.info('Rolling back the session')
rollback()
|
[] |
[] |
[
"ENVIRONMENT"
] |
[]
|
["ENVIRONMENT"]
|
python
| 1 | 0 | |
iptables/client.go
|
package iptables
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"strings"
)
// Client is the provider configuration.
type Client struct {
HTTPS bool
Insecure bool
IPv6 bool
Port int
FirewallIP string
Logname string
Login string
Password string
AllowedIPs []interface{}
}
// Rule struct for generating an iptables line.
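// Illustrative example (not from the original code): a rule accepting TCP
// traffic from 10.0.0.0/8 to a firewall API port 8080 could be expressed as
//
//	Rule{
//		Action: "ACCEPT", Chain: "INPUT", Proto: "tcp",
//		IfaceIn: "*", IfaceOut: "*",
//		IPSrc: "10.0.0.0_8", IPDst: "192.0.2.1",
//		Sports: "0", Dports: "8080", Position: "?",
//	}
//
// Note that CIDR masks are written with "_" instead of "/" in the API paths,
// as NewClient does below when rewriting AllowedIPs.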
type Rule struct {
Except bool
Fragment bool
Notrack bool
Action string
State string
Icmptype string
Chain string
Proto string
IfaceIn string
IfaceOut string
Iface string
IPSrc string
IPDst string
IPNat string
Sports string
Dports string
Tcpflags1 string
Tcpflags2 string
Position string
NthEvery string
NthPacket string
Logprefix string
Tcpmss string
}
// NewClient configure.
func NewClient(ctx context.Context, c *Config, login string, password string) (*Client, error) {
client := &Client{
FirewallIP: c.firewallIP,
Port: c.firewallPortAPI,
AllowedIPs: c.allowedIPs,
HTTPS: c.https,
Insecure: c.insecure,
Logname: c.logname,
Login: login,
Password: password,
IPv6: c.ipv6Enable,
}
// Allow no default rules
if os.Getenv("CONFIG_IPTABLES_TERRAFORM_NODEFAULT") == "" {
checkExistsRouter, err := client.chainAPIV4(ctx, "router_chain", "GET")
if err != nil {
return nil, err
}
if !checkExistsRouter {
createChain, err := client.chainAPIV4(ctx, "router_chain", "PUT")
if !createChain || err != nil {
return nil, fmt.Errorf("create chain router failed : %s", err)
}
}
// Add AllowedIPs on TCP Firewall_IP:Port
for _, cidr := range client.AllowedIPs {
// raw notrack on Firewall_IP:Port
acceptAPI := Rule{
Action: "CT",
Chain: "PREROUTING",
Proto: "tcp",
IfaceIn: "*",
IfaceOut: "*",
IPSrc: strings.ReplaceAll(cidr.(string), "/", "_"),
IPDst: client.FirewallIP,
Sports: "0",
Dports: strconv.Itoa(client.Port),
Tcpflags1: "SYN,RST,ACK,FIN",
Tcpflags2: "SYN",
Notrack: true,
Position: "?",
}
routeexists, err := client.rawAPIV4(ctx, acceptAPI, "GET")
if err != nil {
return nil, fmt.Errorf("check rules (raw) allowed IP for API for cidr %s failed : %s", cidr.(string), err)
}
if !routeexists {
routeCIDR, err := client.rawAPIV4(ctx, acceptAPI, "PUT")
if !routeCIDR || err != nil {
return nil, fmt.Errorf("create rules (raw) allowed IP for API for cidr %s failed : %s", cidr.(string), err)
}
}
// ingress on Firewall_IP:Port
acceptAPI = Rule{
Action: "ACCEPT",
Chain: "router_chain",
Proto: "tcp",
IfaceIn: "*",
IfaceOut: "*",
IPSrc: strings.ReplaceAll(cidr.(string), "/", "_"),
IPDst: client.FirewallIP,
Sports: "0",
Dports: strconv.Itoa(client.Port),
Position: "?",
}
routeexists, err = client.rulesAPIV4(ctx, acceptAPI, "GET")
if err != nil {
return nil, fmt.Errorf("check rules (ingress) allowed IP for API for cidr %s failed : %s", cidr.(string), err)
}
if !routeexists {
routeCIDR, err := client.rulesAPIV4(ctx, acceptAPI, "PUT")
if !routeCIDR || err != nil {
return nil, fmt.Errorf("create rules (ingress) allowed IP for API for cidr %s failed : %s", cidr.(string), err)
}
}
// egress on Firewall_IP:Port
acceptAPI = Rule{
Action: "ACCEPT",
Chain: "router_chain",
Proto: "tcp",
IfaceIn: "*",
IfaceOut: "*",
IPSrc: client.FirewallIP,
IPDst: strings.ReplaceAll(cidr.(string), "/", "_"),
Sports: strconv.Itoa(client.Port),
Dports: "0",
Position: "?",
}
routeexists, err = client.rulesAPIV4(ctx, acceptAPI, "GET")
if err != nil {
return nil, fmt.Errorf("check rules (egress) allowed IP for API for cidr %s failed : %s", cidr.(string), err)
}
if !routeexists {
routeCIDR, err := client.rulesAPIV4(ctx, acceptAPI, "PUT")
if !routeCIDR || err != nil {
return nil, fmt.Errorf("create rules (egress) allowed IP for API for cidr %s failed : %s", cidr.(string), err)
}
}
}
// Add rules for default chain
defaultTable := []string{"INPUT", "FORWARD", "OUTPUT"}
for _, table := range defaultTable {
routeDefault := Rule{
Action: "router_chain",
Chain: table,
Proto: "all",
IfaceIn: "*",
IfaceOut: "*",
IPSrc: "0.0.0.0_0",
IPDst: "0.0.0.0_0",
Sports: "0",
Dports: "0",
Position: "?",
}
ruleexists, err := client.rulesAPIV4(ctx, routeDefault, "GET")
if err != nil {
return nil, fmt.Errorf("check default rules %s failed : %s", table, err)
}
if !ruleexists {
resp, err := client.rulesAPIV4(ctx, routeDefault, "PUT")
if !resp || err != nil {
return nil, fmt.Errorf("create default rules %s failed : %s", table, err)
}
}
if !c.noAddDefaultDrop {
ruleDrop := Rule{
Action: "DROP",
Chain: table,
Proto: "all",
IfaceIn: "*",
IfaceOut: "*",
IPSrc: "0.0.0.0_0",
IPDst: "0.0.0.0_0",
Sports: "0",
Dports: "0",
Position: "?",
}
ruleexists, err = client.rulesAPIV4(ctx, ruleDrop, "GET")
if err != nil {
return nil, fmt.Errorf("check default rules drop %s failed : %s", table, err)
}
if !ruleexists {
resp, err := client.rulesAPIV4(ctx, ruleDrop, "PUT")
if !resp || err != nil {
return nil, fmt.Errorf("create default rules drop %s failed : %s", table, err)
}
}
}
}
if c.ipv6Enable {
checkExistsRouter, err := client.chainAPIV6(ctx, "router_chain", "GET")
if err != nil {
return nil, err
}
if !checkExistsRouter {
createChain, err := client.chainAPIV6(ctx, "router_chain", "PUT")
if !createChain || err != nil {
return nil, fmt.Errorf("create chain router v6 failed : %s", err)
}
}
for _, table := range defaultTable {
routeDefault := Rule{
Action: "router_chain",
Chain: table,
Proto: "all",
IfaceIn: "*",
IfaceOut: "*",
IPSrc: "::_0",
IPDst: "::_0",
Sports: "0",
Dports: "0",
Position: "?",
}
ruleexists, err := client.rulesAPIV6(ctx, routeDefault, "GET")
if err != nil {
return nil, fmt.Errorf("check default rules v6 %s failed : %s", table, err)
}
if !ruleexists {
resp, err := client.rulesAPIV6(ctx, routeDefault, "PUT")
if !resp || err != nil {
return nil, fmt.Errorf("create default rules v6 %s failed : %s", table, err)
}
}
if !c.noAddDefaultDrop {
ruleDrop := Rule{
Action: "DROP",
Chain: table,
Proto: "all",
IfaceIn: "*",
IfaceOut: "*",
IPSrc: "::_0",
IPDst: "::_0",
Sports: "0",
Dports: "0",
Position: "?",
}
ruleexists, err = client.rulesAPIV6(ctx, ruleDrop, "GET")
if err != nil {
return nil, fmt.Errorf("check default rules drop v6 %s failed : %s", table, err)
}
if !ruleexists {
resp, err := client.rulesAPIV6(ctx, ruleDrop, "PUT")
if !resp || err != nil {
return nil, fmt.Errorf("create default rules drop v6 %s failed : %s", table, err)
}
}
}
}
}
}
return client, nil
}
func (client *Client) newRequest(ctx context.Context, method string, uriString string) (*http.Request, error) {
IP := client.FirewallIP
port := strconv.Itoa(client.Port)
matched := strings.Contains(uriString, "?")
var urLString string
if matched {
urLString = "http://" + IP + ":" + port + uriString + "&logname=" + client.Logname
} else {
urLString = "http://" + IP + ":" + port + uriString + "?&logname=" + client.Logname
}
if client.HTTPS {
urLString = strings.ReplaceAll(urLString, "http://", "https://")
}
req, err := http.NewRequestWithContext(ctx, method, urLString, nil)
if err != nil {
return nil, fmt.Errorf("error during creation of request: %s", err)
}
if client.Login != "" && client.Password != "" {
req.SetBasicAuth(client.Login, client.Password)
}
log.Printf("[INFO] New API request: %s %s", method, urLString)
return req, nil
}
func (client *Client) rulesAPI(ctx context.Context, version string, rule Rule, method string) (bool, error) {
var uriString []string
if version == "v4" {
uriString = append(uriString, "/rules/")
}
if version == "v6" {
uriString = append(uriString, "/rules_v6/")
}
uriString = append(uriString, rule.Action, "/", rule.Chain, "/", rule.Proto, "/", rule.IfaceIn, "/", rule.IfaceOut,
"/", rule.IPSrc, "/", rule.IPDst, "/")
if (rule.Sports != "0") || (rule.Dports != "0") || (rule.State != "") ||
(rule.Icmptype != "") || rule.Fragment || (rule.Position != "?") || (rule.Logprefix != "") {
uriString = append(uriString, "?")
if rule.Sports != "0" {
uriString = append(uriString, "&sports=", rule.Sports)
}
if rule.Dports != "0" {
uriString = append(uriString, "&dports=", rule.Dports)
}
if rule.State != "" {
uriString = append(uriString, "&state=", rule.State)
}
if rule.Icmptype != "" {
uriString = append(uriString, "&icmptype=", rule.Icmptype)
}
if rule.Fragment {
uriString = append(uriString, "&fragment=true")
}
if rule.Position != "?" {
uriString = append(uriString, "&position=", rule.Position)
}
if rule.Logprefix != "" {
uriString = append(uriString, "&log-prefix=", rule.Logprefix)
}
}
req, err := client.newRequest(ctx, method, strings.Join(uriString, ""))
if err != nil {
return false, err
}
tr := &http.Transport{
DisableKeepAlives: true,
}
if client.Insecure {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
DisableKeepAlives: true,
}
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(req)
if err != nil {
log.Printf("error when do request %s", err)
return false, err
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
log.Printf("[INFO] Response API request %d %s", resp.StatusCode, string(body))
if resp.StatusCode == http.StatusOK {
return true, nil
}
if resp.StatusCode == http.StatusNotFound {
return false, nil
}
if resp.StatusCode == http.StatusConflict {
return false, errors.New("conflict with position")
}
return false, errors.New(string(body))
}
func (client *Client) natAPI(ctx context.Context, version string, rule Rule, method string) (bool, error) {
var uriString []string
if version == "v4" {
uriString = append(uriString, "/nat/")
}
if version == "v6" {
uriString = append(uriString, "/nat_v6/")
}
uriString = append(uriString, rule.Action, "/", rule.Chain, "/", rule.Proto, "/", rule.Iface, "/",
rule.IPSrc, "/", rule.IPDst, "/", rule.IPNat, "/")
if (rule.Dports != "0") || (rule.Position != "?") || (rule.NthEvery != "") || rule.Except {
uriString = append(uriString, "?")
if rule.Dports != "0" {
uriString = append(uriString, "&dport=", rule.Dports)
}
if rule.Position != "?" {
uriString = append(uriString, "&position=", rule.Position)
}
if rule.NthEvery != "" {
uriString = append(uriString, "&nth_every=", rule.NthEvery, "&nth_packet=", rule.NthPacket)
}
if rule.Except {
uriString = append(uriString, "&except=true")
}
}
req, err := client.newRequest(ctx, method, strings.Join(uriString, ""))
if err != nil {
return false, err
}
tr := &http.Transport{
DisableKeepAlives: true,
}
if client.Insecure {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
DisableKeepAlives: true,
}
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(req)
if err != nil {
log.Printf("rrror when do request %s", err)
return false, err
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
log.Printf("[INFO] Response API request %d %s", resp.StatusCode, string(body))
if resp.StatusCode == http.StatusOK {
return true, nil
}
if resp.StatusCode == http.StatusNotFound {
return false, nil
}
if resp.StatusCode == http.StatusConflict {
return false, errors.New("conflict with position")
}
return false, errors.New(string(body))
}
func (client *Client) rawAPI(ctx context.Context, version string, rule Rule, method string) (bool, error) {
var uriString []string
if version == "v4" {
uriString = append(uriString, "/raw/")
}
if version == "v6" {
uriString = append(uriString, "/raw_v6/")
}
uriString = append(uriString, rule.Action, "/", rule.Chain, "/", rule.Proto, "/", rule.IfaceIn, "/",
rule.IfaceOut, "/", rule.IPSrc, "/", rule.IPDst, "/")
if (rule.Sports != "0") || (rule.Dports != "0") || (rule.Tcpflags1 != "") || (rule.Tcpflags2 != "") ||
rule.Notrack || (rule.Position != "?") || (rule.Logprefix != "") || (rule.Tcpmss != "") {
uriString = append(uriString, "?")
if rule.Sports != "0" {
uriString = append(uriString, "&sports=", rule.Sports)
}
if rule.Dports != "0" {
uriString = append(uriString, "&dports=", rule.Dports)
}
if (rule.Tcpflags1 != "") && (rule.Tcpflags2 != "") {
uriString = append(uriString, "&tcpflag1=", rule.Tcpflags1, "&tcpflag2=", rule.Tcpflags2)
}
if rule.Notrack {
uriString = append(uriString, "&notrack=true")
}
if rule.Position != "?" {
uriString = append(uriString, "&position=", rule.Position)
}
if rule.Logprefix != "" {
uriString = append(uriString, "&log-prefix=", rule.Logprefix)
}
if rule.Tcpmss != "" {
uriString = append(uriString, "&tcpmss=", rule.Tcpmss)
}
}
req, err := client.newRequest(ctx, method, strings.Join(uriString, ""))
if err != nil {
return false, err
}
tr := &http.Transport{
DisableKeepAlives: true,
}
if client.Insecure {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
DisableKeepAlives: true,
}
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(req)
if err != nil {
log.Printf("error when do request %s", err)
return false, err
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
log.Printf("[INFO] Response API request %d %s", resp.StatusCode, string(body))
if resp.StatusCode == http.StatusOK {
return true, nil
}
if resp.StatusCode == http.StatusNotFound {
return false, nil
}
if resp.StatusCode == http.StatusConflict {
return false, errors.New("conflict with position")
}
return false, errors.New(string(body))
}
func (client *Client) chainAPI(ctx context.Context, version string, chain string, method string) (bool, error) {
var uriString []string
if version == "v4" {
uriString = append(uriString, "/chain/filter/")
}
if version == "v6" {
uriString = append(uriString, "/chain_v6/filter/")
}
uriString = append(uriString, chain, "/")
req, err := client.newRequest(ctx, method, strings.Join(uriString, ""))
if err != nil {
return false, err
}
tr := &http.Transport{
DisableKeepAlives: true,
}
if client.Insecure {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
DisableKeepAlives: true,
}
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(req)
if err != nil {
log.Printf("error when do request %s", err)
return false, err
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
log.Printf("[INFO] Response API request %d %s", resp.StatusCode, string(body))
if resp.StatusCode == http.StatusOK {
return true, nil
}
if resp.StatusCode == http.StatusBadRequest {
return false, nil
}
if resp.StatusCode == http.StatusUnauthorized {
return false, errors.New(strings.Join([]string{client.FirewallIP, ": You are Unauthorized"}, " "))
}
return false, errors.New(string(body))
}
func (client *Client) save(ctx context.Context, version string) error {
var uriString []string
if version == "v4" {
uriString = append(uriString, "/save/")
}
if version == "v6" {
uriString = append(uriString, "/save_v6/")
}
req, err := client.newRequest(ctx, "GET", strings.Join(uriString, ""))
if err != nil {
return err
}
tr := &http.Transport{
DisableKeepAlives: true,
}
if client.Insecure {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
DisableKeepAlives: true,
}
}
httpClient := &http.Client{Transport: tr}
resp, err := httpClient.Do(req)
if err != nil {
log.Printf("error when do request %s", err)
return err
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
log.Printf("[INFO] Response API request %d %s", resp.StatusCode, string(body))
if resp.StatusCode != http.StatusOK {
return errors.New(string(body))
}
return nil
}
func (client *Client) chainAPIV4(ctx context.Context, chain string, method string) (bool, error) {
return client.chainAPI(ctx, "v4", chain, method)
}
func (client *Client) rulesAPIV4(ctx context.Context, rule Rule, method string) (bool, error) {
return client.rulesAPI(ctx, "v4", rule, method)
}
func (client *Client) natAPIV4(ctx context.Context, rule Rule, method string) (bool, error) {
return client.natAPI(ctx, "v4", rule, method)
}
func (client *Client) rawAPIV4(ctx context.Context, rule Rule, method string) (bool, error) {
return client.rawAPI(ctx, "v4", rule, method)
}
func (client *Client) saveV4(ctx context.Context) error {
return client.save(ctx, "v4")
}
func (client *Client) chainAPIV6(ctx context.Context, chain string, method string) (bool, error) {
return client.chainAPI(ctx, "v6", chain, method)
}
func (client *Client) rulesAPIV6(ctx context.Context, rule Rule, method string) (bool, error) {
return client.rulesAPI(ctx, "v6", rule, method)
}
func (client *Client) natAPIV6(ctx context.Context, rule Rule, method string) (bool, error) {
return client.natAPI(ctx, "v6", rule, method)
}
func (client *Client) rawAPIV6(ctx context.Context, rule Rule, method string) (bool, error) {
return client.rawAPI(ctx, "v6", rule, method)
}
func (client *Client) saveV6(ctx context.Context) error {
return client.save(ctx, "v6")
}
|
[
"\"CONFIG_IPTABLES_TERRAFORM_NODEFAULT\""
] |
[] |
[
"CONFIG_IPTABLES_TERRAFORM_NODEFAULT"
] |
[]
|
["CONFIG_IPTABLES_TERRAFORM_NODEFAULT"]
|
go
| 1 | 0 | |
nz_django/day1/urlparams_demo/urlparams_demo/wsgi.py
|
"""
WSGI config for urlparams_demo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "urlparams_demo.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
setup.py
|
#!/usr/bin/env python3
# ----------------------------------------------------------------------------
#
# Copyright 2018 EMVA
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
# Standard library imports
import os
import setuptools
from distutils import log
import sys
# Related third party imports
# Local application/library specific imports
import versioneer as versioneer
#
log.set_verbosity(log.DEBUG)
log.info('Entered setup.py')
log.info('$PATH=%s' % os.environ['PATH'])
# Check the Python version:
supported_versions = [(3, 4), (3, 5), (3, 6)]
if sys.version_info[:2] not in supported_versions:
raise RuntimeError(
'See https://github.com/genicam/harvesters#requirements'
)
with open('README.rst', 'r',encoding='utf-8_sig') as fh:
__doc__ = fh.read()
description = '🌈 Friendly Image Acquisition Library for Computer Vision People'
# Determine the base directory:
base_dir = os.path.dirname(__file__)
src_dir = os.path.join(base_dir, 'src')
# Make our package importable when executing setup.py;
# the package is located in src_dir:
sys.path.insert(0, src_dir)
setuptools.setup(
# The author of the package:
author='The GenICam Committee',
author_email='[email protected]',
# Tells the index and pip some additional metadata about our package:
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
),
# A short, one-sentence summary of the package:
description=description,
# Location where the package may be downloaded:
download_url='https://pypi.org/project/harvesters/',
# A list of required Python modules:
install_requires=['genicam>=1', 'numpy'],
#
license='Apache Software License V2.0',
# A detailed description of the package:
long_description=__doc__,
# The index to tell what type of markup is used for the long description:
long_description_content_type='text/x-rst',
# The name of the package:
name='harvesters',
# A list of all Python import packages that should be included in the
# distribution package:
packages=setuptools.find_packages(where='src'),
# Keys: Package names; an empty name stands for the root package.
# Values: Directory names relative to the setup.py.
package_dir={
'': 'src'
},
# Keys: Package names.
# Values: A list of globs.
# All the files that match package_data will be added to the MANIFEST
# file if no template is provided:
package_data={
'harvesters': [
os.path.join(
'logging', '*.ini'
),
os.path.join(
'test', 'xml', '*.xml'
),
os.path.join(
'test', 'xml', '*.zip'
),
]
},
# A list of supported platforms:
platforms='any',
#
provides=['harvesters'],
# The URL for the website of the project:
url='https://github.com/genicam/harvesters',
# The package version:
version=versioneer.get_version(),
)
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
internal/command/init.go
|
package command
import (
"encoding/json"
"github.com/opencontainers/runc/libcontainer"
_ "github.com/opencontainers/runc/libcontainer/nsenter"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"io/ioutil"
"os"
"runtime"
)
func init() {
if len(os.Args) > 1 && os.Args[1] == "init" {
runtime.GOMAXPROCS(1)
runtime.LockOSThread()
unmountPaths()
factory, _ := libcontainer.New("")
if err := factory.StartInitialization(); err != nil {
logrus.Fatal(err)
}
panic("--this line should have never been executed, congratulations--")
}
}
func unmountPaths() {
//make mount point "/" private
flag := unix.MS_PRIVATE
_ = unix.Mount("", "/", "", uintptr(flag), "")
//get the config path
containerRoot := os.Getenv("_LIBCONTAINER_STATEDIR")
if containerRoot == "" {
return
}
configPath := containerRoot + "/config"
file, err := os.Open(configPath)
if err != nil {
panic(err)
}
defer file.Close()
cp, err := ioutil.ReadAll(file)
if err != nil {
panic(err)
}
var options execOptions
cf, err := os.Open(string(cp))
if err != nil {
panic(err)
}
defer cf.Close()
err = json.NewDecoder(cf).Decode(&options.specConfig)
if err != nil {
panic(err)
}
// execute unmount
for _, unmountPath := range options.specConfig.UnmountPaths {
_ = unix.Unmount(unmountPath, 0)
}
}
|
[
"\"_LIBCONTAINER_STATEDIR\""
] |
[] |
[
"_LIBCONTAINER_STATEDIR"
] |
[]
|
["_LIBCONTAINER_STATEDIR"]
|
go
| 1 | 0 | |
db_orm_models/blocking/tests/test_models/base.py
|
import os
from flask_sqlalchemy import SQLAlchemy
import http_server_pkg
class BlockingDBObjectsBaseTestCase(object):
def setUp(self, *args, **kwargs):
http_server_pkg.app.testing = True
http_server_pkg.app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['TEST_DATABASE_URL']
self.test_db_obj = SQLAlchemy(http_server_pkg.app)
self.test_session = self.test_db_obj.session()
def tearDown(self):
self.test_session.rollback()
self.test_session.close()
def test_retrieve_table_data_by_id(self):
ids_to_get = [
]
ids_to_get_string = u"all"
test_validated_get_rqst_params = {
}
test_errors = []
retrieved_test_table_data = self.db_model.retrieve_table_data_by_id(
self.test_session,
test_validated_get_rqst_params,
ids_to_get_string,
ids_to_get,
test_errors
)
self.assertEqual(len(test_errors), 0, "{}".format(test_errors))
self.assertNotEqual(len(retrieved_test_table_data), 0, "{}".format([retrieved_test_table_data]))
|
[] |
[] |
[
"TEST_DATABASE_URL"
] |
[]
|
["TEST_DATABASE_URL"]
|
python
| 1 | 0 | |
day18/01.py
|
from copy import deepcopy
from typing import NamedTuple, List
from collections import Counter
class Point(NamedTuple):
x: int
y: int
def __add__(self, other: 'Point'):
return Point(self.x + other.x, self.y + other.y)
grid = []
with open('01.txt') as f:
for line in f.readlines():
grid.append(list(line.strip()))
def iterate_lights(old_grid: List[List[str]]) -> List[List[str]]:
x_max = len(old_grid[0])
y_max = len(old_grid)
directions = [Point(x, y) for x in (-1, 0, 1) for y in (-1, 0, 1) if Point(x, y) != Point(0, 0)]
new_grid = deepcopy(old_grid)
search_area = [Point(x, y) for y in range(y_max) for x in range(x_max)]
for p in search_area:
lights = Counter([old_grid[sp.y][sp.x] for sp in
filter(lambda dp: 0 <= dp.x < x_max and 0 <= dp.y < y_max, [p + d for d in directions])])
if new_grid[p.y][p.x] == '#':
if 2 <= lights['#'] <= 3:
new_grid[p.y][p.x] = '#'
else:
new_grid[p.y][p.x] = '.'
else:
if lights['#'] == 3:
new_grid[p.y][p.x] = '#'
return new_grid
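# Note: the update rule above is Conway's Game of Life: a lit light stays on
# with 2 or 3 lit neighbours, and an unlit light turns on with exactly 3.
# iterate_lights_part2 below applies the same rule but keeps the four corner
# lights permanently on.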
def iterate_lights_part2(old_grid: List[List[str]]) -> List[List[str]]:
x_max = len(old_grid[0])
y_max = len(old_grid)
old_grid[0][0] = '#'
old_grid[0][x_max-1] = '#'
old_grid[y_max-1][0] = '#'
old_grid[y_max-1][x_max-1] = '#'
directions = [Point(x, y) for x in (-1, 0, 1) for y in (-1, 0, 1) if Point(x, y) != Point(0, 0)]
new_grid = deepcopy(old_grid)
search_area = [Point(x, y) for y in range(y_max) for x in range(x_max)]
for p in search_area:
lights = Counter([old_grid[sp.y][sp.x] for sp in
filter(lambda dp: 0 <= dp.x < x_max and 0 <= dp.y < y_max, [p + d for d in directions])])
if new_grid[p.y][p.x] == '#':
if 2 <= lights['#'] <= 3:
new_grid[p.y][p.x] = '#'
else:
new_grid[p.y][p.x] = '.'
else:
if lights['#'] == 3:
new_grid[p.y][p.x] = '#'
new_grid[0][0] = '#'
new_grid[0][x_max-1] = '#'
new_grid[y_max-1][0] = '#'
new_grid[y_max-1][x_max-1] = '#'
return new_grid
def print_grid(grid_data):
for data in grid_data:
print(''.join(data))
print()
if __name__ == '__main__':
part1_grid = deepcopy(grid)
for _ in range(100):
part1_grid = iterate_lights(part1_grid)
g = []
for li in part1_grid:
g.extend(li)
print('Part 1:', Counter(g)['#'])
for _ in range(100):
grid = iterate_lights_part2(deepcopy(grid))
g = []
for li in grid:
g.extend(li)
print('Part 2:', Counter(g)['#'])
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
docs/conf.py
|
import sys
import os
# Provide path to the python modules we want to run autodoc on
sys.path.insert(0, os.path.abspath('../qp'))
# Avoid imports that may be unsatisfied when running sphinx, see:
# http://stackoverflow.com/questions/15889621/sphinx-how-to-exclude-imports-in-automodule#15912502
autodoc_mock_imports = ["scipy","scipy.interpolate"]
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode']
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
# only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so
# no need to specify it.
master_doc = 'index'
autosummary_generate = True
autoclass_content = "class"
autodoc_default_flags = ["members", "no-special-members"]
html_sidebars = { '**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'], }
project = u'qp'
author = u'Alex Malz and Phil Marshall'
copyright = u'2016, ' + author
version = "0.1"
release = "0.1.0"
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
tracardi/service/storage/index.py
|
from datetime import datetime
import os
class Index:
def __init__(self, multi_index, index, mapping, rel):
self.multi_index = multi_index
self.rel = rel
self.index = index
self.prefix = "{}-".format(os.environ['INSTANCE_PREFIX']) if 'INSTANCE_PREFIX' in os.environ and os.environ['INSTANCE_PREFIX'] else ''
self.mapping = mapping
def _index(self):
return self.prefix + self.index
def get_read_index(self):
if self.multi_index is False:
return self._index()
return self._index() + "*"
def get_write_index(self):
if self.multi_index is False:
return self._index()
date = datetime.now()
return self._index() + f"-{date.year}-{date.month}"
class Resource:
def __init__(self):
self.resources = {
"tracardi-pro": Index(multi_index=False, index="tracardi-pro", mapping="mappings/tracardi-pro-index.json",
rel=None),
"project": Index(multi_index=False, index="tracardi-flow-project", mapping=None, rel=None),
"action": Index(multi_index=False, index="tracardi-flow-action-plugins",
mapping="mappings/plugin-index.json", rel=None),
"token": Index(multi_index=False, index="tracardi-token", mapping="mappings/token-index.json", rel=None),
"resource": Index(multi_index=False, index="tracardi-resource", mapping="mappings/resource-index.json",
rel=None),
"event-source": Index(multi_index=False, index="tracardi-event-source",
mapping="mappings/event-source-index.json",
rel=None),
"session": Index(multi_index=True, index="tracardi-session", mapping="mappings/session-index.json",
rel='profile.id'),
"profile": Index(multi_index=True, index="tracardi-profile", mapping="mappings/profile-index.json",
rel='_id'),
"event": Index(multi_index=True, index="tracardi-event", mapping="mappings/event-index.json", rel=None),
"flow": Index(multi_index=False, index="tracardi-flow", mapping="mappings/flow-index.json", rel=None),
"rule": Index(multi_index=False, index="tracardi-rule", mapping="mappings/rule-index.json", rel=None),
"segment": Index(multi_index=False, index="tracardi-segment", mapping="mappings/segment-index.json",
rel=None),
"console-log": Index(multi_index=False, index="tracardi-console-log",
mapping="mappings/console-log-index.json", rel=None),
"stat-log": Index(multi_index=False, index="tracardi-stat-log", mapping=None, rel=None),
"debug-info": Index(multi_index=False, index="tracardi-debug-info",
mapping="mappings/debug-info-index.json", rel=None),
"api-instance": Index(multi_index=False, index="tracardi-api-instance",
mapping="mappings/api-instance-index.json", rel=None),
"task": Index(multi_index=False, index="tracardi-task", mapping="mappings/task-index.json", rel=None),
"profile-purchase": Index(multi_index=False, index="tracardi-profile-purchase",
mapping="mappings/profile-purchase-index.json", rel=None),
"event-tags": Index(multi_index=False, index="tracardi-event-tags", mapping="mappings/tag-index.json",
rel=None),
"consent-type": Index(multi_index=False, index="tracardi-consent-type", mapping="mappings/consent-type.json",
rel=None),
"user": Index(multi_index=False, index="tracardi-user", mapping="mappings/user-index.json", rel=None),
"validation-schema": Index(multi_index=False, index="tracardi-validation-schema",
mapping="mappings/validation-schema-index.json", rel=None)
}
def add_indices(self, indices: dict):
for name, index in indices.items():
if not isinstance(index, Index):
raise ValueError("Index must be Index object. {} given".format(type(index)))
if name in self.resources:
raise ValueError("Index `{}` already exist. Check the setup process and defined resources.".format(name))
self.resources[name] = index
self.resources.update(indices)
def __getitem__(self, item):
return self.resources[item]
def __contains__(self, item):
return item in self.resources
resources = Resource()
|
[] |
[] |
[
"INSTANCE_PREFIX"
] |
[]
|
["INSTANCE_PREFIX"]
|
python
| 1 | 0 | |
training/horovod/base/horovod_wrapper.py
|
import collections
import datetime
import json
import multiprocessing
import os
import subprocess
import sys
import time
_SSHD_BINARY_PATH = "/usr/sbin/sshd"
EnvironmentConfig = collections.namedtuple(
"EnvironmentConfig",
["hosts", "port", "is_chief", "pools", "job_id"])
class DeadlineExceededError(Exception):
"""Indicates an action took too long."""
pass
def _sub_process_num_gpus(unused):
del unused
# This is imported here so that we don't load tensorflow in the parent
# process. Once the sub-process exits, it releases its allocated GPU memory.
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
gpus = [x.name for x in local_device_protos if x.device_type == "GPU"]
return len(gpus)
def _get_available_gpus():
"""Returns the number of GPUs on the machine."""
pool = multiprocessing.Pool(1)
result = pool.map(_sub_process_num_gpus, [None])[0]
pool.close()
pool.join()
return result
def parse_environment_config(env_config_str, job_id):
"""Parses environment config and returns a list of hosts as well as the role.
Returns:
An EnvironmentConfig.
"""
if env_config_str:
ssh_port = -1
env_config_json = json.loads(env_config_str)
cluster = env_config_json.get("cluster")
if not cluster:
return None, True
hosts = []
pools = collections.defaultdict(list)
for pool_type, tasks_per_type in cluster.items():
if pool_type == "master":
pool_type = "chief"
for host_and_port in tasks_per_type:
host, port = host_and_port.split(":")
if host == "127.0.0.1":
host = "localhost"
port = int(port)
if ssh_port == -1:
ssh_port = port
elif ssh_port != port:
raise ValueError("Inconsistent ssh ports across tasks %d != %d." %
(ssh_port, port))
hosts.append(host)
pools[pool_type].append(host)
is_chief = False
has_chief = "chief" in pools
if (env_config_json["task"]["type"] == "master" or
env_config_json["task"]["type"] == "chief"):
is_chief = True
if int(env_config_json["task"]["index"]) != 0:
raise ValueError("Only one master node is expected.")
elif ((not has_chief) and
(env_config_json["task"]["type"] == "worker") and
int(env_config_json["task"]["index"]) == 0):
is_chief = True
pools["chief"].append(pools["worker"].pop(0))
elif env_config_json["task"]["type"] != "worker":
raise ValueError("Unexpected task type for Horovod training: %s." %
env_config_json["task"]["type"])
return EnvironmentConfig(hosts=hosts, port=port, is_chief=is_chief,
pools=pools, job_id=job_id)
else:
return EnvironmentConfig(hosts=["localhost"], port=2222, is_chief=True,
pools={"chief": ["localhost"]}, job_id=job_id)
def start_ssh_server(port, is_chief):
ssh_server_command = [_SSHD_BINARY_PATH, "-p", str(port)]
if not is_chief:
ssh_server_command.append("-D")
completed = subprocess.call(ssh_server_command)
if completed != 0:
raise OSError("SSH server did not start successfully.")
def wait_for_ssh_servers(hosts, port, timeout_seconds):
deadline_datetime = datetime.datetime.utcnow() + datetime.timedelta(
seconds=timeout_seconds)
unavailable_hosts = []
while datetime.datetime.utcnow() < deadline_datetime:
unavailable_hosts = []
for host in hosts:
ssh_command = ["ssh", "-q", host, "-p", str(port), "true"]
result = subprocess.call(ssh_command)
if result != 0:
unavailable_hosts.append(host)
if not unavailable_hosts:
return
# Retry in 1 second.
time.sleep(1)
raise DeadlineExceededError(
"Timed out while waiting for all hosts to start. "
"Hosts still not available: %s. TASK_STARTUP_TIMEOUT_SECONDS=%d" %
(unavailable_hosts, timeout_seconds))
def run_horovod(env_config, jobs_per_host, args):
env = dict(os.environ)
if jobs_per_host == 8:
# Workaround for https://github.com/NVIDIA/nccl/issues/262
env["CUDA_VISIBLE_DEVICES"] = '0,1,3,2,7,6,4,5'
num_jobs = len(env_config.hosts) * jobs_per_host
hosts = ",".join("%s:%d" % (h, jobs_per_host) for h in env_config.hosts)
horovod_command = [
"horovodrun", "--ssh-port", str(env_config.port), "-H",
hosts, "--num-proc", str(num_jobs)
]
horovod_command.extend(args)
exit_code = subprocess.call(horovod_command, env=env)
return exit_code
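# Illustrative example (not part of the original code): with two hosts and four
# GPUs each, the command built above is roughly
#   horovodrun --ssh-port 2222 -H host-a:4,host-b:4 --num-proc 8 <training args>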
def benchmark_network(env_config):
if not env_config.pools["worker"]:
raise ValueError("No workers in the pool to do network benchmarking.")
iperf_server = ["iperf", "-s", "-p", "3000"]
server = subprocess.Popen(iperf_server)
# Wait 10 seconds for the local server to start.
time.sleep(10)
iperf_command = ["ssh", "-q", env_config.pools["worker"][0], "-p",
str(env_config.port),
"iperf", "-p", "3000", "-c", env_config.pools["chief"][0]]
subprocess.call(iperf_command)
server.kill()
def copy_files_recursively(src, dest):
if not dest.startswith("gs://"):
try:
os.makedirs(dest)
except OSError:
pass
copy_cmd = ["gsutil", "-m", "rsync", "-r", src, dest]
exit_code = subprocess.call(copy_cmd)
if exit_code != 0:
raise RuntimeError("Error while copying %s to %s" % (src, dest))
return exit_code
def main():
env_config_str = os.environ.get("TF_CONFIG")
job_id = os.environ.get("CLOUD_ML_JOB_ID", "localrun")
env_config = parse_environment_config(env_config_str, job_id)
print (env_config, env_config.pools, env_config.hosts, os.environ)
if os.environ.get("STAGE_GCS_PATH", False):
copy_files_recursively(
os.environ.get("STAGE_GCS_PATH"),
os.environ.get("STAGING_DIR", "/input"))
start_ssh_server(env_config.port, env_config.is_chief)
max_num_retries = int(os.environ.get("NUM_HOROVOD_RETRIES", 10))
if env_config.is_chief:
exit_code = 0
for retry in range(max_num_retries):
staging_timeout_seconds = int(
os.environ.get("TASK_STARTUP_TIMEOUT_SECONDS", 600))
wait_for_ssh_servers(env_config.hosts, env_config.port,
staging_timeout_seconds)
if os.environ.get("BENCHMARK_NETWORK", False):
benchmark_network(env_config)
num_gpus = _get_available_gpus()
# If there are no GPUs, we can just run single process per machine.
jobs_per_host = max(1, num_gpus)
args = sys.argv[1:]
exit_code = run_horovod(env_config=env_config, jobs_per_host=jobs_per_host,
args=args)
if exit_code == 0:
break
else:
print ("Retrying...", retry, "out of", max_num_retries)
if os.environ.get("GCS_OUTPUT_PATH", False):
copy_files_recursively(
os.environ.get("OUTPUT_DIR", "/output"),
os.path.join(os.environ.get("GCS_OUTPUT_PATH"), job_id))
sys.exit(exit_code)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"CLOUD_ML_JOB_ID",
"STAGE_GCS_PATH",
"OUTPUT_DIR",
"STAGING_DIR",
"NUM_HOROVOD_RETRIES",
"TASK_STARTUP_TIMEOUT_SECONDS",
"TF_CONFIG",
"BENCHMARK_NETWORK",
"GCS_OUTPUT_PATH"
] |
[]
|
["CLOUD_ML_JOB_ID", "STAGE_GCS_PATH", "OUTPUT_DIR", "STAGING_DIR", "NUM_HOROVOD_RETRIES", "TASK_STARTUP_TIMEOUT_SECONDS", "TF_CONFIG", "BENCHMARK_NETWORK", "GCS_OUTPUT_PATH"]
|
python
| 9 | 0 | |
rootpy/logger/__init__.py
|
"""
:py:mod:`rootpy` overrides the default logging class, inserting a check that
there exists a default logging handler. If there is not, it adds one.
In addition, this can be used to intercept ROOT's log messages and redirect them
through python's logging subsystem
Example use:
.. sourcecode:: python
# Disable colored logging (not needed if writing into a file,
# this is automatic).
# Must be done before :py:mod:`rootpy` logs any messages.
import logging; logging.basicConfig(level=logging.DEBUG)
from rootpy import log; log = log["/myapp"]
log.debug("Hello") # Results in "DEBUG:myapp] Hello"
# Suppress all myapp debug and info messages
log.setLevel(log.WARNING)
log.debug("Hello") # No effect
mymod = log["mymod"]
mymod.warning("Hello") # Results in "WARNING:myapp.mymod] Hello"
# Suppress all rootpy debug and info messages
log["/rootpy"].setLevel(log.WARNING)
# Suppress messages coming from TCanvas like
# INFO:ROOT.TCanvas.Print] png file /path/to/file.png has been created
log["/ROOT.TCanvas.Print"].setLevel(log.WARNING)
# Suppress warning messages coming the ``TClass`` constructor:
log["/ROOT.TClass.TClass"].setLevel(log.ERROR)
# Precisely remove messages containing the text "no dictionary for class"
# (doesn't work when attached to parent logger)
import logging
class NoDictMessagesFilter(logging.Filter):
def filter(self, record):
return "no dictionary for class" not in record.msg
log["/ROOT.TClass.TClass"].addFilter(NoDictMessagesFilter())
# Turn ROOT errors into exceptions
from rootpy.logger.magic import DANGER
DANGER.enable = True
import ROOT
ROOT.Error("test", "Test fatal")
# Result:
# ERROR:ROOT.test] Test fatal
# Traceback (most recent call last):
# File "test.py", line 36, in <module>
# ROOT.Fatal("test", "Test fatal")
# File "test.py", line 36, in <module>
# ROOT.Fatal("test", "Test fatal")
# File "rootpy/logger/roothandler.py", line 40, in python_logging_error_handler
# raise ROOTError(level, location, msg)
# rootpy.ROOTError: level=6000, loc='test', msg='Test fatal'
# Primitive function tracing:
@log.trace()
def salut():
return
@log.trace()
def hello(what):
salut()
return "42"
hello("world")
# Result:
# DEBUG:myapp.trace.hello] > ('world',) {}
# DEBUG:myapp.trace.salut] > () {}
# DEBUG:myapp.trace.salut] < return None [0.00 sec]
# DEBUG:myapp.trace.hello] < return 42 [0.00 sec]
"""
from __future__ import absolute_import
import logging
import os
import re
import sys
import threading
from functools import wraps
from time import time
from .utils import check_tty
from .extended_logger import ExtendedLogger
logging.setLoggerClass(ExtendedLogger)
log = logging.getLogger("rootpy")
if not os.environ.get("DEBUG", False):
log.setLevel(log.INFO)
from .formatter import CustomFormatter, CustomColoredFormatter
def check_tty_handler(handler):
if not hasattr(handler, "stream"):
return False
return check_tty(handler.stream)
log_root = logging.getLogger()
if not log_root.handlers:
# Add a handler to the top-level logger if it doesn't already have one
handler = logging.StreamHandler()
if check_tty_handler(handler):
handler.setFormatter(CustomColoredFormatter())
else:
handler.setFormatter(CustomFormatter())
log_root.addHandler(handler)
# Make the top-level logger as verbose as possible.
# Log messages that make it to the screen are controlled by the handler
log_root.setLevel(logging.DEBUG)
l = logging.getLogger("rootpy.logger")
l.debug("Adding rootpy's default logging handler to the root logger")
from .magic import set_error_handler
from .roothandler import python_logging_error_handler
__all__ = [
'log_trace',
'set_error_handler',
'python_logging_error_handler',
'LogFilter',
'LiteralFilter',
]
class TraceDepth(threading.local):
value = -1
trace_depth = TraceDepth()
def log_trace(logger, level=logging.DEBUG, show_enter=True, show_exit=True):
"""
log a statement on function entry and exit
"""
def wrap(function):
l = logger.getChild(function.__name__).log
@wraps(function)
def thunk(*args, **kwargs):
global trace_depth
trace_depth.value += 1
try:
start = time()
if show_enter:
l(level, "{0}> {1} {2}".format(" "*trace_depth.value,
args, kwargs))
try:
result = function(*args, **kwargs)
except:
_, result, _ = sys.exc_info()
raise
finally:
if show_exit:
l(level, "{0}< return {1} [{2:.2f} sec]".format(
" "*trace_depth.value, result, time() - start))
finally:
trace_depth.value -= 1
return result
return thunk
return wrap
class LogFilter(logging.Filter):
def __init__(self, logger, message_regex):
logging.Filter.__init__(self)
self.logger = logger
self.message_regex = re.compile(message_regex)
def __enter__(self):
self.logger.addFilter(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logger.removeFilter(self)
def filter(self, record):
return not self.message_regex.match(record.getMessage())
class LiteralFilter(logging.Filter):
def __init__(self, literals):
logging.Filter.__init__(self)
self.literals = literals
def filter(self, record):
return record.getMessage() not in self.literals
# filter superfluous ROOT warnings
for histtype in 'CSIFD':
for dimen in '123':
log["/ROOT.TH{0}{1}.Add".format(dimen, histtype)].addFilter(
LiteralFilter([
"Attempt to add histograms with different axis limits",]))
log["/ROOT.TH{0}{1}.Divide".format(dimen, histtype)].addFilter(
LiteralFilter([
"Attempt to divide histograms with different axis limits",]))
log["/ROOT.TH{0}{1}.Multiply".format(dimen, histtype)].addFilter(
LiteralFilter([
"Attempt to multiply histograms with different axis limits",]))
|
[] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
python
| 1 | 0 | |
eksrollup/config.py
|
from dotenv import load_dotenv
from distutils.util import strtobool
import os
load_dotenv('{}/.env'.format(os.getcwd()))
def str_to_bool(val):
return val if type(val) is bool else bool(strtobool(val))
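# Illustrative: str_to_bool("true"), str_to_bool("1") and str_to_bool(True) all
# evaluate to True, while str_to_bool("no") evaluates to False.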
app_config = {
'K8S_AUTOSCALER_ENABLED': str_to_bool(os.getenv('K8S_AUTOSCALER_ENABLED', False)),
'K8S_AUTOSCALER_NAMESPACE': os.getenv('K8S_AUTOSCALER_NAMESPACE', 'default'),
'K8S_AUTOSCALER_DEPLOYMENT': os.getenv('K8S_AUTOSCALER_DEPLOYMENT', 'cluster-autoscaler'),
'K8S_AUTOSCALER_REPLICAS': int(os.getenv('K8S_AUTOSCALER_REPLICAS', 2)),
'ASG_DESIRED_STATE_TAG': os.getenv('ASG_DESIRED_STATE_TAG', 'eks-rolling-update:desired_capacity'),
'ASG_ORIG_CAPACITY_TAG': os.getenv('ASG_ORIG_CAPACITY_TAG', 'eks-rolling-update:original_capacity'),
'ASG_ORIG_MAX_CAPACITY_TAG': os.getenv('ASG_ORIG_MAX_CAPACITY_TAG', 'eks-rolling-update:original_max_capacity'),
'ASG_WAIT_FOR_DETACHMENT': str_to_bool(os.getenv('ASG_WAIT_FOR_DETACHMENT', True)),
'ASG_USE_TERMINATION_POLICY': str_to_bool(os.getenv('ASG_USE_TERMINATION_POLICY', False)),
'INSTANCE_WAIT_FOR_STOPPING': str_to_bool(os.getenv('INSTANCE_WAIT_FOR_STOPPING', False)),
'CLUSTER_HEALTH_WAIT': int(os.getenv('CLUSTER_HEALTH_WAIT', 90)),
'CLUSTER_HEALTH_RETRY': int(os.getenv('CLUSTER_HEALTH_RETRY', 1)),
'GLOBAL_MAX_RETRY': int(os.getenv('GLOBAL_MAX_RETRY', 12)),
'GLOBAL_HEALTH_WAIT': int(os.getenv('GLOBAL_HEALTH_WAIT', 20)),
'BETWEEN_NODES_WAIT': int(os.getenv('BETWEEN_NODES_WAIT', 0)),
'RUN_MODE': int(os.getenv('RUN_MODE', 1)),
'DRY_RUN': str_to_bool(os.getenv('DRY_RUN', False)),
'EXCLUDE_NODE_LABEL_KEYS': os.getenv('EXCLUDE_NODE_LABEL_KEYS', 'spotinst.io/node-lifecycle').split(),
'EXTRA_DRAIN_ARGS': os.getenv('EXTRA_DRAIN_ARGS', '').split(),
'MAX_ALLOWABLE_NODE_AGE': int(os.getenv('MAX_ALLOWABLE_NODE_AGE', 6)),
'TAINT_NODES': str_to_bool(os.getenv('TAINT_NODES', False)),
'BATCH_SIZE': int(os.getenv('BATCH_SIZE', 0))
}
|
[] |
[] |
[
"CLUSTER_HEALTH_RETRY",
"ASG_USE_TERMINATION_POLICY",
"EXCLUDE_NODE_LABEL_KEYS",
"K8S_AUTOSCALER_REPLICAS",
"ASG_WAIT_FOR_DETACHMENT",
"ASG_ORIG_CAPACITY_TAG",
"CLUSTER_HEALTH_WAIT",
"BETWEEN_NODES_WAIT",
"K8S_AUTOSCALER_NAMESPACE",
"EXTRA_DRAIN_ARGS",
"INSTANCE_WAIT_FOR_STOPPING",
"K8S_AUTOSCALER_ENABLED",
"TAINT_NODES",
"GLOBAL_HEALTH_WAIT",
"ASG_DESIRED_STATE_TAG",
"ASG_ORIG_MAX_CAPACITY_TAG",
"RUN_MODE",
"K8S_AUTOSCALER_DEPLOYMENT",
"GLOBAL_MAX_RETRY",
"DRY_RUN",
"MAX_ALLOWABLE_NODE_AGE",
"BATCH_SIZE"
] |
[]
|
["CLUSTER_HEALTH_RETRY", "ASG_USE_TERMINATION_POLICY", "EXCLUDE_NODE_LABEL_KEYS", "K8S_AUTOSCALER_REPLICAS", "ASG_WAIT_FOR_DETACHMENT", "ASG_ORIG_CAPACITY_TAG", "CLUSTER_HEALTH_WAIT", "BETWEEN_NODES_WAIT", "K8S_AUTOSCALER_NAMESPACE", "EXTRA_DRAIN_ARGS", "INSTANCE_WAIT_FOR_STOPPING", "K8S_AUTOSCALER_ENABLED", "TAINT_NODES", "GLOBAL_HEALTH_WAIT", "ASG_DESIRED_STATE_TAG", "ASG_ORIG_MAX_CAPACITY_TAG", "RUN_MODE", "K8S_AUTOSCALER_DEPLOYMENT", "GLOBAL_MAX_RETRY", "DRY_RUN", "MAX_ALLOWABLE_NODE_AGE", "BATCH_SIZE"]
|
python
| 22 | 0 | |
pkg/controllers/management/node/controller.go
|
package node
import (
"context"
"crypto/sha256"
"encoding/base32"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rancher/norman/objectclient"
"github.com/rancher/norman/types/convert"
"github.com/rancher/norman/types/values"
"github.com/rancher/rancher/pkg/api/customization/clusterregistrationtokens"
"github.com/rancher/rancher/pkg/controllers/management/drivers/nodedriver"
"github.com/rancher/rancher/pkg/encryptedstore"
"github.com/rancher/rancher/pkg/jailer"
"github.com/rancher/rancher/pkg/namespace"
"github.com/rancher/rancher/pkg/nodeconfig"
"github.com/rancher/rancher/pkg/ref"
"github.com/rancher/rancher/pkg/systemaccount"
"github.com/rancher/rancher/pkg/taints"
corev1 "github.com/rancher/types/apis/core/v1"
v3 "github.com/rancher/types/apis/management.cattle.io/v3"
"github.com/rancher/types/config"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
v1 "k8s.io/api/core/v1"
kerror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
const (
defaultEngineInstallURL = "https://releases.rancher.com/install-docker/17.03.2.sh"
amazonec2 = "amazonec2"
)
// aliases maps Schema field => driver field
// The opposite of this lives in pkg/controllers/management/drivers/nodedriver/machine_driver.go
var aliases = map[string]map[string]string{
"aliyunecs": map[string]string{"sshKeyContents": "sshKeypath"},
"amazonec2": map[string]string{"sshKeyContents": "sshKeypath", "userdata": "userdata"},
"azure": map[string]string{"customData": "customData"},
"digitalocean": map[string]string{"sshKeyContents": "sshKeyPath", "userdata": "userdata"},
"exoscale": map[string]string{"sshKey": "sshKey", "userdata": "userdata"},
"openstack": map[string]string{"cacert": "cacert", "privateKeyFile": "privateKeyFile", "userDataFile": "userDataFile"},
"otc": map[string]string{"privateKeyFile": "privateKeyFile"},
"packet": map[string]string{"userdata": "userdata"},
"vmwarevsphere": map[string]string{"cloudConfig": "cloud-config"},
}
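// Illustrative example (assumption, not from the original code): for the
// "amazonec2" driver, the schema field "sshKeyContents" is removed from the
// raw config by aliasToPath below, its contents are written out to a file, and
// the machine driver is then expected to receive the aliased "sshKeypath"
// field pointing at that file.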
var IgnoreCredFieldForTemplate = map[string]map[string]bool{
amazonec2: {"region": true},
}
func Register(ctx context.Context, management *config.ManagementContext) {
secretStore, err := nodeconfig.NewStore(management.Core.Namespaces(""), management.Core)
if err != nil {
logrus.Fatal(err)
}
nodeClient := management.Management.Nodes("")
nodeLifecycle := &Lifecycle{
systemAccountManager: systemaccount.NewManager(management),
secretStore: secretStore,
nodeClient: nodeClient,
nodeTemplateClient: management.Management.NodeTemplates(""),
nodePoolLister: management.Management.NodePools("").Controller().Lister(),
nodeTemplateGenericClient: management.Management.NodeTemplates("").ObjectClient().UnstructuredClient(),
configMapGetter: management.K8sClient.CoreV1(),
clusterLister: management.Management.Clusters("").Controller().Lister(),
schemaLister: management.Management.DynamicSchemas("").Controller().Lister(),
credLister: management.Core.Secrets("").Controller().Lister(),
devMode: os.Getenv("CATTLE_DEV_MODE") != "",
}
nodeClient.AddLifecycle(ctx, "node-controller", nodeLifecycle)
}
type Lifecycle struct {
systemAccountManager *systemaccount.Manager
secretStore *encryptedstore.GenericEncryptedStore
nodeTemplateGenericClient objectclient.GenericClient
nodeClient v3.NodeInterface
nodeTemplateClient v3.NodeTemplateInterface
nodePoolLister v3.NodePoolLister
configMapGetter typedv1.ConfigMapsGetter
clusterLister v3.ClusterLister
schemaLister v3.DynamicSchemaLister
credLister corev1.SecretLister
devMode bool
}
func (m *Lifecycle) setupCustom(obj *v3.Node) {
obj.Status.NodeConfig = &v3.RKEConfigNode{
NodeName: obj.Namespace + ":" + obj.Name,
HostnameOverride: obj.Spec.RequestedHostname,
Address: obj.Spec.CustomConfig.Address,
InternalAddress: obj.Spec.CustomConfig.InternalAddress,
User: obj.Spec.CustomConfig.User,
DockerSocket: obj.Spec.CustomConfig.DockerSocket,
SSHKey: obj.Spec.CustomConfig.SSHKey,
Labels: obj.Spec.CustomConfig.Label,
Port: "22",
Role: roles(obj),
Taints: taints.GetRKETaintsFromStrings(obj.Spec.CustomConfig.Taints),
}
if obj.Status.NodeConfig.User == "" {
obj.Status.NodeConfig.User = "root"
}
obj.Status.InternalNodeStatus.Addresses = []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: obj.Status.NodeConfig.Address,
},
}
}
func isCustom(obj *v3.Node) bool {
return obj.Spec.CustomConfig != nil && obj.Spec.CustomConfig.Address != ""
}
func (m *Lifecycle) setWaiting(node *v3.Node) {
v3.NodeConditionRegistered.IsUnknown(node)
v3.NodeConditionRegistered.Message(node, "waiting to register with Kubernetes")
}
func (m *Lifecycle) Create(obj *v3.Node) (runtime.Object, error) {
if isCustom(obj) {
m.setupCustom(obj)
newObj, err := v3.NodeConditionInitialized.Once(obj, func() (runtime.Object, error) {
if err := validateCustomHost(obj); err != nil {
return obj, err
}
m.setWaiting(obj)
return obj, nil
})
return newObj.(*v3.Node), err
}
if obj.Spec.NodeTemplateName == "" {
return obj, nil
}
newObj, err := v3.NodeConditionInitialized.Once(obj, func() (runtime.Object, error) {
nodeConfig, err := nodeconfig.NewNodeConfig(m.secretStore, obj)
if err != nil {
return obj, errors.Wrap(err, "failed to create node driver config")
}
defer nodeConfig.Cleanup()
err = m.refreshNodeConfig(nodeConfig, obj)
if err != nil {
return nil, errors.WithMessagef(err, "unable to create config for node %v", obj.Name)
}
template, err := m.getNodeTemplate(obj.Spec.NodeTemplateName)
if err != nil {
return obj, err
}
obj.Status.NodeTemplateSpec = &template.Spec
if obj.Spec.RequestedHostname == "" {
obj.Spec.RequestedHostname = obj.Name
}
if obj.Status.NodeTemplateSpec.EngineInstallURL == "" {
obj.Status.NodeTemplateSpec.EngineInstallURL = defaultEngineInstallURL
}
if !m.devMode {
err := jailer.CreateJail(obj.Namespace)
if err != nil {
return nil, errors.WithMessage(err, "node create jail error")
}
}
return obj, nil
})
return newObj.(*v3.Node), err
}
func (m *Lifecycle) getNodeTemplate(nodeTemplateName string) (*v3.NodeTemplate, error) {
ns, n := ref.Parse(nodeTemplateName)
return m.nodeTemplateClient.GetNamespaced(ns, n, metav1.GetOptions{})
}
func (m *Lifecycle) getNodePool(nodePoolName string) (*v3.NodePool, error) {
ns, p := ref.Parse(nodePoolName)
return m.nodePoolLister.Get(ns, p)
}
func (m *Lifecycle) Remove(obj *v3.Node) (runtime.Object, error) {
if obj.Status.NodeTemplateSpec == nil {
return obj, nil
}
newObj, err := v3.NodeConditionRemoved.DoUntilTrue(obj, func() (runtime.Object, error) {
found, err := m.isNodeInAppliedSpec(obj)
if err != nil {
return obj, err
}
if found {
return obj, errors.New("waiting for node to be removed from cluster")
}
if !m.devMode {
err := jailer.CreateJail(obj.Namespace)
if err != nil {
return nil, errors.WithMessage(err, "node remove jail error")
}
}
config, err := nodeconfig.NewNodeConfig(m.secretStore, obj)
if err != nil {
return obj, err
}
if err := config.Restore(); err != nil {
return obj, err
}
defer config.Remove()
err = m.refreshNodeConfig(config, obj)
if err != nil {
return nil, errors.WithMessagef(err, "unable to refresh config for node %v", obj.Name)
}
mExists, err := nodeExists(config.Dir(), obj)
if err != nil {
return obj, err
}
if mExists {
logrus.Infof("Removing node %s", obj.Spec.RequestedHostname)
if err := deleteNode(config.Dir(), obj); err != nil {
return obj, err
}
logrus.Infof("Removing node %s done", obj.Spec.RequestedHostname)
}
return obj, nil
})
return newObj.(*v3.Node), err
}
func (m *Lifecycle) provision(driverConfig, nodeDir string, obj *v3.Node) (*v3.Node, error) {
configRawMap := map[string]interface{}{}
if err := json.Unmarshal([]byte(driverConfig), &configRawMap); err != nil {
return obj, errors.Wrap(err, "failed to unmarshal node config")
}
// Since we know this will take a long time, persist now so the user sees the status
obj, err := m.nodeClient.Update(obj)
if err != nil {
return obj, err
}
err = aliasToPath(obj.Status.NodeTemplateSpec.Driver, configRawMap, obj.Namespace)
if err != nil {
return obj, err
}
createCommandsArgs := buildCreateCommand(obj, configRawMap)
cmd, err := buildCommand(nodeDir, obj, createCommandsArgs)
if err != nil {
return obj, err
}
logrus.Infof("Provisioning node %s", obj.Spec.RequestedHostname)
stdoutReader, stderrReader, err := startReturnOutput(cmd)
if err != nil {
return obj, err
}
defer stdoutReader.Close()
defer stderrReader.Close()
defer cmd.Wait()
obj, err = m.reportStatus(stdoutReader, stderrReader, obj)
if err != nil {
return obj, err
}
if err := cmd.Wait(); err != nil {
return obj, err
}
if err := m.deployAgent(nodeDir, obj); err != nil {
return obj, err
}
logrus.Infof("Provisioning node %s done", obj.Spec.RequestedHostname)
return obj, nil
}
func aliasToPath(driver string, config map[string]interface{}, ns string) error {
devMode := os.Getenv("CATTLE_DEV_MODE") != ""
baseDir := path.Join("/opt/jail", ns)
if devMode {
baseDir = os.TempDir()
}
// Check if the required driver has aliased fields
if fields, ok := aliases[driver]; ok {
hasher := sha256.New()
for schemaField, driverField := range fields {
if fileRaw, ok := config[schemaField]; ok {
fileContents := fileRaw.(string)
// Delete our aliased fields
delete(config, schemaField)
if fileContents == "" {
continue
}
fileName := driverField
if ok := nodedriver.SSHKeyFields[schemaField]; ok {
fileName = "id_rsa"
// The trailing newline gets stripped, so add it back
if !strings.HasSuffix(fileContents, "\n") {
fileContents = fileContents + "\n"
}
}
hasher.Reset()
hasher.Write([]byte(fileContents))
sha := base32.StdEncoding.WithPadding(-1).EncodeToString(hasher.Sum(nil))[:10]
fileDir := path.Join(baseDir, sha)
// Delete the fileDir path if it's not a directory
if info, err := os.Stat(fileDir); err == nil && !info.IsDir() {
if err := os.Remove(fileDir); err != nil {
return err
}
}
err := os.MkdirAll(fileDir, 0755)
if err != nil {
return err
}
fullPath := path.Join(fileDir, fileName)
err = ioutil.WriteFile(fullPath, []byte(fileContents), 0600)
if err != nil {
return err
}
// Add the field and path
if devMode {
config[driverField] = fullPath
} else {
config[driverField] = path.Join("/", sha, fileName)
}
}
}
}
return nil
}
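// Illustrative sketch (not part of the controller): how aliasToPath above derives
// its short, content-addressed directory name. The function name is hypothetical;
// it reuses the crypto/sha256 and encoding/base32 imports already used above.
func exampleContentAddressedName(fileContents string) string {
	hasher := sha256.New()
	hasher.Write([]byte(fileContents))
	// Base32 without padding, truncated to 10 characters, matching the sha above.
	return base32.StdEncoding.WithPadding(-1).EncodeToString(hasher.Sum(nil))[:10]
}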
func (m *Lifecycle) deployAgent(nodeDir string, obj *v3.Node) error {
token, err := m.systemAccountManager.GetOrCreateSystemClusterToken(obj.Namespace)
if err != nil {
return err
}
cluster, err := m.clusterLister.Get("", obj.Namespace)
if err != nil {
return err
}
drun := clusterregistrationtokens.NodeCommand(token, cluster)
args := buildAgentCommand(obj, drun)
cmd, err := buildCommand(nodeDir, obj, args)
if err != nil {
return err
}
output, err := cmd.CombinedOutput()
if err != nil {
return errors.Wrap(err, string(output))
}
return nil
}
func (m *Lifecycle) ready(obj *v3.Node) (*v3.Node, error) {
config, err := nodeconfig.NewNodeConfig(m.secretStore, obj)
if err != nil {
return obj, err
}
defer config.Cleanup()
if err := config.Restore(); err != nil {
return obj, err
}
err = m.refreshNodeConfig(config, obj)
if err != nil {
return nil, errors.WithMessagef(err, "unable to refresh config for node %v", obj.Name)
}
driverConfig, err := config.DriverConfig()
if err != nil {
return nil, err
}
// Provision in the background so we can poll and save the config
done := make(chan error)
go func() {
newObj, err := m.provision(driverConfig, config.Dir(), obj)
obj = newObj
done <- err
}()
// Poll and save config
outer:
for {
select {
case err = <-done:
break outer
case <-time.After(5 * time.Second):
config.Save()
}
}
newObj, saveError := v3.NodeConditionConfigSaved.Once(obj, func() (runtime.Object, error) {
return m.saveConfig(config, config.FullDir(), obj)
})
obj = newObj.(*v3.Node)
if err == nil {
return obj, saveError
}
return obj, err
}
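// Illustrative sketch (hypothetical helper, simplified signatures) of the pattern
// used by ready() above: run the long provisioning step in a goroutine while
// periodically persisting state until it completes. saveFn stands in for config.Save().
func provisionWithPeriodicSave(provisionFn func() error, saveFn func() error) error {
	done := make(chan error, 1)
	go func() { done <- provisionFn() }()
	for {
		select {
		case err := <-done:
			return err
		case <-time.After(5 * time.Second):
			// Best-effort save, mirroring ready(), which ignores Save errors here.
			_ = saveFn()
		}
	}
}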
func (m *Lifecycle) Updated(obj *v3.Node) (runtime.Object, error) {
newObj, err := v3.NodeConditionProvisioned.Once(obj, func() (runtime.Object, error) {
if obj.Status.NodeTemplateSpec == nil {
m.setWaiting(obj)
return obj, nil
}
if !m.devMode {
logrus.Infof("Creating jail for %v", obj.Namespace)
err := jailer.CreateJail(obj.Namespace)
if err != nil {
return nil, errors.WithMessage(err, "node update jail error")
}
}
obj, err := m.ready(obj)
if err == nil {
m.setWaiting(obj)
}
return obj, err
})
return newObj.(*v3.Node), err
}
func (m *Lifecycle) saveConfig(config *nodeconfig.NodeConfig, nodeDir string, obj *v3.Node) (*v3.Node, error) {
logrus.Infof("Generating and uploading node config %s", obj.Spec.RequestedHostname)
if err := config.Save(); err != nil {
return obj, err
}
ip, err := config.IP()
if err != nil {
return obj, err
}
internalAddress, err := config.InternalIP()
if err != nil {
return obj, err
}
keyPath, err := config.SSHKeyPath()
if err != nil {
return obj, err
}
sshKey, err := getSSHKey(nodeDir, keyPath, obj)
if err != nil {
return obj, err
}
sshUser, err := config.SSHUser()
if err != nil {
return obj, err
}
if err := config.Save(); err != nil {
return obj, err
}
template, err := m.getNodeTemplate(obj.Spec.NodeTemplateName)
if err != nil {
return obj, err
}
pool, err := m.getNodePool(obj.Spec.NodePoolName)
if err != nil {
return obj, err
}
obj.Status.NodeConfig = &v3.RKEConfigNode{
NodeName: obj.Namespace + ":" + obj.Name,
Address: ip,
InternalAddress: internalAddress,
User: sshUser,
Role: roles(obj),
HostnameOverride: obj.Spec.RequestedHostname,
SSHKey: sshKey,
Labels: template.Labels,
}
obj.Status.InternalNodeStatus.Addresses = []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: obj.Status.NodeConfig.Address,
},
}
if len(obj.Status.NodeConfig.Role) == 0 {
obj.Status.NodeConfig.Role = []string{"worker"}
}
templateSet := taints.GetKeyEffectTaintSet(template.Spec.NodeTaints)
nodeSet := taints.GetKeyEffectTaintSet(pool.Spec.NodeTaints)
expectTaints := pool.Spec.NodeTaints
for key, ti := range templateSet {
// The expected taints come from the node pool, so skip any template taint that shares a key and
// effect with a pool taint: taints from the node pool override taints from the template.
if _, ok := nodeSet[key]; !ok {
expectTaints = append(expectTaints, template.Spec.NodeTaints[ti])
}
}
obj.Status.NodeConfig.Taints = taints.GetRKETaintsFromTaints(expectTaints)
return obj, nil
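// Illustrative sketch (simplified types, not part of the controller) of the
// taint-merge rule applied in saveConfig above: pool taints win whenever a
// template taint shares the same key and effect, and template-only taints are
// appended. simpleTaint is a hypothetical stand-in for the real taint type.
type simpleTaint struct{ Key, Value, Effect string }

func mergeTaints(poolTaints, templateTaints []simpleTaint) []simpleTaint {
	seen := map[string]bool{}
	for _, t := range poolTaints {
		seen[t.Key+":"+t.Effect] = true
	}
	merged := append([]simpleTaint{}, poolTaints...)
	for _, t := range templateTaints {
		if !seen[t.Key+":"+t.Effect] {
			merged = append(merged, t)
		}
	}
	return merged
}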
}
func (m *Lifecycle) refreshNodeConfig(nc *nodeconfig.NodeConfig, obj *v3.Node) error {
template, err := m.getNodeTemplate(obj.Spec.NodeTemplateName)
if err != nil {
return err
}
rawTemplate, err := m.nodeTemplateGenericClient.GetNamespaced(template.Namespace, template.Name, metav1.GetOptions{})
if err != nil {
return err
}
data := rawTemplate.(*unstructured.Unstructured).Object
rawConfig, ok := values.GetValue(data, template.Spec.Driver+"Config")
if !ok {
return fmt.Errorf("node config not specified for node %v", obj.Name)
}
if err := m.updateRawConfigFromCredential(data, rawConfig, template); err != nil {
return err
}
var update bool
if template.Spec.Driver == amazonec2 {
setEc2ClusterIDTag(rawConfig, obj.Namespace)
//TODO: Update to not be amazon specific, this needs to be moved to the driver
update, err = nc.UpdateAmazonAuth(rawConfig)
if err != nil {
return err
}
}
bytes, err := json.Marshal(rawConfig)
if err != nil {
return errors.Wrap(err, "failed to marshal node driver config")
}
newConfig := string(bytes)
currentConfig, err := nc.DriverConfig()
if err != nil {
return err
}
if currentConfig != newConfig || update {
err = nc.SetDriverConfig(string(bytes))
if err != nil {
return err
}
return nc.Save()
}
return nil
}
func (m *Lifecycle) isNodeInAppliedSpec(node *v3.Node) (bool, error) {
// worker/controlplane nodes can just be immediately deleted
if !node.Spec.Etcd {
return false, nil
}
cluster, err := m.clusterLister.Get("", node.Namespace)
if err != nil {
if kerror.IsNotFound(err) {
return false, nil
}
return false, err
}
if cluster == nil {
return false, nil
}
if cluster.DeletionTimestamp != nil {
return false, nil
}
if cluster.Status.AppliedSpec.RancherKubernetesEngineConfig == nil {
return false, nil
}
for _, rkeNode := range cluster.Status.AppliedSpec.RancherKubernetesEngineConfig.Nodes {
nodeName := rkeNode.NodeName
if len(nodeName) == 0 {
continue
}
if nodeName == fmt.Sprintf("%s:%s", node.Namespace, node.Name) {
return true, nil
}
}
return false, nil
}
func validateCustomHost(obj *v3.Node) error {
if obj.Spec.Imported {
return nil
}
customConfig := obj.Spec.CustomConfig
signer, err := ssh.ParsePrivateKey([]byte(customConfig.SSHKey))
if err != nil {
return errors.Wrapf(err, "sshKey format is invalid")
}
config := &ssh.ClientConfig{
User: customConfig.User,
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
conn, err := ssh.Dial("tcp", customConfig.Address+":22", config)
if err != nil {
return errors.Wrapf(err, "Failed to validate ssh connection to address [%s]", customConfig.Address)
}
defer conn.Close()
return nil
}
func roles(node *v3.Node) []string {
var roles []string
if node.Spec.Etcd {
roles = append(roles, "etcd")
}
if node.Spec.ControlPlane {
roles = append(roles, "controlplane")
}
if node.Spec.Worker {
roles = append(roles, "worker")
}
if len(roles) == 0 {
return []string{"worker"}
}
return roles
}
func (m *Lifecycle) setCredFields(data interface{}, fields map[string]v3.Field, toIgnore map[string]bool, credID string) error {
splitID := strings.Split(credID, ":")
if len(splitID) != 2 {
return fmt.Errorf("invalid credential id %s", credID)
}
cred, err := m.credLister.Get(namespace.GlobalNamespace, splitID[1])
if err != nil {
return err
}
if ans := convert.ToMapInterface(data); len(ans) > 0 {
for key, val := range cred.Data {
splitKey := strings.Split(key, "-")
if len(splitKey) == 2 && strings.HasSuffix(splitKey[0], "Config") {
if _, ok := toIgnore[splitKey[1]]; ok {
continue
}
if _, ok := fields[splitKey[1]]; ok {
ans[splitKey[1]] = string(val)
}
}
}
}
return nil
}
func (m *Lifecycle) updateRawConfigFromCredential(data map[string]interface{}, rawConfig interface{}, template *v3.NodeTemplate) error {
credID := convert.ToString(values.GetValueN(data, "spec", "cloudCredentialName"))
if credID != "" {
driverName := template.Spec.Driver
existingSchema, err := m.schemaLister.Get("", driverName+"config")
if err != nil {
return err
}
toIgnore := map[string]bool{}
for field := range existingSchema.Spec.ResourceFields {
if val, ok := IgnoreCredFieldForTemplate[driverName]; ok {
if _, ok := val[field]; ok {
toIgnore[field] = true
}
}
}
logrus.Debugf("setCredFields for credentialName %s ignoreFields %v", credID, toIgnore)
err = m.setCredFields(rawConfig, existingSchema.Spec.ResourceFields, toIgnore, credID)
if err != nil {
return errors.Wrap(err, "failed to set credential fields")
}
}
return nil
}
|
[
"\"CATTLE_DEV_MODE\"",
"\"CATTLE_DEV_MODE\""
] |
[] |
[
"CATTLE_DEV_MODE"
] |
[]
|
["CATTLE_DEV_MODE"]
|
go
| 1 | 0 | |
sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_receipts.py
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_recognize_receipts.py
DESCRIPTION:
This sample demonstrates how to recognize and extract common fields from receipts,
using a pre-trained receipt model. For a suggested approach to extracting information
from receipts, see sample_strongly_typed_recognized_form.py.
See fields found on a receipt here:
https://aka.ms/formrecognizer/receiptfields
USAGE:
python sample_recognize_receipts.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key.
"""
import os
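# Illustrative helper (an assumption, not part of the original sample): fail fast
# with a clear message when the required environment variables are missing. The
# sample below reads os.environ directly and will raise KeyError on its own.
def _require_env(*names):
    missing = [name for name in names if not os.environ.get(name)]
    if missing:
        raise EnvironmentError("Missing environment variables: " + ", ".join(missing))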
class RecognizeReceiptsSample(object):
def recognize_receipts(self):
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__),
"..", "./sample_forms/receipt/contoso-allinone.jpg"))
# [START recognize_receipts]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormRecognizerClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
with open(path_to_sample_forms, "rb") as f:
poller = form_recognizer_client.begin_recognize_receipts(receipt=f, locale="en-US")
receipts = poller.result()
for idx, receipt in enumerate(receipts):
print("--------Recognizing receipt #{}--------".format(idx+1))
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {} has confidence: {}".format(receipt_type.value, receipt_type.confidence))
merchant_name = receipt.fields.get("MerchantName")
if merchant_name:
print("Merchant Name: {} has confidence: {}".format(merchant_name.value, merchant_name.confidence))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {} has confidence: {}".format(transaction_date.value, transaction_date.confidence))
print("Receipt items:")
for idx, item in enumerate(receipt.fields.get("Items").value):
print("...Item #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("......Item Name: {} has confidence: {}".format(item_name.value, item_name.confidence))
item_quantity = item.value.get("Quantity")
if item_quantity:
print("......Item Quantity: {} has confidence: {}".format(item_quantity.value, item_quantity.confidence))
item_price = item.value.get("Price")
if item_price:
print("......Individual Item Price: {} has confidence: {}".format(item_price.value, item_price.confidence))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("......Total Item Price: {} has confidence: {}".format(item_total_price.value, item_total_price.confidence))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} has confidence: {}".format(subtotal.value, subtotal.confidence))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {} has confidence: {}".format(tax.value, tax.confidence))
tip = receipt.fields.get("Tip")
if tip:
print("Tip: {} has confidence: {}".format(tip.value, tip.confidence))
total = receipt.fields.get("Total")
if total:
print("Total: {} has confidence: {}".format(total.value, total.confidence))
print("--------------------------------------")
# [END recognize_receipts]
if __name__ == '__main__':
sample = RecognizeReceiptsSample()
sample.recognize_receipts()
|
[] |
[] |
[
"AZURE_FORM_RECOGNIZER_KEY",
"AZURE_FORM_RECOGNIZER_ENDPOINT"
] |
[]
|
["AZURE_FORM_RECOGNIZER_KEY", "AZURE_FORM_RECOGNIZER_ENDPOINT"]
|
python
| 2 | 0 | |
lib/input/tests/http_server_test.go
|
package tests
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"sync"
"testing"
"time"
"github.com/Jeffail/benthos/v3/lib/input"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/manager"
"github.com/Jeffail/benthos/v3/lib/message"
"github.com/Jeffail/benthos/v3/lib/message/roundtrip"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/ratelimit"
"github.com/Jeffail/benthos/v3/lib/response"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/gorilla/websocket"
)
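// apiRegMutWrapper adapts a plain *http.ServeMux to the endpoint-registration
// interface expected by manager.New, so the tests below can serve the input's
// HTTP handlers through httptest.NewServer.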
type apiRegMutWrapper struct {
mut *http.ServeMux
}
func (a apiRegMutWrapper) RegisterEndpoint(path, desc string, h http.HandlerFunc) {
a.mut.HandleFunc(path, h)
}
func TestHTTPBasic(t *testing.T) {
t.Parallel()
nTestLoops := 100
reg := apiRegMutWrapper{mut: &http.ServeMux{}}
mgr, err := manager.New(manager.NewConfig(), reg, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
conf := input.NewConfig()
conf.HTTPServer.Path = "/testpost"
h, err := input.NewHTTPServer(conf, mgr, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
server := httptest.NewServer(reg.mut)
defer server.Close()
// Test both single and multipart messages.
for i := 0; i < nTestLoops; i++ {
testStr := fmt.Sprintf("test%v", i)
testResponse := fmt.Sprintf("response%v", i)
// Send it as single part
go func(input, output string) {
res, err := http.Post(
server.URL+"/testpost",
"application/octet-stream",
bytes.NewBuffer([]byte(input)),
)
if err != nil {
t.Fatal(err)
} else if res.StatusCode != 200 {
t.Fatalf("Wrong error code returned: %v", res.StatusCode)
}
resBytes, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if exp, act := output, string(resBytes); exp != act {
t.Errorf("Wrong sync response: %v != %v", act, exp)
}
}(testStr, testResponse)
var ts types.Transaction
select {
case ts = <-h.TransactionChan():
if res := string(ts.Payload.Get(0).Get()); res != testStr {
t.Errorf("Wrong result, %v != %v", ts.Payload, res)
}
ts.Payload.Get(0).Set([]byte(testResponse))
roundtrip.SetAsResponse(ts.Payload)
case <-time.After(time.Second):
t.Error("Timed out waiting for message")
}
select {
case ts.ResponseChan <- response.NewAck():
case <-time.After(time.Second):
t.Error("Timed out waiting for response")
}
}
// Test MIME multipart parsing, as defined in RFC 2046
for i := 0; i < nTestLoops; i++ {
partOne := fmt.Sprintf("test%v part one", i)
partTwo := fmt.Sprintf("test%v part two", i)
testStr := fmt.Sprintf(
"--foo\r\n"+
"Content-Type: application/octet-stream\r\n\r\n"+
"%v\r\n"+
"--foo\r\n"+
"Content-Type: application/octet-stream\r\n\r\n"+
"%v\r\n"+
"--foo--\r\n",
partOne, partTwo)
// Send it as multi part
go func() {
if res, err := http.Post(
server.URL+"/testpost",
"multipart/mixed; boundary=foo",
bytes.NewBuffer([]byte(testStr)),
); err != nil {
t.Fatal(err)
} else if res.StatusCode != 200 {
t.Fatalf("Wrong error code returned: %v", res.StatusCode)
}
}()
var ts types.Transaction
select {
case ts = <-h.TransactionChan():
if exp, actual := 2, ts.Payload.Len(); exp != actual {
t.Errorf("Wrong number of parts: %v != %v", actual, exp)
} else if exp, actual := partOne, string(ts.Payload.Get(0).Get()); exp != actual {
t.Errorf("Wrong result, %v != %v", actual, exp)
} else if exp, actual := partTwo, string(ts.Payload.Get(1).Get()); exp != actual {
t.Errorf("Wrong result, %v != %v", actual, exp)
}
case <-time.After(time.Second):
t.Error("Timed out waiting for message")
}
select {
case ts.ResponseChan <- response.NewAck():
case <-time.After(time.Second):
t.Error("Timed out waiting for response")
}
}
h.CloseAsync()
}
func TestHTTPBadRequests(t *testing.T) {
t.Parallel()
reg := apiRegMutWrapper{mut: &http.ServeMux{}}
mgr, err := manager.New(manager.NewConfig(), reg, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
conf := input.NewConfig()
conf.HTTPServer.Path = "/testpost"
h, err := input.NewHTTPServer(conf, mgr, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
server := httptest.NewServer(reg.mut)
defer server.Close()
res, err := http.Get(server.URL + "/testpost")
if err != nil {
t.Error(err)
return
}
if exp, act := http.StatusMethodNotAllowed, res.StatusCode; exp != act {
t.Errorf("unexpected HTTP response code: %v != %v", exp, act)
}
h.CloseAsync()
if err := h.WaitForClose(time.Second * 5); err != nil {
t.Error(err)
}
}
func TestHTTPTimeout(t *testing.T) {
t.Parallel()
reg := apiRegMutWrapper{mut: &http.ServeMux{}}
mgr, err := manager.New(manager.NewConfig(), reg, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
conf := input.NewConfig()
conf.HTTPServer.Path = "/testpost"
conf.HTTPServer.Timeout = "1ms"
h, err := input.NewHTTPServer(conf, mgr, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
server := httptest.NewServer(reg.mut)
defer server.Close()
var res *http.Response
res, err = http.Post(
server.URL+"/testpost",
"application/octet-stream",
bytes.NewBuffer([]byte("hello world")),
)
if err != nil {
t.Fatal(err)
}
if exp, act := http.StatusRequestTimeout, res.StatusCode; exp != act {
t.Errorf("Unexpected status code: %v != %v", exp, act)
}
h.CloseAsync()
if err := h.WaitForClose(time.Second * 5); err != nil {
t.Error(err)
}
}
func TestHTTPRateLimit(t *testing.T) {
t.Parallel()
reg := apiRegMutWrapper{mut: &http.ServeMux{}}
rlConf := ratelimit.NewConfig()
rlConf.Type = ratelimit.TypeLocal
rlConf.Local.Count = 1
rlConf.Local.Interval = "60s"
mgrConf := manager.NewConfig()
mgrConf.RateLimits["foorl"] = rlConf
mgr, err := manager.New(mgrConf, reg, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
conf := input.NewConfig()
conf.HTTPServer.Path = "/testpost"
conf.HTTPServer.RateLimit = "foorl"
h, err := input.NewHTTPServer(conf, mgr, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
server := httptest.NewServer(reg.mut)
defer server.Close()
go func() {
var ts types.Transaction
select {
case ts = <-h.TransactionChan():
case <-time.After(time.Second):
t.Error("Timed out waiting for message")
}
select {
case ts.ResponseChan <- response.NewAck():
case <-time.After(time.Second):
t.Error("Timed out waiting for response")
}
}()
var res *http.Response
res, err = http.Post(
server.URL+"/testpost",
"application/octet-stream",
bytes.NewBuffer([]byte("hello world")),
)
if err != nil {
t.Fatal(err)
}
if exp, act := http.StatusOK, res.StatusCode; exp != act {
t.Errorf("Unexpected status code: %v != %v", exp, act)
}
res, err = http.Post(
server.URL+"/testpost",
"application/octet-stream",
bytes.NewBuffer([]byte("hello world")),
)
if err != nil {
t.Fatal(err)
}
if exp, act := http.StatusTooManyRequests, res.StatusCode; exp != act {
t.Errorf("Unexpected status code: %v != %v", exp, act)
}
h.CloseAsync()
if err := h.WaitForClose(time.Second * 5); err != nil {
t.Error(err)
}
}
func TestHTTPServerWebsockets(t *testing.T) {
t.Parallel()
reg := apiRegMutWrapper{mut: &http.ServeMux{}}
mgr, err := manager.New(manager.NewConfig(), reg, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
conf := input.NewConfig()
conf.HTTPServer.WSPath = "/testws"
h, err := input.NewHTTPServer(conf, mgr, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
server := httptest.NewServer(reg.mut)
defer server.Close()
purl, err := url.Parse(server.URL + "/testws")
if err != nil {
t.Fatal(err)
}
purl.Scheme = "ws"
var client *websocket.Conn
if client, _, err = websocket.DefaultDialer.Dial(purl.String(), http.Header{}); err != nil {
t.Fatal(err)
}
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
if clientErr := client.WriteMessage(
websocket.BinaryMessage, []byte("hello world 1"),
); clientErr != nil {
t.Fatal(clientErr)
}
wg.Done()
}()
var ts types.Transaction
select {
case ts = <-h.TransactionChan():
case <-time.After(time.Second):
t.Error("Timed out waiting for message")
}
if exp, act := `[hello world 1]`, fmt.Sprintf("%s", message.GetAllBytes(ts.Payload)); exp != act {
t.Errorf("Unexpected message: %v != %v", act, exp)
}
select {
case ts.ResponseChan <- response.NewAck():
case <-time.After(time.Second):
t.Error("Timed out waiting for response")
}
wg.Wait()
wg.Add(1)
go func() {
if closeErr := client.WriteMessage(
websocket.BinaryMessage, []byte("hello world 2"),
); closeErr != nil {
t.Fatal(closeErr)
}
wg.Done()
}()
select {
case ts = <-h.TransactionChan():
case <-time.After(time.Second):
t.Error("Timed out waiting for message")
}
if exp, act := `[hello world 2]`, fmt.Sprintf("%s", message.GetAllBytes(ts.Payload)); exp != act {
t.Errorf("Unexpected message: %v != %v", act, exp)
}
select {
case ts.ResponseChan <- response.NewAck():
case <-time.After(time.Second):
t.Error("Timed out waiting for response")
}
wg.Wait()
h.CloseAsync()
if err := h.WaitForClose(time.Second * 5); err != nil {
t.Error(err)
}
}
func TestHTTPServerWSRateLimit(t *testing.T) {
t.Parallel()
reg := apiRegMutWrapper{mut: &http.ServeMux{}}
rlConf := ratelimit.NewConfig()
rlConf.Type = ratelimit.TypeLocal
rlConf.Local.Count = 1
rlConf.Local.Interval = "60s"
mgrConf := manager.NewConfig()
mgrConf.RateLimits["foorl"] = rlConf
mgr, err := manager.New(mgrConf, reg, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
conf := input.NewConfig()
conf.HTTPServer.WSPath = "/testws"
conf.HTTPServer.WSWelcomeMessage = "test welcome"
conf.HTTPServer.WSRateLimitMessage = "test rate limited"
conf.HTTPServer.RateLimit = "foorl"
h, err := input.NewHTTPServer(conf, mgr, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
server := httptest.NewServer(reg.mut)
defer server.Close()
purl, err := url.Parse(server.URL + "/testws")
if err != nil {
t.Fatal(err)
}
purl.Scheme = "ws"
var client *websocket.Conn
if client, _, err = websocket.DefaultDialer.Dial(purl.String(), http.Header{}); err != nil {
t.Fatal(err)
}
go func() {
var ts types.Transaction
select {
case ts = <-h.TransactionChan():
case <-time.After(time.Second):
t.Error("Timed out waiting for message")
}
select {
case ts.ResponseChan <- response.NewAck():
case <-time.After(time.Second):
t.Error("Timed out waiting for response")
}
}()
var msgBytes []byte
if _, msgBytes, err = client.ReadMessage(); err != nil {
t.Fatal(err)
}
if exp, act := "test welcome", string(msgBytes); exp != act {
t.Errorf("Unexpected welcome message: %v != %v", act, exp)
}
if err = client.WriteMessage(
websocket.BinaryMessage, []byte("hello world"),
); err != nil {
t.Fatal(err)
}
if err = client.WriteMessage(
websocket.BinaryMessage, []byte("hello world"),
); err != nil {
t.Fatal(err)
}
if _, msgBytes, err = client.ReadMessage(); err != nil {
t.Fatal(err)
}
if exp, act := "test rate limited", string(msgBytes); exp != act {
t.Errorf("Unexpected rate limit message: %v != %v", act, exp)
}
h.CloseAsync()
if err := h.WaitForClose(time.Second * 5); err != nil {
t.Error(err)
}
}
func TestHTTPSyncResponseHeaders(t *testing.T) {
t.Parallel()
reg := apiRegMutWrapper{mut: &http.ServeMux{}}
mgr, err := manager.New(manager.NewConfig(), reg, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
conf := input.NewConfig()
conf.HTTPServer.Path = "/testpost"
conf.HTTPServer.Response.Headers["Content-Type"] = "application/json"
conf.HTTPServer.Response.Headers["foo"] = "${!json_field:field1}"
h, err := input.NewHTTPServer(conf, mgr, log.Noop(), metrics.Noop())
if err != nil {
t.Fatal(err)
}
server := httptest.NewServer(reg.mut)
defer server.Close()
input := `{"foo":"test message","field1":"bar"}`
go func() {
res, err := http.Post(
server.URL+"/testpost",
"application/octet-stream",
bytes.NewBuffer([]byte(input)),
)
if err != nil {
t.Fatal(err)
} else if res.StatusCode != 200 {
t.Fatalf("Wrong error code returned: %v", res.StatusCode)
}
resBytes, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if exp, act := input, string(resBytes); exp != act {
t.Errorf("Wrong sync response: %v != %v", act, exp)
}
if exp, act := "application/json", res.Header.Get("Content-Type"); exp != act {
t.Errorf("Wrong sync response header: %v != %v", act, exp)
}
if exp, act := "bar", res.Header.Get("foo"); exp != act {
t.Errorf("Wrong sync response header: %v != %v", act, exp)
}
}()
var ts types.Transaction
select {
case ts = <-h.TransactionChan():
if res := string(ts.Payload.Get(0).Get()); res != input {
t.Errorf("Wrong result, %v != %v", ts.Payload, res)
}
roundtrip.SetAsResponse(ts.Payload)
case <-time.After(time.Second):
t.Fatal("Timed out waiting for message")
}
select {
case ts.ResponseChan <- response.NewAck():
case <-time.After(time.Second):
t.Error("Timed out waiting for response")
}
h.CloseAsync()
if err := h.WaitForClose(time.Second * 5); err != nil {
t.Error(err)
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
mainapp/wsgi.py
|
"""
WSGI config for herokuprova project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mainapp.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
share/qt/extract_strings_qt.py
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP = "qt/supernodecoinstrings.cpp"
EMPTY = ['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
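# Example (traced from the parser above):
#   parse_po('msgid "hello"\nmsgstr "bonjour"\n')
# returns [(['"hello"'], ['"bonjour"'])] -- each side keeps the quoted po lines so
# they can later be joined back into a single C string literal below.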
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT = os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *supernodecoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("supernodecoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
[] |
[] |
[
"XGETTEXT"
] |
[]
|
["XGETTEXT"]
|
python
| 1 | 0 | |
privilegedaccess/item/rolesettings/item/roledefinition/role_definition_request_builder.go
|
package roledefinition
import (
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go"
ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be "github.com/microsoftgraph/msgraph-beta-sdk-go/models"
i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459 "github.com/microsoftgraph/msgraph-beta-sdk-go/models/odataerrors"
i38a5d1ceec83c79e0342a12b7be83204903989ad114bcc422ab464c67c6ab1bb "github.com/microsoftgraph/msgraph-beta-sdk-go/privilegedaccess/item/rolesettings/item/roledefinition/resource"
ie7c2e9df46952eaedb5a393f4e6f071d12bcfc57877fd35ccc5efc24af2e45ed "github.com/microsoftgraph/msgraph-beta-sdk-go/privilegedaccess/item/rolesettings/item/roledefinition/rolesetting"
)
// RoleDefinitionRequestBuilder provides operations to manage the roleDefinition property of the microsoft.graph.governanceRoleSetting entity.
type RoleDefinitionRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string
// The request adapter to use to execute the requests.
requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter
// Url template to use to build the URL for the current request builder
urlTemplate string
}
// RoleDefinitionRequestBuilderDeleteRequestConfiguration configuration for the request such as headers, query parameters, and middleware options.
type RoleDefinitionRequestBuilderDeleteRequestConfiguration struct {
// Request headers
Headers map[string]string
// Request options
Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption
}
// RoleDefinitionRequestBuilderGetQueryParameters read-only. The role definition that is enforced with this role setting.
type RoleDefinitionRequestBuilderGetQueryParameters struct {
// Expand related entities
Expand []string `uriparametername:"%24expand"`
// Select properties to be returned
Select []string `uriparametername:"%24select"`
}
// RoleDefinitionRequestBuilderGetRequestConfiguration configuration for the request such as headers, query parameters, and middleware options.
type RoleDefinitionRequestBuilderGetRequestConfiguration struct {
// Request headers
Headers map[string]string
// Request options
Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption
// Request query parameters
QueryParameters *RoleDefinitionRequestBuilderGetQueryParameters
}
// RoleDefinitionRequestBuilderPatchRequestConfiguration configuration for the request such as headers, query parameters, and middleware options.
type RoleDefinitionRequestBuilderPatchRequestConfiguration struct {
// Request headers
Headers map[string]string
// Request options
Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption
}
// NewRoleDefinitionRequestBuilderInternal instantiates a new RoleDefinitionRequestBuilder and sets the default values.
func NewRoleDefinitionRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*RoleDefinitionRequestBuilder) {
m := &RoleDefinitionRequestBuilder{
}
m.urlTemplate = "{+baseurl}/privilegedAccess/{privilegedAccess%2Did}/roleSettings/{governanceRoleSetting%2Did}/roleDefinition{?%24select,%24expand}";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = urlTplParams;
m.requestAdapter = requestAdapter;
return m
}
// NewRoleDefinitionRequestBuilder instantiates a new RoleDefinitionRequestBuilder and sets the default values.
func NewRoleDefinitionRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*RoleDefinitionRequestBuilder) {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewRoleDefinitionRequestBuilderInternal(urlParams, requestAdapter)
}
// CreateDeleteRequestInformation delete navigation property roleDefinition for privilegedAccess
func (m *RoleDefinitionRequestBuilder) CreateDeleteRequestInformation()(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
return m.CreateDeleteRequestInformationWithRequestConfiguration(nil);
}
// CreateDeleteRequestInformationWithRequestConfiguration delete navigation property roleDefinition for privilegedAccess
func (m *RoleDefinitionRequestBuilder) CreateDeleteRequestInformationWithRequestConfiguration(requestConfiguration *RoleDefinitionRequestBuilderDeleteRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.DELETE
if requestConfiguration != nil {
requestInfo.AddRequestHeaders(requestConfiguration.Headers)
requestInfo.AddRequestOptions(requestConfiguration.Options)
}
return requestInfo, nil
}
// CreateGetRequestInformation read-only. The role definition that is enforced with this role setting.
func (m *RoleDefinitionRequestBuilder) CreateGetRequestInformation()(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
return m.CreateGetRequestInformationWithRequestConfiguration(nil);
}
// CreateGetRequestInformationWithRequestConfiguration read-only. The role definition that is enforced with this role setting.
func (m *RoleDefinitionRequestBuilder) CreateGetRequestInformationWithRequestConfiguration(requestConfiguration *RoleDefinitionRequestBuilderGetRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.GET
if requestConfiguration != nil {
if requestConfiguration.QueryParameters != nil {
requestInfo.AddQueryParameters(*(requestConfiguration.QueryParameters))
}
requestInfo.AddRequestHeaders(requestConfiguration.Headers)
requestInfo.AddRequestOptions(requestConfiguration.Options)
}
return requestInfo, nil
}
// CreatePatchRequestInformation update the navigation property roleDefinition in privilegedAccess
func (m *RoleDefinitionRequestBuilder) CreatePatchRequestInformation(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.GovernanceRoleDefinitionable)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
return m.CreatePatchRequestInformationWithRequestConfiguration(body, nil);
}
// CreatePatchRequestInformationWithRequestConfiguration update the navigation property roleDefinition in privilegedAccess
func (m *RoleDefinitionRequestBuilder) CreatePatchRequestInformationWithRequestConfiguration(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.GovernanceRoleDefinitionable, requestConfiguration *RoleDefinitionRequestBuilderPatchRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.PATCH
requestInfo.SetContentFromParsable(m.requestAdapter, "application/json", body)
if requestConfiguration != nil {
requestInfo.AddRequestHeaders(requestConfiguration.Headers)
requestInfo.AddRequestOptions(requestConfiguration.Options)
}
return requestInfo, nil
}
// Delete delete navigation property roleDefinition for privilegedAccess
func (m *RoleDefinitionRequestBuilder) Delete()(error) {
return m.DeleteWithRequestConfigurationAndResponseHandler(nil, nil);
}
// DeleteWithRequestConfigurationAndResponseHandler delete navigation property roleDefinition for privilegedAccess
func (m *RoleDefinitionRequestBuilder) DeleteWithRequestConfigurationAndResponseHandler(requestConfiguration *RoleDefinitionRequestBuilderDeleteRequestConfiguration, responseHandler i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(error) {
requestInfo, err := m.CreateDeleteRequestInformationWithRequestConfiguration(requestConfiguration);
if err != nil {
return err
}
errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {
"4XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
"5XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
}
err = m.requestAdapter.SendNoContentAsync(requestInfo, responseHandler, errorMapping)
if err != nil {
return err
}
return nil
}
// Get read-only. The role definition that is enforced with this role setting.
func (m *RoleDefinitionRequestBuilder) Get()(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.GovernanceRoleDefinitionable, error) {
return m.GetWithRequestConfigurationAndResponseHandler(nil, nil);
}
// GetWithRequestConfigurationAndResponseHandler read-only. The role definition that is enforced with this role setting.
func (m *RoleDefinitionRequestBuilder) GetWithRequestConfigurationAndResponseHandler(requestConfiguration *RoleDefinitionRequestBuilderGetRequestConfiguration, responseHandler i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.GovernanceRoleDefinitionable, error) {
requestInfo, err := m.CreateGetRequestInformationWithRequestConfiguration(requestConfiguration);
if err != nil {
return nil, err
}
errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {
"4XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
"5XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
}
res, err := m.requestAdapter.SendAsync(requestInfo, ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateGovernanceRoleDefinitionFromDiscriminatorValue, responseHandler, errorMapping)
if err != nil {
return nil, err
}
return res.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.GovernanceRoleDefinitionable), nil
}
// Patch update the navigation property roleDefinition in privilegedAccess
func (m *RoleDefinitionRequestBuilder) Patch(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.GovernanceRoleDefinitionable)(error) {
return m.PatchWithRequestConfigurationAndResponseHandler(body, nil, nil);
}
// PatchWithRequestConfigurationAndResponseHandler update the navigation property roleDefinition in privilegedAccess
func (m *RoleDefinitionRequestBuilder) PatchWithRequestConfigurationAndResponseHandler(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.GovernanceRoleDefinitionable, requestConfiguration *RoleDefinitionRequestBuilderPatchRequestConfiguration, responseHandler i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(error) {
requestInfo, err := m.CreatePatchRequestInformationWithRequestConfiguration(body, requestConfiguration);
if err != nil {
return err
}
errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {
"4XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
"5XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
}
err = m.requestAdapter.SendNoContentAsync(requestInfo, responseHandler, errorMapping)
if err != nil {
return err
}
return nil
}
// Resource the resource property
func (m *RoleDefinitionRequestBuilder) Resource()(*i38a5d1ceec83c79e0342a12b7be83204903989ad114bcc422ab464c67c6ab1bb.ResourceRequestBuilder) {
return i38a5d1ceec83c79e0342a12b7be83204903989ad114bcc422ab464c67c6ab1bb.NewResourceRequestBuilderInternal(m.pathParameters, m.requestAdapter);
}
// RoleSetting the roleSetting property
func (m *RoleDefinitionRequestBuilder) RoleSetting()(*ie7c2e9df46952eaedb5a393f4e6f071d12bcfc57877fd35ccc5efc24af2e45ed.RoleSettingRequestBuilder) {
return ie7c2e9df46952eaedb5a393f4e6f071d12bcfc57877fd35ccc5efc24af2e45ed.NewRoleSettingRequestBuilderInternal(m.pathParameters, m.requestAdapter);
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
common-dup/src/test/java/org/duracloud/mill/dup/TestDuplicationPolicyManager.java
|
/*
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://duracloud.org/license/
*/
package org.duracloud.mill.dup;
import static org.hamcrest.CoreMatchers.hasItems;
import static org.junit.Assert.assertThat;
import java.util.Set;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import org.duracloud.mill.dup.repo.DuplicationPolicyRepo;
import org.duracloud.mill.dup.repo.S3DuplicationPolicyRepo;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Integration test for the DuplicationPolicyManager, which interacts with S3
*
* NOTE: This test expects the environment variables AWS_ACCESS_KEY_ID and
* AWS_SECRET_KEY to be set to AWS credentials with read and write access
* to S3.
*
* @author Bill Branan
* Date: 11/1/13
*/
public class TestDuplicationPolicyManager extends BaseDuplicationPolicyTester {
private AmazonS3 s3Client;
private String bucketName;
@Before
public void setup() {
s3Client = AmazonS3ClientBuilder.standard().build();
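// The default AWS credential chain used by AmazonS3ClientBuilder picks up the
// AWS_ACCESS_KEY_ID / AWS_SECRET_KEY environment variables described in the class Javadoc.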
// Create policy bucket
String accessKey = System.getenv("AWS_ACCESS_KEY_ID");
bucketName = accessKey.toLowerCase() + "." +
S3DuplicationPolicyRepo.DUP_POLICY_REPO_BUCKET_SUFFIX;
s3Client.createBucket(bucketName);
// Load accounts list
s3Client.putObject(bucketName,
DuplicationPolicyRepo.DUP_ACCOUNTS_NAME,
policyAccountsFile);
// Load policies
String acct1PolicyName =
"account1" + DuplicationPolicyRepo.DUP_POLICY_SUFFIX;
s3Client.putObject(bucketName, acct1PolicyName, policyFile);
String acct2PolicyName =
"account2" + DuplicationPolicyRepo.DUP_POLICY_SUFFIX;
s3Client.putObject(bucketName, acct2PolicyName, policyFile);
String acct3PolicyName =
"account3" + DuplicationPolicyRepo.DUP_POLICY_SUFFIX;
s3Client.putObject(bucketName, acct3PolicyName, policyFile);
}
@After
public void teardown() {
// Clear policy bucket contents
for (S3ObjectSummary object :
s3Client.listObjects(bucketName).getObjectSummaries()) {
s3Client.deleteObject(bucketName, object.getKey());
}
// Remove policy bucket
s3Client.deleteBucket(bucketName);
}
@Test
public void testDuplicationPolicyManager() {
DuplicationPolicyManager policyManager =
new DuplicationPolicyManager(new S3DuplicationPolicyRepo());
Set<String> dupAccounts = policyManager.getDuplicationAccounts();
assertThat(dupAccounts, hasItems("account1", "account2", "account3"));
for (String dupAccount : dupAccounts) {
DuplicationPolicy policy =
policyManager.getDuplicationPolicy(dupAccount);
assertThat(policy.getSpaces(), hasItems("testSpace1", "testSpace2"));
}
}
}
|
[
"\"AWS_ACCESS_KEY_ID\""
] |
[] |
[
"AWS_ACCESS_KEY_ID"
] |
[]
|
["AWS_ACCESS_KEY_ID"]
|
java
| 1 | 0 | |
setup.py
|
import os
import sys
import subprocess
import setuptools
from setuptools.command.build_ext import build_ext
from setuptools.command.test import test
class TestCommand(test):
description = 'run tests, linters and create a coverage report'
user_options = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.returncode = 0
def finalize_options(self):
super().finalize_options()
# Newer setuptools versions don't need this anymore, hence the try block.
try:
# pylint: disable=attribute-defined-outside-init
self.test_args = []
self.test_suite = 'True'
except AttributeError:
pass
def run_tests(self):
self._call('python -m pytest --cov=layered test')
self._call('python -m pylint layered')
self._call('python -m pylint test')
self._call('python -m pylint setup.py')
self._check()
def _call(self, command):
env = os.environ.copy()
env['PYTHONPATH'] = ''.join(':' + x for x in sys.path)
print('Run command', command)
try:
subprocess.check_call(command.split(), env=env)
except subprocess.CalledProcessError as error:
print('Command failed with exit code', error.returncode)
self.returncode = 1
def _check(self):
if self.returncode:
sys.exit(self.returncode)
class BuildExtCommand(build_ext):
"""
Fix Numpy build error when bundled as a dependency.
From http://stackoverflow.com/a/21621689/1079110
"""
def finalize_options(self):
super().finalize_options()
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
DESCRIPTION = 'Clean reference implementation of feed forward neural networks'
SETUP_REQUIRES = [
'numpy',
'sphinx',
]
INSTALL_REQUIRES = [
'PyYAML',
'numpy',
'matplotlib',
]
TESTS_REQUIRE = [
'pytest',
'pytest-cov',
'pylint',
]
if __name__ == '__main__':
setuptools.setup(
name='layered',
version='0.1.8',
description=DESCRIPTION,
url='http://github.com/danijar/layered',
author='Danijar Hafner',
author_email='[email protected]',
license='MIT',
packages=['layered'],
setup_requires=SETUP_REQUIRES,
install_requires=INSTALL_REQUIRES,
tests_require=TESTS_REQUIRE,
cmdclass={'test': TestCommand, 'build_ext': BuildExtCommand},
entry_points={'console_scripts': ['layered=layered.__main__:main']},
)
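# Usage note (illustrative): the cmdclass mapping above wires these commands in, so
#   python setup.py test       # runs pytest with coverage plus pylint via TestCommand
#   python setup.py build_ext  # builds extensions with NumPy include dirs via BuildExtCommand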
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
build.go
|
// +build ignore
package main
import (
"bytes"
"crypto/md5"
"crypto/sha256"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
)
const (
windows = "windows"
linux = "linux"
)
var (
//versionRe = regexp.MustCompile(`-[0-9]{1,3}-g[0-9a-f]{5,10}`)
goarch string
goos string
gocc string
cgo bool
libc string
pkgArch string
version string = "v1"
// deb & rpm do not support semver, so we have to handle their versions a little differently
linuxPackageVersion string = "v1"
linuxPackageIteration string = ""
race bool
workingDir string
includeBuildId bool = true
buildId string = "0"
serverBinary string = "grafana-server"
cliBinary string = "grafana-cli"
binaries []string = []string{serverBinary, cliBinary}
isDev bool = false
enterprise bool = false
skipRpmGen bool = false
skipDebGen bool = false
printGenVersion bool = false
)
func main() {
log.SetOutput(os.Stdout)
log.SetFlags(0)
ensureGoPath()
var buildIdRaw string
flag.StringVar(&goarch, "goarch", runtime.GOARCH, "GOARCH")
flag.StringVar(&goos, "goos", runtime.GOOS, "GOOS")
flag.StringVar(&gocc, "cc", "", "CC")
flag.StringVar(&libc, "libc", "", "LIBC")
flag.BoolVar(&cgo, "cgo-enabled", cgo, "Enable cgo")
flag.StringVar(&pkgArch, "pkg-arch", "", "PKG ARCH")
flag.BoolVar(&race, "race", race, "Use race detector")
flag.BoolVar(&includeBuildId, "includeBuildId", includeBuildId, "IncludeBuildId in package name")
flag.BoolVar(&enterprise, "enterprise", enterprise, "Build enterprise version of Grafana")
flag.StringVar(&buildIdRaw, "buildId", "0", "Build ID from CI system")
flag.BoolVar(&isDev, "dev", isDev, "optimal for development, skips certain steps")
flag.BoolVar(&skipRpmGen, "skipRpm", skipRpmGen, "skip rpm package generation (default: false)")
flag.BoolVar(&skipDebGen, "skipDeb", skipDebGen, "skip deb package generation (default: false)")
flag.BoolVar(&printGenVersion, "gen-version", printGenVersion, "generate Grafana version and output (default: false)")
flag.Parse()
buildId = shortenBuildId(buildIdRaw)
readVersionFromPackageJson()
if pkgArch == "" {
pkgArch = goarch
}
if printGenVersion {
printGeneratedVersion()
return
}
log.Printf("Version: %s, Linux Version: %s, Package Iteration: %s\n", version, linuxPackageVersion, linuxPackageIteration)
if flag.NArg() == 0 {
log.Println("Usage: go run build.go build")
return
}
workingDir, _ = os.Getwd()
for _, cmd := range flag.Args() {
switch cmd {
case "setup":
setup()
case "build-srv", "build-server":
clean()
build("grafana-server", "./pkg/cmd/grafana-server", []string{})
case "build-cli":
clean()
build("grafana-cli", "./pkg/cmd/grafana-cli", []string{})
case "build":
//clean()
for _, binary := range binaries {
build(binary, "./pkg/cmd/"+binary, []string{})
}
case "build-frontend":
grunt(gruntBuildArg("build")...)
case "test":
test("./pkg/...")
grunt("test")
case "package":
grunt(gruntBuildArg("build")...)
grunt(gruntBuildArg("package")...)
if goos == linux {
createLinuxPackages()
}
case "package-only":
grunt(gruntBuildArg("package")...)
if goos == linux {
createLinuxPackages()
}
case "pkg-archive":
grunt(gruntBuildArg("package")...)
case "pkg-rpm":
grunt(gruntBuildArg("release")...)
createRpmPackages()
case "pkg-deb":
grunt(gruntBuildArg("release")...)
createDebPackages()
case "sha-dist":
shaFilesInDist()
case "latest":
makeLatestDistCopies()
case "clean":
clean()
default:
log.Fatalf("Unknown command %q", cmd)
}
}
}
func makeLatestDistCopies() {
files, err := ioutil.ReadDir("dist")
if err != nil {
log.Fatalf("failed to create latest copies. Cannot read from /dist")
}
latestMapping := map[string]string{
"_amd64.deb": "dist/grafana_latest_amd64.deb",
".x86_64.rpm": "dist/grafana-latest-1.x86_64.rpm",
".linux-amd64.tar.gz": "dist/grafana-latest.linux-x64.tar.gz",
".linux-amd64-musl.tar.gz": "dist/grafana-latest.linux-x64-musl.tar.gz",
".linux-armv7.tar.gz": "dist/grafana-latest.linux-armv7.tar.gz",
".linux-armv7-musl.tar.gz": "dist/grafana-latest.linux-armv7-musl.tar.gz",
".linux-armv6.tar.gz": "dist/grafana-latest.linux-armv6.tar.gz",
".linux-arm64.tar.gz": "dist/grafana-latest.linux-arm64.tar.gz",
".linux-arm64-musl.tar.gz": "dist/grafana-latest.linux-arm64-musl.tar.gz",
}
for _, file := range files {
for extension, fullName := range latestMapping {
if strings.HasSuffix(file.Name(), extension) {
runError("cp", path.Join("dist", file.Name()), fullName)
}
}
}
}
func readVersionFromPackageJson() {
reader, err := os.Open("package.json")
if err != nil {
log.Fatal("Failed to open package.json")
return
}
defer reader.Close()
jsonObj := map[string]interface{}{}
jsonParser := json.NewDecoder(reader)
if err := jsonParser.Decode(&jsonObj); err != nil {
log.Fatal("Failed to decode package.json")
}
version = jsonObj["version"].(string)
linuxPackageVersion = version
linuxPackageIteration = ""
// handle pre-release version parts (deb/rpm do not support semver)
parts := strings.Split(version, "-")
if len(parts) > 1 {
linuxPackageVersion = parts[0]
linuxPackageIteration = parts[1]
}
// add timestamp to iteration
if includeBuildId {
if buildId != "0" {
linuxPackageIteration = fmt.Sprintf("%s%s", buildId, linuxPackageIteration)
} else {
linuxPackageIteration = fmt.Sprintf("%d%s", time.Now().Unix(), linuxPackageIteration)
}
}
}
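// Example (traced from readVersionFromPackageJson above): a package.json version of
// "6.1.0-beta1" yields linuxPackageVersion "6.1.0" and linuxPackageIteration "beta1";
// when includeBuildId is set, the build id (or a Unix timestamp) is prefixed to the
// iteration, e.g. "12345beta1".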
type linuxPackageOptions struct {
packageType string
packageArch string
homeDir string
homeBinDir string
binPath string
serverBinPath string
cliBinPath string
configDir string
ldapFilePath string
etcDefaultPath string
etcDefaultFilePath string
initdScriptFilePath string
systemdServiceFilePath string
postinstSrc string
initdScriptSrc string
defaultFileSrc string
systemdFileSrc string
cliBinaryWrapperSrc string
depends []string
}
func createDebPackages() {
debPkgArch := pkgArch
if pkgArch == "armv7" || pkgArch == "armv6" {
debPkgArch = "armhf"
}
createPackage(linuxPackageOptions{
packageType: "deb",
packageArch: debPkgArch,
homeDir: "/usr/share/grafana",
homeBinDir: "/usr/share/grafana/bin",
binPath: "/usr/sbin",
configDir: "/etc/grafana",
etcDefaultPath: "/etc/default",
etcDefaultFilePath: "/etc/default/grafana-server",
initdScriptFilePath: "/etc/init.d/grafana-server",
systemdServiceFilePath: "/usr/lib/systemd/system/grafana-server.service",
postinstSrc: "packaging/deb/control/postinst",
initdScriptSrc: "packaging/deb/init.d/grafana-server",
defaultFileSrc: "packaging/deb/default/grafana-server",
systemdFileSrc: "packaging/deb/systemd/grafana-server.service",
cliBinaryWrapperSrc: "packaging/wrappers/grafana-cli",
depends: []string{"adduser", "libfontconfig1"},
})
}
func createRpmPackages() {
rpmPkgArch := pkgArch
switch {
case pkgArch == "armv7":
rpmPkgArch = "armhfp"
case pkgArch == "arm64":
rpmPkgArch = "aarch64"
}
createPackage(linuxPackageOptions{
packageType: "rpm",
packageArch: rpmPkgArch,
homeDir: "/usr/share/grafana",
homeBinDir: "/usr/share/grafana/bin",
binPath: "/usr/sbin",
configDir: "/etc/grafana",
etcDefaultPath: "/etc/sysconfig",
etcDefaultFilePath: "/etc/sysconfig/grafana-server",
initdScriptFilePath: "/etc/init.d/grafana-server",
systemdServiceFilePath: "/usr/lib/systemd/system/grafana-server.service",
postinstSrc: "packaging/rpm/control/postinst",
initdScriptSrc: "packaging/rpm/init.d/grafana-server",
defaultFileSrc: "packaging/rpm/sysconfig/grafana-server",
systemdFileSrc: "packaging/rpm/systemd/grafana-server.service",
cliBinaryWrapperSrc: "packaging/wrappers/grafana-cli",
depends: []string{"/sbin/service", "fontconfig", "freetype", "urw-fonts"},
})
}
func createLinuxPackages() {
if !skipDebGen {
createDebPackages()
}
if !skipRpmGen {
createRpmPackages()
}
}
func createPackage(options linuxPackageOptions) {
packageRoot, _ := ioutil.TempDir("", "grafana-linux-pack")
// create directories
runPrint("mkdir", "-p", filepath.Join(packageRoot, options.homeDir))
runPrint("mkdir", "-p", filepath.Join(packageRoot, options.configDir))
runPrint("mkdir", "-p", filepath.Join(packageRoot, "/etc/init.d"))
runPrint("mkdir", "-p", filepath.Join(packageRoot, options.etcDefaultPath))
runPrint("mkdir", "-p", filepath.Join(packageRoot, "/usr/lib/systemd/system"))
runPrint("mkdir", "-p", filepath.Join(packageRoot, "/usr/sbin"))
// copy grafana-cli wrapper
runPrint("cp", "-p", options.cliBinaryWrapperSrc, filepath.Join(packageRoot, "/usr/sbin/"+cliBinary))
// copy grafana-server binary
runPrint("cp", "-p", filepath.Join(workingDir, "tmp/bin/"+serverBinary), filepath.Join(packageRoot, "/usr/sbin/"+serverBinary))
// copy init.d script
runPrint("cp", "-p", options.initdScriptSrc, filepath.Join(packageRoot, options.initdScriptFilePath))
// copy environment var file
runPrint("cp", "-p", options.defaultFileSrc, filepath.Join(packageRoot, options.etcDefaultFilePath))
// copy systemd file
runPrint("cp", "-p", options.systemdFileSrc, filepath.Join(packageRoot, options.systemdServiceFilePath))
// copy release files
runPrint("cp", "-a", filepath.Join(workingDir, "tmp")+"/.", filepath.Join(packageRoot, options.homeDir))
// remove bin path
runPrint("rm", "-rf", filepath.Join(packageRoot, options.homeDir, "bin"))
// create /bin within home
runPrint("mkdir", "-p", filepath.Join(packageRoot, options.homeBinDir))
// The grafana-cli binary is exposed through a wrapper to ensure a proper
// configuration is in place. To enable that, we need to store the original
// binary in a separate location to avoid conflicts.
runPrint("cp", "-p", filepath.Join(workingDir, "tmp/bin/"+cliBinary), filepath.Join(packageRoot, options.homeBinDir, cliBinary))
args := []string{
"-s", "dir",
"--description", "Grafana",
"-C", packageRoot,
"--url", "https://grafana.com",
"--maintainer", "[email protected]",
"--config-files", options.initdScriptFilePath,
"--config-files", options.etcDefaultFilePath,
"--config-files", options.systemdServiceFilePath,
"--after-install", options.postinstSrc,
"--version", linuxPackageVersion,
"-p", "./dist",
}
name := "grafana"
if enterprise {
name += "-enterprise"
args = append(args, "--replaces", "grafana")
}
fmt.Printf("pkgArch is set to '%s', generated arch is '%s'\n", pkgArch, options.packageArch)
if pkgArch == "armv6" {
name += "-rpi"
args = append(args, "--replaces", "grafana")
}
args = append(args, "--name", name)
description := "Grafana"
if enterprise {
description += " Enterprise"
}
// use the description as the package vendor so the variable is actually consumed
args = append(args, "--vendor", description)
if !enterprise {
args = append(args, "--license", "\"Apache 2.0\"")
}
if options.packageType == "rpm" {
args = append(args, "--rpm-posttrans", "packaging/rpm/control/posttrans")
}
if options.packageType == "deb" {
args = append(args, "--deb-no-default-config-files")
}
if options.packageArch != "" {
args = append(args, "-a", options.packageArch)
}
if linuxPackageIteration != "" {
args = append(args, "--iteration", linuxPackageIteration)
}
// add dependencies
for _, dep := range options.depends {
args = append(args, "--depends", dep)
}
args = append(args, ".")
fmt.Println("Creating package: ", options.packageType)
runPrint("fpm", append([]string{"-t", options.packageType}, args...)...)
}
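// Hedged illustration (not part of the original script): with the flags assembled above,
// a deb build roughly expands to an fpm invocation of the following shape. Version,
// iteration and arch values are placeholders, not real release values.
//
//   fpm -t deb -s dir --description Grafana -C <packageRoot> \
//       --url https://grafana.com --maintainer [email protected] \
//       --config-files /etc/init.d/grafana-server \
//       --config-files /etc/default/grafana-server \
//       --config-files /usr/lib/systemd/system/grafana-server.service \
//       --after-install packaging/deb/control/postinst \
//       --version 6.0.0 -p ./dist --name grafana \
//       --deb-no-default-config-files -a amd64 --iteration 123 \
//       --depends adduser --depends libfontconfig1 .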
func ensureGoPath() {
if os.Getenv("GOPATH") == "" {
cwd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
gopath := filepath.Clean(filepath.Join(cwd, "../../../../"))
log.Println("GOPATH is", gopath)
os.Setenv("GOPATH", gopath)
}
}
func grunt(params ...string) {
if runtime.GOOS == windows {
runPrint(`.\node_modules\.bin\grunt`, params...)
} else {
runPrint("./node_modules/.bin/grunt", params...)
}
}
func genPackageVersion() string {
if includeBuildId {
return fmt.Sprintf("%v-%v", linuxPackageVersion, linuxPackageIteration)
} else {
return version
}
}
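// Hedged examples (illustrative values only): with includeBuildId set and
// linuxPackageVersion "6.0.0", linuxPackageIteration "123pre1", this returns
// "6.0.0-123pre1"; without includeBuildId it returns the raw version string.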
func gruntBuildArg(task string) []string {
args := []string{task}
args = append(args, fmt.Sprintf("--pkgVer=%v", genPackageVersion()))
if pkgArch != "" {
args = append(args, fmt.Sprintf("--arch=%v", pkgArch))
}
if libc != "" {
args = append(args, fmt.Sprintf("--libc=%s", libc))
}
if enterprise {
args = append(args, "--enterprise")
}
args = append(args, fmt.Sprintf("--platform=%v", goos))
return args
}
func setup() {
runPrint("go", "install", "-v", "./pkg/cmd/grafana-server")
}
func printGeneratedVersion() {
fmt.Print(genPackageVersion())
}
func test(pkg string) {
setBuildEnv()
runPrint("go", "test", "-short", "-timeout", "60s", pkg)
}
func build(binaryName, pkg string, tags []string) {
libcPart := ""
if libc != "" {
libcPart = fmt.Sprintf("-%s", libc)
}
binary := fmt.Sprintf("./bin/%s-%s%s/%s", goos, goarch, libcPart, binaryName)
if isDev {
//don't include os/arch/libc in output path in dev environment
binary = fmt.Sprintf("./bin/%s", binaryName)
}
if goos == windows {
binary += ".exe"
}
if !isDev {
rmr(binary, binary+".md5")
}
args := []string{"build", "-buildmode=exe", "-ldflags", ldflags()}
if len(tags) > 0 {
args = append(args, "-tags", strings.Join(tags, ","))
}
if race {
args = append(args, "-race")
}
args = append(args, "-o", binary)
args = append(args, pkg)
if !isDev {
setBuildEnv()
runPrint("go", "version")
libcPart := ""
if libc != "" {
libcPart = fmt.Sprintf("/%s", libc)
}
fmt.Printf("Targeting %s/%s%s\n", goos, goarch, libcPart)
}
runPrint("go", args...)
if !isDev {
// Create an md5 checksum of the binary, to be included in the archive for
// automatic upgrades.
err := md5File(binary)
if err != nil {
log.Fatal(err)
}
}
}
func ldflags() string {
var b bytes.Buffer
b.WriteString("-w")
b.WriteString(fmt.Sprintf(" -X main.version=%s", version))
b.WriteString(fmt.Sprintf(" -X main.commit=%s", getGitSha()))
b.WriteString(fmt.Sprintf(" -X main.buildstamp=%d", buildStamp()))
b.WriteString(fmt.Sprintf(" -X main.buildBranch=%s", getGitBranch()))
if v := os.Getenv("LDFLAGS"); v != "" {
b.WriteString(fmt.Sprintf(" -extldflags \"%s\"", v))
}
return b.String()
}
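// Hedged example of the generated flag string (values are placeholders):
//   -w -X main.version=6.0.0 -X main.commit=abc1234 -X main.buildstamp=1546300800 -X main.buildBranch=master
// plus an -extldflags block when the LDFLAGS environment variable is set.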
func rmr(paths ...string) {
for _, path := range paths {
log.Println("rm -r", path)
os.RemoveAll(path)
}
}
func clean() {
if isDev {
return
}
rmr("dist")
rmr("tmp")
rmr(filepath.Join(os.Getenv("GOPATH"), fmt.Sprintf("pkg/%s_%s/github.com/grafana", goos, goarch)))
}
func setBuildEnv() {
os.Setenv("GOOS", goos)
if goos == windows {
// require windows >=7
os.Setenv("CGO_CFLAGS", "-D_WIN32_WINNT=0x0601")
}
if goarch != "amd64" || goos != linux {
// needed for all other archs
cgo = true
}
if strings.HasPrefix(goarch, "armv") {
os.Setenv("GOARCH", "arm")
os.Setenv("GOARM", goarch[4:])
} else {
os.Setenv("GOARCH", goarch)
}
if goarch == "386" {
os.Setenv("GO386", "387")
}
if cgo {
os.Setenv("CGO_ENABLED", "1")
}
if gocc != "" {
os.Setenv("CC", gocc)
}
}
func getGitBranch() string {
v, err := runError("git", "rev-parse", "--abbrev-ref", "HEAD")
if err != nil {
return "master"
}
return string(v)
}
func getGitSha() string {
v, err := runError("git", "rev-parse", "--short", "HEAD")
if err != nil {
return "unknown-dev"
}
return string(v)
}
func buildStamp() int64 {
// use SOURCE_DATE_EPOCH if set.
if s, _ := strconv.ParseInt(os.Getenv("SOURCE_DATE_EPOCH"), 10, 64); s > 0 {
return s
}
bs, err := runError("git", "show", "-s", "--format=%ct")
if err != nil {
return time.Now().Unix()
}
s, _ := strconv.ParseInt(string(bs), 10, 64)
return s
}
func runError(cmd string, args ...string) ([]byte, error) {
ecmd := exec.Command(cmd, args...)
bs, err := ecmd.CombinedOutput()
if err != nil {
return nil, err
}
return bytes.TrimSpace(bs), nil
}
func runPrint(cmd string, args ...string) {
log.Println(cmd, strings.Join(args, " "))
ecmd := exec.Command(cmd, args...)
ecmd.Env = append(os.Environ(), "GO111MODULE=on")
ecmd.Stdout = os.Stdout
ecmd.Stderr = os.Stderr
err := ecmd.Run()
if err != nil {
log.Fatal(err)
}
}
func md5File(file string) error {
fd, err := os.Open(file)
if err != nil {
return err
}
defer fd.Close()
h := md5.New()
_, err = io.Copy(h, fd)
if err != nil {
return err
}
out, err := os.Create(file + ".md5")
if err != nil {
return err
}
_, err = fmt.Fprintf(out, "%x\n", h.Sum(nil))
if err != nil {
return err
}
return out.Close()
}
func shaFilesInDist() {
filepath.Walk("./dist", func(path string, f os.FileInfo, err error) error {
if path == "./dist" {
return nil
}
if !strings.Contains(path, ".sha256") {
err := shaFile(path)
if err != nil {
log.Printf("Failed to create sha file. error: %v\n", err)
}
}
return nil
})
}
func shaFile(file string) error {
fd, err := os.Open(file)
if err != nil {
return err
}
defer fd.Close()
h := sha256.New()
_, err = io.Copy(h, fd)
if err != nil {
return err
}
out, err := os.Create(file + ".sha256")
if err != nil {
return err
}
_, err = fmt.Fprintf(out, "%x\n", h.Sum(nil))
if err != nil {
return err
}
return out.Close()
}
func shortenBuildId(buildId string) string {
buildId = strings.Replace(buildId, "-", "", -1)
if len(buildId) < 9 {
return buildId
}
return buildId[0:8]
}
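// Examples: shortenBuildId("12345678901") == "12345678" and
// shortenBuildId("ab-12-cd") == "ab12cd" (dashes are stripped before shortening).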
|
[
"\"GOPATH\"",
"\"LDFLAGS\"",
"\"GOPATH\"",
"\"SOURCE_DATE_EPOCH\""
] |
[] |
[
"GOPATH",
"LDFLAGS",
"SOURCE_DATE_EPOCH"
] |
[]
|
["GOPATH", "LDFLAGS", "SOURCE_DATE_EPOCH"]
|
go
| 3 | 0 | |
gluon/scheduler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <[email protected]>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Background processes made simple
---------------------------------
"""
from __future__ import print_function
import os
import re
import time
import multiprocessing
import sys
import threading
import traceback
import signal
import socket
import datetime
import logging
import optparse
import tempfile
import types
from functools import reduce
from json import loads, dumps
from gluon import DAL, Field, IS_NOT_EMPTY, IS_IN_SET, IS_NOT_IN_DB, IS_EMPTY_OR
from gluon import IS_INT_IN_RANGE, IS_DATETIME, IS_IN_DB
from gluon.utils import web2py_uuid
from gluon._compat import Queue, long, iteritems, PY2
from gluon.storage import Storage
USAGE = """
## Example
For any existing app
Create File: app/models/scheduler.py ======
from gluon.scheduler import Scheduler
def demo1(*args,**vars):
print('you passed args=%s and vars=%s' % (args, vars))
return 'done!'
def demo2():
1/0
scheduler = Scheduler(db,dict(demo1=demo1,demo2=demo2))
## run worker nodes with:
cd web2py
python web2py.py -K myapp
or
python gluon/scheduler.py -u sqlite://storage.sqlite \
-f applications/myapp/databases/ \
-t mytasks.py
(-h for info)
python scheduler.py -h
## schedule jobs using
http://127.0.0.1:8000/myapp/appadmin/insert/db/scheduler_task
## monitor scheduled jobs
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_task.id>0
## view completed jobs
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_run.id>0
## view workers
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_worker.id>0
"""
path = os.getcwd()
if 'WEB2PY_PATH' not in os.environ:
os.environ['WEB2PY_PATH'] = path
IDENTIFIER = "%s#%s" % (socket.gethostname(), os.getpid())
logger = logging.getLogger('web2py.scheduler.%s' % IDENTIFIER)
QUEUED = 'QUEUED'
ASSIGNED = 'ASSIGNED'
RUNNING = 'RUNNING'
COMPLETED = 'COMPLETED'
FAILED = 'FAILED'
TIMEOUT = 'TIMEOUT'
STOPPED = 'STOPPED'
ACTIVE = 'ACTIVE'
TERMINATE = 'TERMINATE'
DISABLED = 'DISABLED'
KILL = 'KILL'
PICK = 'PICK'
STOP_TASK = 'STOP_TASK'
EXPIRED = 'EXPIRED'
SECONDS = 1
HEARTBEAT = 3 * SECONDS
MAXHIBERNATION = 10
CLEAROUT = '!clear!'
CALLABLETYPES = (types.LambdaType, types.FunctionType,
types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
class Task(object):
"""Defines a "task" object that gets passed from the main thread to the
executor thread
"""
def __init__(self, app, function, timeout, args='[]', vars='{}', **kwargs):
logger.debug(' new task allocated: %s.%s', app, function)
self.app = app
self.function = function
self.timeout = timeout
self.args = args # json
self.vars = vars # json
self.__dict__.update(kwargs)
def __str__(self):
return '<Task: %s>' % self.function
class TaskReport(object):
"""Defines a "task report" object that gets passed from the executor's
thread to the main one
"""
def __init__(self, status, result=None, output=None, tb=None):
logger.debug(' new task report: %s', status)
if tb:
logger.debug(' traceback: %s', tb)
else:
logger.debug(' result: %s', result)
self.status = status
self.result = result
self.output = output
self.tb = tb
def __str__(self):
return '<TaskReport: %s>' % self.status
class JobGraph(object):
"""Experimental: dependencies among tasks."""
def __init__(self, db, job_name):
self.job_name = job_name or 'job_0'
self.db = db
def add_deps(self, task_parent, task_child):
"""Create a dependency between task_parent and task_child."""
self.db.scheduler_task_deps.insert(task_parent=task_parent,
task_child=task_child,
job_name=self.job_name)
def validate(self, job_name=None):
"""Validate if all tasks job_name can be completed.
Checks if there are no mutual dependencies among tasks.
Commits at the end if successful, otherwise rolls back the entire
transaction. Handle with care!
"""
db = self.db
sd = db.scheduler_task_deps
if job_name:
q = sd.job_name == job_name
else:
q = sd.id > 0
edges = db(q).select()
nested_dict = {}
for row in edges:
k = row.task_parent
if k in nested_dict:
nested_dict[k].add(row.task_child)
else:
nested_dict[k] = set((row.task_child,))
try:
rtn = []
for k, v in nested_dict.items():
v.discard(k) # Ignore self dependencies
extra_items_in_deps = reduce(set.union, nested_dict.values()) - set(nested_dict.keys())
nested_dict.update(dict((item, set()) for item in extra_items_in_deps))
while True:
ordered = set(item for item, dep in nested_dict.items() if not dep)
if not ordered:
break
rtn.append(ordered)
nested_dict = dict(
(item, (dep - ordered)) for item, dep in nested_dict.items()
if item not in ordered
)
assert not nested_dict, "A cyclic dependency exists amongst %r" % nested_dict
db.commit()
return rtn
except:
db.rollback()
return None
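# Hedged usage sketch (not part of the original module; defined but never called here):
# shows how application code might chain two queued tasks with JobGraph. The task names,
# functions and the `db`/`scheduler` objects are assumptions for illustration only, and
# the function names must be known to the Scheduler for validation to pass.
def _example_job_graph(db, scheduler):
    # queue two tasks; 'load' must run only after 'extract' completes
    extract = scheduler.queue_task('extract_data', task_name='extract')
    load = scheduler.queue_task('load_data', task_name='load')
    graph = JobGraph(db, 'etl_job')
    graph.add_deps(extract.id, load.id)  # parent=extract, child=load
    # validate() returns the ordered "waves" of task ids, or None on a cyclic dependency
    return graph.validate('etl_job')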
class CronParser(object):
def __init__(self, cronline, base=None):
self.cronline = cronline
self.sched = base or datetime.datetime.now()
self.task = None
@staticmethod
def _rangetolist(s, period='min'):
retval = []
if s.startswith('*'):
if period == 'min':
s = s.replace('*', '0-59', 1)
elif period == 'hr':
s = s.replace('*', '0-23', 1)
elif period == 'dom':
s = s.replace('*', '1-31', 1)
elif period == 'mon':
s = s.replace('*', '1-12', 1)
elif period == 'dow':
s = s.replace('*', '0-6', 1)
m = re.compile(r'(\d+)-(\d+)/(\d+)')
match = m.match(s)
if match:
min_, max_ = int(match.group(1)), int(match.group(2)) + 1
step_ = int(match.group(3))
else:
m = re.compile(r'(\d+)/(\d+)')
ranges_max = {'min': 59, 'hr': 23, 'mon': 12, 'dom': 31, 'dow': 7}
match = m.match(s)
if match:
min_, max_ = int(match.group(1)), ranges_max[period] + 1
step_ = int(match.group(2))
if match:
for i in range(min_, max_, step_):
retval.append(i)
return retval
@staticmethod
def _sanitycheck(values, period):
if period == 'min':
check = all(0 <= i <= 59 for i in values)
elif period == 'hr':
check = all(0 <= i <= 23 for i in values)
elif period == 'dom':
domrange = list(range(1, 32)) + ['l']
check = all(i in domrange for i in values)
elif period == 'mon':
check = all(1 <= i <= 12 for i in values)
elif period == 'dow':
check = all(0 <= i <= 7 for i in values)
return check
def _parse(self):
line = self.cronline.lower()
task = {}
if line.startswith('@yearly'):
line = line.replace('@yearly', '0 0 1 1 *')
elif line.startswith('@annually'):
line = line.replace('@annually', '0 0 1 1 *')
elif line.startswith('@monthly'):
line = line.replace('@monthly', '0 0 1 * *')
elif line.startswith('@weekly'):
line = line.replace('@weekly', '0 0 * * 0')
elif line.startswith('@daily'):
line = line.replace('@daily', '0 0 * * *')
elif line.startswith('@midnight'):
line = line.replace('@midnight', '0 0 * * *')
elif line.startswith('@hourly'):
line = line.replace('@hourly', '0 * * * *')
params = line.strip().split()
if len(params) < 5:
raise ValueError('Invalid cron line (too short)')
elif len(params) > 5:
raise ValueError('Invalid cron line (too long)')
daysofweek = {'sun': 0, 'mon': 1, 'tue': 2, 'wed': 3, 'thu': 4,
'fri': 5, 'sat': 6}
monthsofyear = {'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5,
'jun': 6, 'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10,
'nov': 11, 'dec': 12, 'l': 'l'}
for (s, i) in zip(params[:5], ['min', 'hr', 'dom', 'mon', 'dow']):
if s not in [None, '*']:
task[i] = []
vals = s.split(',')
for val in vals:
if i == 'dow':
refdict = daysofweek
elif i == 'mon':
refdict = monthsofyear
if i in ('dow', 'mon') and '-' in val and '/' not in val:
isnum = val.split('-')[0].isdigit()
if isnum:
val = '%s/1' % val
else:
val = '-'.join([str(refdict[v])
for v in val.split('-')])
if val != '-1' and '-' in val and '/' not in val:
val = '%s/1' % val
if '/' in val:
task[i] += self._rangetolist(val, i)
elif val.isdigit() or val == '-1':
task[i].append(int(val))
elif i in ('dow', 'mon'):
if val in refdict:
task[i].append(refdict[val])
elif i == 'dom' and val == 'l':
task[i].append(val)
if not task[i]:
raise ValueError('Invalid cron value (%s)' % s)
if not self._sanitycheck(task[i], i):
raise ValueError('Invalid cron value (%s)' % s)
task[i] = sorted(task[i])
self.task = task
@staticmethod
def _get_next_dow(sched, task):
task_dow = [a % 7 for a in task['dow']]
while sched.isoweekday() % 7 not in task_dow:
sched += datetime.timedelta(days=1)
return sched
@staticmethod
def _get_next_dom(sched, task):
if task['dom'] == ['l']:
last_feb = 29 if sched.year % 4 == 0 else 28
lastdayofmonth = [
31, last_feb, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
]
task_dom = [lastdayofmonth[sched.month - 1]]
else:
task_dom = task['dom']
while sched.day not in task_dom:
sched += datetime.timedelta(days=1)
return sched
@staticmethod
def _get_next_mon(sched, task):
while sched.month not in task['mon']:
if sched.month < 12:
sched = sched.replace(month=sched.month + 1)
else:
sched = sched.replace(month=1, year=sched.year + 1)
return sched
@staticmethod
def _getnext_hhmm(sched, task, add_to=True):
if add_to:
sched += datetime.timedelta(minutes=1)
if 'min' in task:
while sched.minute not in task['min']:
sched += datetime.timedelta(minutes=1)
if 'hr' in task and sched.hour not in task['hr']:
while sched.hour not in task['hr']:
sched += datetime.timedelta(hours=1)
return sched
def _getnext_date(self, sched, task):
if 'dow' in task and 'dom' in task:
dow = self._get_next_dow(sched, task)
dom = self._get_next_dom(sched, task)
sched = min(dow, dom)
elif 'dow' in task:
sched = self._get_next_dow(sched, task)
elif 'dom' in task:
sched = self._get_next_dom(sched, task)
if 'mon' in task:
sched = self._get_next_mon(sched, task)
return sched.replace(hour=0, minute=0)
def get_next(self):
"""Get next date according to specs."""
if not self.task:
self._parse()
task = self.task
sched = self.sched
x = 0
while x < 1000: # avoid potential max recursions
x += 1
try:
next_date = self._getnext_date(sched, task)
except (ValueError, OverflowError) as e:
raise ValueError('Invalid cron expression (%s)' % e)
if next_date.date() > self.sched.date():
# we rolled date, check for valid hhmm
sched = self._getnext_hhmm(next_date, task, False)
break
else:
# same date, get next hhmm
sched_time = self._getnext_hhmm(sched, task, True)
if sched_time.date() > sched.date():
# we rolled date again :(
sched = sched_time
else:
sched = sched_time
break
else:
raise ValueError('Potential bug found, please submit your '
'cron expression to the authors')
self.sched = sched
return sched
def __iter__(self):
"""Support iteration."""
return self
__next__ = next = get_next
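# Hedged example (illustrative, defined but never called here): compute the next few
# fire times of a cron expression from a fixed base datetime. The expression and the
# base date are arbitrary values chosen for the sketch.
def _example_cron_parser():
    base = datetime.datetime(2019, 1, 1, 8, 0)
    cron = CronParser('*/15 8-17 * * mon-fri', base)
    # each call advances the internal schedule; a ValueError is raised on bad cron lines
    return [cron.get_next() for _ in range(3)]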
# The two functions below deal with simplejson decoding strings as unicode, which matters
# especially for the dict decode: unicode keys won't work when the dict is later passed
# as function keyword arguments (Python 2 only).
# Borrowed from http://stackoverflow.com/questions/956867/
def _decode_list(lst):
if not PY2:
return lst
newlist = []
for i in lst:
if isinstance(i, unicode):
i = i.encode('utf-8')
elif isinstance(i, list):
i = _decode_list(i)
newlist.append(i)
return newlist
def _decode_dict(dct):
if not PY2:
return dct
newdict = {}
for k, v in iteritems(dct):
if isinstance(k, unicode):
k = k.encode('utf-8')
if isinstance(v, unicode):
v = v.encode('utf-8')
elif isinstance(v, list):
v = _decode_list(v)
newdict[k] = v
return newdict
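# Hedged example (Python 2 only; on Python 3 both helpers are no-ops):
#   _decode_dict({u'name': u'bob'}) -> {'name': 'bob'}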
def executor(queue, task, out):
"""The function used to execute tasks in the background process."""
logger.debug(' task started')
class LogOutput(object):
"""Facility to log output at intervals."""
def __init__(self, out_queue):
self.out_queue = out_queue
self.stdout = sys.stdout
sys.stdout = self
def __del__(self):
sys.stdout = self.stdout
def flush(self):
pass
def write(self, data):
self.out_queue.put(data)
W2P_TASK = Storage({
'id': task.task_id,
'uuid': task.uuid,
'run_id': task.run_id
})
stdout = LogOutput(out)
try:
if task.app:
os.chdir(os.environ['WEB2PY_PATH'])
from gluon.shell import env, parse_path_info
from gluon import current
level = logging.getLogger().getEffectiveLevel()
logging.getLogger().setLevel(logging.WARN)
# Get controller-specific subdirectory if task.app is of
# form 'app/controller'
(a, c, f) = parse_path_info(task.app)
_env = env(a=a, c=c, import_models=True,
extra_request={'is_scheduler': True})
logging.getLogger().setLevel(level)
f = task.function
functions = current._scheduler.tasks
if not functions:
# look into env
_function = _env.get(f)
else:
_function = functions.get(f)
if not isinstance(_function, CALLABLETYPES):
raise NameError(
"name '%s' not found in scheduler's environment" % f)
# Inject W2P_TASK into environment
_env.update({'W2P_TASK': W2P_TASK})
# Inject W2P_TASK into current
from gluon import current
current.W2P_TASK = W2P_TASK
globals().update(_env)
args = _decode_list(loads(task.args))
vars = loads(task.vars, object_hook=_decode_dict)
result = dumps(_function(*args, **vars))
else:
# for testing purpose only
result = eval(task.function)(
*loads(task.args, object_hook=_decode_dict),
**loads(task.vars, object_hook=_decode_dict))
if len(result) >= 1024:
fd, temp_path = tempfile.mkstemp(suffix='.w2p_sched')
with os.fdopen(fd, 'w') as f:
f.write(result)
result = 'w2p_special:%s' % temp_path
queue.put(TaskReport('COMPLETED', result=result))
except BaseException as e:
tb = traceback.format_exc()
queue.put(TaskReport('FAILED', tb=tb))
del stdout
class MetaScheduler(threading.Thread):
"""Base class documenting scheduler's base methods."""
def __init__(self):
threading.Thread.__init__(self)
self.process = None # the background process
self.have_heartbeat = True # set to False to kill
self.empty_runs = 0
def local_async(self, task):
"""Start the background process.
Args:
task : a `Task` object
Returns:
a TaskReport object holding the final status (COMPLETED, FAILED,
TIMEOUT or STOPPED) together with the result, output and traceback
"""
db = self.db
sr = db.scheduler_run
out = multiprocessing.Queue()
queue = multiprocessing.Queue(maxsize=1)
p = multiprocessing.Process(target=executor, args=(queue, task, out))
self.process = p
logger.debug(' task starting')
p.start()
task_output = ""
tout = ""
try:
if task.sync_output > 0:
run_timeout = task.sync_output
else:
run_timeout = task.timeout
start = time.time()
while p.is_alive() and (not task.timeout or time.time() - start < task.timeout):
if tout:
try:
logger.debug(' partial output saved')
db(sr.id == task.run_id).update(run_output=task_output)
db.commit()
except:
pass
p.join(timeout=run_timeout)
tout = ""
while not out.empty():
tout += out.get()
if tout:
logger.debug(' partial output: "%s"', str(tout))
if CLEAROUT in tout:
task_output = tout[
tout.rfind(CLEAROUT) + len(CLEAROUT):]
else:
task_output += tout
except:
p.terminate()
p.join()
logger.debug(' task stopped by general exception')
tr = TaskReport(STOPPED)
else:
if p.is_alive():
p.terminate()
logger.debug(' task timeout')
try:
# we try to get a traceback here
tr = queue.get(timeout=2)
tr.status = TIMEOUT
tr.output = task_output
except Queue.Empty:
tr = TaskReport(TIMEOUT)
elif queue.empty():
logger.debug(' task stopped')
tr = TaskReport(STOPPED)
else:
logger.debug(' task completed or failed')
tr = queue.get()
result = tr.result
if result and result.startswith('w2p_special'):
temp_path = result.replace('w2p_special:', '', 1)
with open(temp_path) as f:
tr.result = f.read()
os.unlink(temp_path)
tr.output = task_output
return tr
def die(self):
"""Forces termination of the worker process along with any running
task"""
logger.info('die!')
self.have_heartbeat = False
self.terminate_process()
def give_up(self):
"""Waits for any running task to be executed, then exits the worker
process"""
logger.info('Giving up as soon as possible!')
self.have_heartbeat = False
def terminate_process(self):
"""Terminate any running tasks (internal use only)"""
try:
self.process.terminate()
except:
pass # no process to terminate
def run(self):
"""This is executed by the main thread to send heartbeats"""
counter = 0
while self.have_heartbeat:
self.send_heartbeat(counter)
counter += 1
def start_heartbeats(self):
self.start()
def send_heartbeat(self, counter):
raise NotImplementedError
def pop_task(self):
"""Fetches a task ready to be executed"""
raise NotImplementedError
def report_task(self, task, task_report):
"""Creates a task report"""
raise NotImplementedError
def sleep(self):
raise NotImplementedError
def loop(self):
"""Main loop, fetching tasks and starting executor's background
processes"""
raise NotImplementedError
TASK_STATUS = (QUEUED, RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED, EXPIRED)
RUN_STATUS = (RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED)
WORKER_STATUS = (ACTIVE, PICK, DISABLED, TERMINATE, KILL, STOP_TASK)
class IS_CRONLINE(object):
"""
Validates cronline
"""
def __init__(self, error_message=None):
self.error_message = error_message
def __call__(self, value):
recur = CronParser(value, datetime.datetime.now())
try:
recur.get_next()
return (value, None)
except (KeyError, ValueError) as e:
if not self.error_message:
return (value, e)
return (value, self.error_message)
class TYPE(object):
"""
Validator that checks whether field is valid json and validates its type.
Used for `args` and `vars` of the scheduler_task table
"""
def __init__(self, myclass=list, parse=False):
self.myclass = myclass
self.parse = parse
def __call__(self, value):
from gluon import current
try:
obj = loads(value)
except:
return (value, current.T('invalid json'))
else:
if isinstance(obj, self.myclass):
if self.parse:
return (obj, None)
else:
return (value, None)
else:
return (value, current.T('Not of type: %s') % self.myclass)
class Scheduler(MetaScheduler):
"""Scheduler object
Args:
db: DAL connection where Scheduler will create its tables
tasks(dict): either a dict containing name-->func or None.
If None, functions will be searched in the environment
migrate(bool): turn migration on/off for the Scheduler's tables
worker_name(str): force worker_name to identify each process.
Leave it to None to autoassign a name (hostname#pid)
group_names(list): process tasks belonging to this group
defaults to ['main'] if nothing gets passed
heartbeat(int): how many seconds the worker sleeps between one
execution and the following one. Indirectly sets how many seconds
will pass between checks for new tasks
max_empty_runs(int): how many loops are allowed to pass without
processing any tasks before exiting the process. 0 keeps the
process alive indefinitely
discard_results(bool): the Scheduler stores execution details in the
scheduler_run table. By default, the details are kept only if
there is a result. Setting this to True discards results
even for tasks that return something
utc_time(bool): do all datetime calculations assuming UTC as the
timezone. Remember to pass `start_time` and `stop_time` to tasks
accordingly
"""
def __init__(self, db, tasks=None, migrate=True,
worker_name=None, group_names=None, heartbeat=HEARTBEAT,
max_empty_runs=0, discard_results=False, utc_time=False):
MetaScheduler.__init__(self)
self.db = db
self.db_thread = None
self.tasks = tasks
self.group_names = group_names or ['main']
self.heartbeat = heartbeat
self.worker_name = worker_name or IDENTIFIER
self.max_empty_runs = max_empty_runs
self.discard_results = discard_results
self.is_a_ticker = False
self.do_assign_tasks = False
self.greedy = False
self.utc_time = utc_time
self.w_stats = Storage(
dict(
status=RUNNING,
sleep=heartbeat,
total=0,
errors=0,
empty_runs=0,
queue=0,
distribution=None,
workers=0)
) # dict holding statistics
from gluon import current
current._scheduler = self
self.define_tables(db, migrate=migrate)
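    # Hedged usage sketch (comments only, not executed): a typical web2py model file
    # would instantiate the scheduler roughly like this; the names are illustrative.
    #   # applications/<app>/models/scheduler.py
    #   from gluon.scheduler import Scheduler
    #   scheduler = Scheduler(db, dict(mytask=mytask),
    #                         group_names=['main', 'reports'], heartbeat=3)
    # Workers are then started with `python web2py.py -K <app>` or with the
    # standalone entry point documented in USAGE above.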
def __get_migrate(self, tablename, migrate=True):
if migrate is False:
return False
elif migrate is True:
return True
elif isinstance(migrate, str):
return "%s%s.table" % (migrate, tablename)
return True
def now(self):
"""Shortcut that fetches current time based on UTC preferences."""
return self.utc_time and datetime.datetime.utcnow() or datetime.datetime.now()
def set_requirements(self, scheduler_task):
"""Called to set defaults for lazy_tables connections."""
from gluon import current
if hasattr(current, 'request'):
scheduler_task.application_name.default = '%s/%s' % (
current.request.application, current.request.controller
)
def define_tables(self, db, migrate):
"""Define Scheduler tables structure."""
from pydal.base import DEFAULT
logger.debug('defining tables (migrate=%s)', migrate)
now = self.now
db.define_table(
'scheduler_task',
Field('application_name', requires=IS_NOT_EMPTY(),
default=None, writable=False),
Field('task_name', default=None),
Field('group_name', default='main'),
Field('status', requires=IS_IN_SET(TASK_STATUS),
default=QUEUED, writable=False),
Field('function_name',
requires=IS_IN_SET(sorted(self.tasks.keys()))
if self.tasks else DEFAULT),
Field('uuid', length=255,
requires=IS_NOT_IN_DB(db, 'scheduler_task.uuid'),
unique=True, default=web2py_uuid),
Field('args', 'text', default='[]', requires=TYPE(list)),
Field('vars', 'text', default='{}', requires=TYPE(dict)),
Field('enabled', 'boolean', default=True),
Field('start_time', 'datetime', default=now,
requires=IS_DATETIME()),
Field('next_run_time', 'datetime', default=now),
Field('stop_time', 'datetime'),
Field('repeats', 'integer', default=1, comment="0=unlimited",
requires=IS_INT_IN_RANGE(0, None)),
Field('retry_failed', 'integer', default=0, comment="-1=unlimited",
requires=IS_INT_IN_RANGE(-1, None)),
Field('period', 'integer', default=60, comment='seconds',
requires=IS_INT_IN_RANGE(0, None)),
Field('prevent_drift', 'boolean', default=False,
comment='Exact start_times between runs'),
Field('cronline', default=None,
comment='Discard "period", use this cron expr instead',
requires=IS_EMPTY_OR(IS_CRONLINE())),
Field('timeout', 'integer', default=60, comment='seconds',
requires=IS_INT_IN_RANGE(1, None)),
Field('sync_output', 'integer', default=0,
comment="update output every n sec: 0=never",
requires=IS_INT_IN_RANGE(0, None)),
Field('times_run', 'integer', default=0, writable=False),
Field('times_failed', 'integer', default=0, writable=False),
Field('last_run_time', 'datetime', writable=False, readable=False),
Field('assigned_worker_name', default='', writable=False),
on_define=self.set_requirements,
migrate=self.__get_migrate('scheduler_task', migrate),
format='(%(id)s) %(task_name)s')
db.define_table(
'scheduler_run',
Field('task_id', 'reference scheduler_task'),
Field('status', requires=IS_IN_SET(RUN_STATUS)),
Field('start_time', 'datetime'),
Field('stop_time', 'datetime'),
Field('run_output', 'text'),
Field('run_result', 'text'),
Field('traceback', 'text'),
Field('worker_name', default=self.worker_name),
migrate=self.__get_migrate('scheduler_run', migrate)
)
db.define_table(
'scheduler_worker',
Field('worker_name', length=255, unique=True),
Field('first_heartbeat', 'datetime'),
Field('last_heartbeat', 'datetime'),
Field('status', requires=IS_IN_SET(WORKER_STATUS)),
Field('is_ticker', 'boolean', default=False, writable=False),
Field('group_names', 'list:string', default=self.group_names),
Field('worker_stats', 'json'),
migrate=self.__get_migrate('scheduler_worker', migrate)
)
db.define_table(
'scheduler_task_deps',
Field('job_name', default='job_0'),
Field('task_parent', 'integer',
requires=IS_IN_DB(db, 'scheduler_task.id', '%(task_name)s')
),
Field('task_child', 'reference scheduler_task'),
Field('can_visit', 'boolean', default=False),
migrate=self.__get_migrate('scheduler_task_deps', migrate)
)
if migrate is not False:
db.commit()
def loop(self, worker_name=None):
"""Main loop.
This works basically as a neverending loop that:
- checks if the worker is ready to process tasks (is not DISABLED)
- pops a task from the queue
- if there is a task:
- spawns the executor background process
- waits for the process to be finished
- sleeps `heartbeat` seconds
- if there is not a task:
- checks for max_empty_runs
- sleeps `heartbeat` seconds
"""
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
try:
self.start_heartbeats()
while self.have_heartbeat:
if self.w_stats.status == DISABLED:
logger.debug('Someone stopped me, sleeping until better'
' times come (%s)', self.w_stats.sleep)
self.sleep()
continue
logger.debug('looping...')
task = self.wrapped_pop_task()
if task:
self.w_stats.empty_runs = 0
self.w_stats.status = RUNNING
self.w_stats.total += 1
self.wrapped_report_task(task, self.local_async(task))
if not self.w_stats.status == DISABLED:
self.w_stats.status = ACTIVE
else:
self.w_stats.empty_runs += 1
logger.debug('sleeping...')
if self.max_empty_runs != 0:
logger.debug('empty runs %s/%s',
self.w_stats.empty_runs,
self.max_empty_runs)
if self.w_stats.empty_runs >= self.max_empty_runs:
logger.info(
'empty runs limit reached, killing myself')
self.die()
self.sleep()
except (KeyboardInterrupt, SystemExit):
logger.info('caught')
self.die()
def wrapped_assign_tasks(self, db):
"""Commodity function to call `assign_tasks` and trap exceptions.
If an exception is raised, assume it happened because of database
contention and retries `assign_task` after 0.5 seconds
"""
logger.debug('Assigning tasks...')
db.commit() # db.commit() only for Mysql
x = 0
while x < 10:
try:
self.assign_tasks(db)
db.commit()
logger.debug('Tasks assigned...')
break
except:
self.w_stats.errors += 1
db.rollback()
logger.error('TICKER: error assigning tasks (%s)', x)
x += 1
time.sleep(0.5)
def wrapped_pop_task(self):
"""Commodity function to call `pop_task` and trap exceptions.
If an exception is raised, assume it happened because of database
contention and retries `pop_task` after 0.5 seconds
"""
db = self.db
db.commit() # another nifty db.commit() only for Mysql
x = 0
while x < 10:
try:
rtn = self.pop_task(db)
return rtn
break
except:
self.w_stats.errors += 1
db.rollback()
logger.error(' error popping tasks')
x += 1
time.sleep(0.5)
def pop_task(self, db):
"""Grab a task ready to be executed from the queue."""
now = self.now()
st = self.db.scheduler_task
if self.is_a_ticker and self.do_assign_tasks:
# I'm a ticker, and 5 loops passed without reassigning tasks,
# let's do that and loop again
self.wrapped_assign_tasks(db)
return None
# ready to process something
grabbed = db(
(st.assigned_worker_name == self.worker_name) &
(st.status == ASSIGNED)
)
task = grabbed.select(limitby=(0, 1), orderby=st.next_run_time).first()
if task:
task.update_record(status=RUNNING, last_run_time=now)
# no one will touch my task!
db.commit()
logger.debug(' work to do %s', task.id)
else:
if self.is_a_ticker and self.greedy:
# there are other tasks ready to be assigned
logger.info('TICKER: greedy loop')
self.wrapped_assign_tasks(db)
else:
logger.info('nothing to do')
return None
times_run = task.times_run + 1
if task.cronline:
cron_recur = CronParser(task.cronline, now.replace(second=0))
next_run_time = cron_recur.get_next()
elif not task.prevent_drift:
next_run_time = task.last_run_time + datetime.timedelta(
seconds=task.period
)
else:
# calc next_run_time based on available slots
# see #1191
next_run_time = task.start_time
secondspassed = (now - next_run_time).total_seconds()
steps = secondspassed // task.period + 1
next_run_time += datetime.timedelta(seconds=task.period * steps)
if times_run < task.repeats or task.repeats == 0:
# need to run (repeating task)
run_again = True
else:
# no need to run again
run_again = False
run_id = 0
while True and not self.discard_results:
logger.debug(' new scheduler_run record')
try:
run_id = db.scheduler_run.insert(
task_id=task.id,
status=RUNNING,
start_time=now,
worker_name=self.worker_name)
db.commit()
break
except:
time.sleep(0.5)
db.rollback()
logger.info('new task %(id)s "%(task_name)s"'
' %(application_name)s.%(function_name)s' % task)
return Task(
app=task.application_name,
function=task.function_name,
timeout=task.timeout,
args=task.args, # in json
vars=task.vars, # in json
task_id=task.id,
run_id=run_id,
run_again=run_again,
next_run_time=next_run_time,
times_run=times_run,
stop_time=task.stop_time,
retry_failed=task.retry_failed,
times_failed=task.times_failed,
sync_output=task.sync_output,
uuid=task.uuid)
def wrapped_report_task(self, task, task_report):
"""Commodity function to call `report_task` and trap exceptions.
If an exception is raised, assume it happened because of database
contention and retries `pop_task` after 0.5 seconds
"""
db = self.db
while True:
try:
self.report_task(task, task_report)
db.commit()
break
except:
self.w_stats.errors += 1
db.rollback()
logger.error(' error storing result')
time.sleep(0.5)
def report_task(self, task, task_report):
"""Take care of storing the result according to preferences.
Deals with logic for repeating tasks.
"""
db = self.db
now = self.now()
st = db.scheduler_task
sr = db.scheduler_run
if not self.discard_results:
if task_report.result != 'null' or task_report.tb:
# result is 'null' as a string if task completed
# if it's stopped it's None as NoneType, so we record
# the STOPPED "run" anyway
logger.debug(' recording task report in db (%s)',
task_report.status)
db(sr.id == task.run_id).update(
status=task_report.status,
stop_time=now,
run_result=task_report.result,
run_output=task_report.output,
traceback=task_report.tb)
else:
logger.debug(' deleting task report in db because of no result')
db(sr.id == task.run_id).delete()
# if there is a stop_time and the following run would exceed it
is_expired = (task.stop_time and
task.next_run_time > task.stop_time and
True or False)
status = (task.run_again and is_expired and EXPIRED or
task.run_again and not is_expired and
QUEUED or COMPLETED)
if task_report.status == COMPLETED:
d = dict(status=status,
next_run_time=task.next_run_time,
times_run=task.times_run,
times_failed=0
)
db(st.id == task.task_id).update(**d)
if status == COMPLETED:
self.update_dependencies(db, task.task_id)
else:
st_mapping = {'FAILED': 'FAILED',
'TIMEOUT': 'TIMEOUT',
'STOPPED': 'FAILED'}[task_report.status]
status = (task.retry_failed
and task.times_failed < task.retry_failed
and QUEUED or task.retry_failed == -1
and QUEUED or st_mapping)
db(st.id == task.task_id).update(
times_failed=st.times_failed + 1,
next_run_time=task.next_run_time,
status=status
)
logger.info('task completed (%s)', task_report.status)
def update_dependencies(self, db, task_id):
"""Unblock execution paths for Jobs."""
db(db.scheduler_task_deps.task_child == task_id).update(can_visit=True)
def adj_hibernation(self):
"""Used to increase the "sleep" interval for DISABLED workers."""
if self.w_stats.status == DISABLED:
wk_st = self.w_stats.sleep
hibernation = wk_st + HEARTBEAT if wk_st < MAXHIBERNATION else MAXHIBERNATION
self.w_stats.sleep = hibernation
def send_heartbeat(self, counter):
"""Coordination among available workers.
It:
- sends the heartbeat
- elects a ticker among available workers (the only process that
effectively dispatches tasks to workers)
- deals with workers' statuses
- does "housecleaning" for dead workers
- triggers tasks assignment to workers
"""
if not self.db_thread:
logger.debug('thread building own DAL object')
self.db_thread = DAL(
self.db._uri, folder=self.db._adapter.folder, decode_credentials=True)
self.define_tables(self.db_thread, migrate=False)
try:
db = self.db_thread
sw, st = db.scheduler_worker, db.scheduler_task
now = self.now()
# record heartbeat
mybackedstatus = db(sw.worker_name == self.worker_name).select().first()
if not mybackedstatus:
sw.insert(status=ACTIVE, worker_name=self.worker_name,
first_heartbeat=now, last_heartbeat=now,
group_names=self.group_names,
worker_stats=self.w_stats)
self.w_stats.status = ACTIVE
self.w_stats.sleep = self.heartbeat
mybackedstatus = ACTIVE
else:
mybackedstatus = mybackedstatus.status
if mybackedstatus == DISABLED:
# keep sleeping
self.w_stats.status = DISABLED
logger.debug('........recording heartbeat (%s)',
self.w_stats.status)
db(sw.worker_name == self.worker_name).update(
last_heartbeat=now,
worker_stats=self.w_stats)
elif mybackedstatus == TERMINATE:
self.w_stats.status = TERMINATE
logger.debug("Waiting to terminate the current task")
self.give_up()
elif mybackedstatus == KILL:
self.w_stats.status = KILL
self.die()
return
else:
if mybackedstatus == STOP_TASK:
logger.info('Asked to kill the current task')
self.terminate_process()
logger.debug('........recording heartbeat (%s)',
self.w_stats.status)
db(sw.worker_name == self.worker_name).update(
last_heartbeat=now, status=ACTIVE,
worker_stats=self.w_stats)
self.w_stats.sleep = self.heartbeat # re-activating the process
if self.w_stats.status != RUNNING:
self.w_stats.status = ACTIVE
self.do_assign_tasks = False
if counter % 5 == 0 or mybackedstatus == PICK:
try:
# delete dead workers
expiration = now - datetime.timedelta(
seconds=self.heartbeat * 3)
departure = now - datetime.timedelta(
seconds=self.heartbeat * 3 * 15)
logger.debug(
' freeing workers that have not sent heartbeat')
dead_workers = db(
((sw.last_heartbeat < expiration) & (sw.status == ACTIVE)) |
((sw.last_heartbeat < departure) & (sw.status != ACTIVE))
)
dead_workers_name = dead_workers._select(sw.worker_name)
db(
(st.assigned_worker_name.belongs(dead_workers_name)) &
(st.status == RUNNING)
).update(assigned_worker_name='', status=QUEUED)
dead_workers.delete()
try:
self.is_a_ticker = self.being_a_ticker()
except:
logger.error('Error coordinating TICKER')
if self.w_stats.status == ACTIVE:
self.do_assign_tasks = True
except:
logger.error('Error cleaning up')
db.commit()
except:
logger.error('Error retrieving status')
db.rollback()
self.adj_hibernation()
self.sleep()
def being_a_ticker(self):
"""Elect a TICKER process that assigns tasks to available workers.
Does its best to elect a worker that is not busy processing other tasks
to allow a proper distribution of tasks among all active workers ASAP
"""
db = self.db_thread
sw = db.scheduler_worker
my_name = self.worker_name
all_active = db(
(sw.worker_name != my_name) & (sw.status == ACTIVE)
).select(sw.is_ticker, sw.worker_name)
ticker = all_active.find(lambda row: row.is_ticker is True).first()
not_busy = self.w_stats.status == ACTIVE
if not ticker:
# if no other tickers are around
if not_busy:
# only if I'm not busy
db(sw.worker_name == my_name).update(is_ticker=True)
db(sw.worker_name != my_name).update(is_ticker=False)
logger.info("TICKER: I'm a ticker")
else:
# I'm busy
if len(all_active) >= 1:
# so I'll "downgrade" myself to a "poor worker"
db(sw.worker_name == my_name).update(is_ticker=False)
else:
not_busy = True
db.commit()
return not_busy
else:
logger.info(
"%s is a ticker, I'm a poor worker" % ticker.worker_name)
return False
def assign_tasks(self, db):
"""Assign tasks to workers, which can then pop them from the queue.
Deals with group_name(s) logic, assigning tasks linearly to the
available workers of those groups
"""
sw, st, sd = db.scheduler_worker, db.scheduler_task, db.scheduler_task_deps
now = self.now()
all_workers = db(sw.status == ACTIVE).select()
# build workers as dict of groups
wkgroups = {}
for w in all_workers:
if w.worker_stats['status'] == 'RUNNING':
continue
group_names = w.group_names
for gname in group_names:
if gname not in wkgroups:
wkgroups[gname] = dict(
workers=[{'name': w.worker_name, 'c': 0}])
else:
wkgroups[gname]['workers'].append(
{'name': w.worker_name, 'c': 0})
# set queued tasks that expired between "runs" (i.e., you turned off
# the scheduler): then it wasn't expired, but now it is
db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(st.stop_time < now)
).update(status=EXPIRED)
# calculate dependencies
deps_with_no_deps = db(
(sd.can_visit == False) &
(~sd.task_child.belongs(
db(sd.can_visit == False)._select(sd.task_parent)
)
)
)._select(sd.task_child)
no_deps = db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(
(sd.id == None) | (st.id.belongs(deps_with_no_deps))
)
)._select(st.id, distinct=True, left=sd.on(
(st.id == sd.task_parent) &
(sd.can_visit == False)
)
)
all_available = db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(st.next_run_time <= now) &
(st.enabled == True) &
(st.id.belongs(no_deps))
)
limit = len(all_workers) * (50 // (len(wkgroups) or 1))
# If there is a multitude of tasks, figure out a maximum number of
# tasks per worker. This can be further tuned with some added
# intelligence (like estimating how many tasks a worker will complete
# before the ticker reassigns them), but the gain is quite small.
# 50 is a sweet spot also for fast tasks, with sane heartbeat values.
# NB: the ticker reassigns tasks every 5 cycles, so if a worker completes
# its 50 tasks in less than heartbeat*5 seconds,
# it won't pick new tasks until heartbeat*5 seconds pass.
# If a worker is currently processing a long task, its tasks need to
# be reassigned to other workers.
# This shuffles things up a bit, in order to give each task an equal
# chance to be executed.
# let's freeze it up
db.commit()
x = 0
for group in wkgroups.keys():
tasks = all_available(st.group_name == group).select(
limitby=(0, limit), orderby=st.next_run_time)
# let's break up the queue evenly among workers
for task in tasks:
x += 1
gname = task.group_name
ws = wkgroups.get(gname)
if ws:
counter = 0
myw = 0
for i, w in enumerate(ws['workers']):
if w['c'] < counter:
myw = i
counter = w['c']
assigned_wn = wkgroups[gname]['workers'][myw]['name']
d = dict(
status=ASSIGNED,
assigned_worker_name=assigned_wn
)
db(
(st.id == task.id) &
(st.status.belongs((QUEUED, ASSIGNED)))
).update(**d)
wkgroups[gname]['workers'][myw]['c'] += 1
db.commit()
# I didn't report tasks but I'm working nonetheless!!!!
if x > 0:
self.w_stats.empty_runs = 0
self.w_stats.queue = x
self.w_stats.distribution = wkgroups
self.w_stats.workers = len(all_workers)
# I'll be greedy only if tasks assigned are equal to the limit
# (meaning there could be others ready to be assigned)
self.greedy = x >= limit
logger.info('TICKER: workers are %s', len(all_workers))
logger.info('TICKER: tasks are %s', x)
def sleep(self):
"""Calculate the number of seconds to sleep."""
time.sleep(self.w_stats.sleep)
# should only sleep until next available task
def set_worker_status(self, group_names=None, action=ACTIVE,
exclude=None, limit=None, worker_name=None):
"""Internal function to set worker's status."""
ws = self.db.scheduler_worker
if not group_names:
group_names = self.group_names
elif isinstance(group_names, str):
group_names = [group_names]
if worker_name:
self.db(ws.worker_name == worker_name).update(status=action)
return
exclusion = exclude and exclude.append(action) or [action]
if not limit:
for group in group_names:
self.db(
(ws.group_names.contains(group)) &
(~ws.status.belongs(exclusion))
).update(status=action)
else:
for group in group_names:
workers = self.db((ws.group_names.contains(group)) &
(~ws.status.belongs(exclusion))
)._select(ws.id, limitby=(0, limit))
self.db(ws.id.belongs(workers)).update(status=action)
def disable(self, group_names=None, limit=None, worker_name=None):
"""Set DISABLED on the workers processing `group_names` tasks.
A DISABLED worker will be kept alive but it won't be able to process
any waiting tasks, essentially putting it to sleep.
By default, all group_names of the Scheduler's instantiation are selected
"""
self.set_worker_status(
group_names=group_names,
action=DISABLED,
exclude=[DISABLED, KILL, TERMINATE],
limit=limit)
def resume(self, group_names=None, limit=None, worker_name=None):
"""Wakes a worker up (it will be able to process queued tasks)"""
self.set_worker_status(
group_names=group_names,
action=ACTIVE,
exclude=[KILL, TERMINATE],
limit=limit)
def terminate(self, group_names=None, limit=None, worker_name=None):
"""Sets TERMINATE as worker status. The worker will wait for any
currently running tasks to be executed and then it will exit gracefully
"""
self.set_worker_status(
group_names=group_names,
action=TERMINATE,
exclude=[KILL],
limit=limit)
def kill(self, group_names=None, limit=None, worker_name=None):
"""Sets KILL as worker status. The worker will be killed even if it's
processing a task."""
self.set_worker_status(
group_names=group_names,
action=KILL,
limit=limit)
def queue_task(self, function, pargs=[], pvars={}, **kwargs):
"""
Queue tasks. This takes care of handling the validation of all
parameters
Args:
function: the function (anything callable with a __name__)
pargs: "raw" args to be passed to the function. Automatically
jsonified.
pvars: "raw" kwargs to be passed to the function. Automatically
jsonified
kwargs: all the parameters available (basically, every
`scheduler_task` column). If args and vars are here, they
should be jsonified already, and they will override pargs
and pvars
Returns:
a dict just as a normal validate_and_insert(), plus a uuid key
holding the uuid of the queued task. If validation does not pass
(i.e. some parameters are invalid), both id and uuid will be None,
and you'll get an "errors" dict holding the errors found.
"""
if hasattr(function, '__name__'):
function = function.__name__
targs = 'args' in kwargs and kwargs.pop('args') or dumps(pargs)
tvars = 'vars' in kwargs and kwargs.pop('vars') or dumps(pvars)
tuuid = 'uuid' in kwargs and kwargs.pop('uuid') or web2py_uuid()
tname = 'task_name' in kwargs and kwargs.pop('task_name') or function
immediate = 'immediate' in kwargs and kwargs.pop('immediate') or None
cronline = kwargs.get('cronline')
kwargs.update(
function_name=function,
task_name=tname,
args=targs,
vars=tvars,
uuid=tuuid,
)
if cronline:
try:
start_time = kwargs.get('start_time', self.now())
next_run_time = CronParser(cronline, start_time).get_next()
kwargs.update(start_time=start_time, next_run_time=next_run_time)
except:
pass
if 'start_time' in kwargs and 'next_run_time' not in kwargs:
kwargs.update(next_run_time=kwargs['start_time'])
rtn = self.db.scheduler_task.validate_and_insert(**kwargs)
if not rtn.errors:
rtn.uuid = tuuid
if immediate:
self.db(
(self.db.scheduler_worker.is_ticker == True)
).update(status=PICK)
else:
rtn.uuid = None
return rtn
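    # Hedged example (comments only): queuing a repeating task from application code;
    # `send_report` and its parameters are assumptions for illustration.
    #   rtn = scheduler.queue_task(send_report,
    #                              pargs=[42], pvars={'verbose': True},
    #                              period=3600, repeats=0, timeout=120,
    #                              immediate=True)
    #   # rtn.id / rtn.uuid identify the task; rtn.errors is non-empty if validation failed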
def task_status(self, ref, output=False):
"""
Retrieves task status and optionally the result of the task
Args:
ref: can be
- an integer : lookup will be done by scheduler_task.id
- a string : lookup will be done by scheduler_task.uuid
- a `Query` : lookup as you wish, e.g. ::
db.scheduler_task.task_name == 'test1'
output(bool): if `True`, fetch also the scheduler_run record
Returns:
a single Row object, for the last queued task.
If output == True, returns also the last scheduler_run record.
The scheduler_run record is fetched by a left join, so it can
have all fields == None
"""
from pydal.objects import Query
sr, st = self.db.scheduler_run, self.db.scheduler_task
if isinstance(ref, (int, long)):
q = st.id == ref
elif isinstance(ref, str):
q = st.uuid == ref
elif isinstance(ref, Query):
q = ref
else:
raise SyntaxError(
"You can retrieve results only by id, uuid or Query")
fields = [st.ALL]
left = False
orderby = ~st.id
if output:
fields = st.ALL, sr.ALL
left = sr.on(sr.task_id == st.id)
orderby = ~st.id | ~sr.id
row = self.db(q).select(
*fields,
**dict(orderby=orderby,
left=left,
limitby=(0, 1))
).first()
if row and output:
row.result = row.scheduler_run.run_result and \
loads(row.scheduler_run.run_result,
object_hook=_decode_dict) or None
return row
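    # Hedged example (comments only): checking on the task queued above and, if needed,
    # stopping it; `rtn` refers to the illustrative queue_task() result.
    #   status = scheduler.task_status(rtn.uuid, output=True)
    #   print(status.scheduler_task.status, status.result)
    #   scheduler.stop_task(rtn.uuid)   # see stop_task() below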
def stop_task(self, ref):
"""Shortcut for task termination.
If the task is RUNNING it will terminate it, meaning that status
will be set as FAILED.
If the task is QUEUED, its stop_time will be set as to "now",
the enabled flag will be set to False, and the status to STOPPED
Args:
ref: can be
- an integer : lookup will be done by scheduler_task.id
- a string : lookup will be done by scheduler_task.uuid
Returns:
- 1 if task was stopped (meaning an update has been done)
- None if task was not found, or if task was not RUNNING or QUEUED
Note:
Experimental
"""
st, sw = self.db.scheduler_task, self.db.scheduler_worker
if isinstance(ref, (int, long)):
q = st.id == ref
elif isinstance(ref, str):
q = st.uuid == ref
else:
raise SyntaxError(
"You can retrieve results only by id or uuid")
task = self.db(q).select(st.id, st.status, st.assigned_worker_name)
task = task.first()
rtn = None
if not task:
return rtn
if task.status == 'RUNNING':
q = sw.worker_name == task.assigned_worker_name
rtn = self.db(q).update(status=STOP_TASK)
elif task.status == 'QUEUED':
rtn = self.db(q).update(
stop_time=self.now(),
enabled=False,
status=STOPPED)
return rtn
def get_workers(self, only_ticker=False):
"""Returns a dict holding `worker_name : {**columns}`
representing all "registered" workers.
If only_ticker is True, returns only the workers running as a TICKER,
if there are any
"""
db = self.db
if only_ticker:
workers = db(db.scheduler_worker.is_ticker == True).select()
else:
workers = db(db.scheduler_worker.id > 0).select()
all_workers = {}
for row in workers:
all_workers[row.worker_name] = Storage(
status=row.status,
first_heartbeat=row.first_heartbeat,
last_heartbeat=row.last_heartbeat,
group_names=row.group_names,
is_ticker=row.is_ticker,
worker_stats=row.worker_stats
)
return all_workers
def main():
"""
Allows running a worker without ``python web2py.py``, simply with::
python gluon/scheduler.py
"""
parser = optparse.OptionParser()
parser.add_option(
"-w", "--worker_name", dest="worker_name", default=None,
help="start a worker with name")
parser.add_option(
"-b", "--heartbeat", dest="heartbeat", default=10,
type='int', help="heartbeat time in seconds (default 10)")
parser.add_option(
"-L", "--logger_level", dest="logger_level",
default=30,
type='int',
help="set debug output level (0-100, 0 means all, 100 means none;default is 30)")
parser.add_option("-E", "--empty-runs",
dest="max_empty_runs",
type='int',
default=0,
help="max loops with no grabbed tasks permitted (0 for never check)")
parser.add_option(
"-g", "--group_names", dest="group_names",
default='main',
help="comma separated list of groups to be picked by the worker")
parser.add_option(
"-f", "--db_folder", dest="db_folder",
default='/Users/mdipierro/web2py/applications/scheduler/databases',
help="location of the dal database folder")
parser.add_option(
"-u", "--db_uri", dest="db_uri",
default='sqlite://storage.sqlite',
help="database URI string (web2py DAL syntax)")
parser.add_option(
"-t", "--tasks", dest="tasks", default=None,
help="file containing task files, must define" +
"tasks = {'task_name':(lambda: 'output')} or similar set of tasks")
parser.add_option(
"-U", "--utc-time", dest="utc_time", default=False,
help="work with UTC timestamps"
)
(options, args) = parser.parse_args()
if not options.tasks or not options.db_uri:
print(USAGE)
if options.tasks:
path, filename = os.path.split(options.tasks)
if filename.endswith('.py'):
filename = filename[:-3]
sys.path.append(path)
print('importing tasks...')
tasks = __import__(filename, globals(), locals(), [], -1).tasks
print('tasks found: ' + ', '.join(tasks.keys()))
else:
tasks = {}
group_names = [x.strip() for x in options.group_names.split(',')]
logging.getLogger().setLevel(options.logger_level)
print('groups for this worker: ' + ', '.join(group_names))
print('connecting to database in folder: ' + (options.db_folder or './'))
print('using URI: ' + options.db_uri)
db = DAL(options.db_uri, folder=options.db_folder, decode_credentials=True)
print('instantiating scheduler...')
scheduler = Scheduler(db=db,
worker_name=options.worker_name,
tasks=tasks,
migrate=True,
group_names=group_names,
heartbeat=options.heartbeat,
max_empty_runs=options.max_empty_runs,
utc_time=options.utc_time)
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
print('starting main worker loop...')
scheduler.loop()
if __name__ == '__main__':
main()
|
[] |
[] |
[
"WEB2PY_PATH"
] |
[]
|
["WEB2PY_PATH"]
|
python
| 1 | 0 |